/*	$NetBSD: apmdev.c,v 1.34 2021/09/26 01:16:08 thorpej Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.34 2021/09/26 01:16:08 thorpej Exp $");

#ifdef _KERNEL_OPT
#include "opt_apm.h"
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/hpc/apm/apmvar.h>

#ifdef APMDEBUG
#define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)


#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)		/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define APM_NORMAL	0
#define APM_CTL	8
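
/*
 * Device minor number layout, as implied by the macros above: the low
 * nibble (APM()) selects the flavour of the node and the remaining bits
 * (APMUNIT()) select the unit.  For example, in the usual single-unit
 * configuration, minor 0 (APM_NORMAL) is the read-only event/status
 * node and minor 8 (APM_CTL) is the control node that may also request
 * suspend/standby.
 */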

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This covers both the APM thread itself and
 * user context.
 */
#define	APM_LOCK(apmsc)						\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)						\
	(void) mutex_exit(&(apmsc)->sc_lock)

static void	apmdevattach(device_t, device_t, void *);
static int	apmdevmatch(device_t, cfdata_t, void *);

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

CFATTACH_DECL_NEW(apmdev, sizeof(struct apm_softc),
    apmdevmatch, apmdevattach, NULL, NULL);

extern struct cfdriver apmdev_cd;

dev_type_open(apmdevopen);
dev_type_close(apmdevclose);
dev_type_ioctl(apmdevioctl);
dev_type_poll(apmdevpoll);
dev_type_kqfilter(apmdevkqfilter);

const struct cdevsw apmdev_cdevsw = {
	.d_open = apmdevopen,
	.d_close = apmdevclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = apmdevioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = apmdevpoll,
	.d_mmap = nommap,
	.d_kqfilter = apmdevkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

/* configurable variables */
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);			/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	dopowerhooks(PWR_SOFTSUSPEND);
	(void) tsleep(sc, PWAIT, "apmsuspend",  hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_SUSPEND);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
}

static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	dopowerhooks(PWR_SOFTSTANDBY);
	(void) tsleep(sc, PWAIT, "apmstandby",  hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_STANDBY);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
}

static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{

	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#if 0 /* XXX: def TIME_FREQ */
	/*
	 * Some systems require their clock to be initialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	dopowerhooks(PWR_RESUME);

	splx(apm_spl);

	dopowerhooks(PWR_SOFTRESUME);

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;			/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}
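
/*
 * Note on the event list above: it is a small ring buffer.  sc_event_ptr
 * is the next slot to fill and sc_event_count the number of undelivered
 * entries, so the oldest pending event lives at index
 * (sc_event_ptr + APM_NEVENTS - sc_event_count) % APM_NEVENTS, which is
 * exactly the index apmdevioctl() computes for APM_IOC_NEXTEVENT.  For
 * example, if APM_NEVENTS were 16 with ptr == 3 and count == 2, the
 * oldest event would sit in slot 1.
 */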

static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#else
		__USE(error);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
			case 0:
				code = "reserved system";
				break;
			case 1:
				code = "reserved device";
				break;
			case 2:
				code = "OEM defined";
				break;
			default:
				code = "reserved";
				break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;


	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
}

static int
apmdevmatch(device_t parent, cfdata_t match, void *aux)
{

	return apm_match();
}

static void
apmdevattach(device_t parent, device_t self, void *aux)
{
	struct apm_softc *sc;
	struct apmdev_attach_args *aaa = aux;

	sc = device_private(self);
	sc->sc_dev = self;

	sc->sc_detail = aaa->apm_detail;
	sc->sc_vers = aaa->apm_detail & 0xffff; /* XXX: magic */

	sc->sc_ops = aaa->accessops;
	sc->sc_cookie = aaa->accesscookie;

	apm_attach(sc);
}

/*
 * Print function (for parent devices).
 */
int
apmprint(void *aux, const char *pnp)
{
	if (pnp)
		aprint_normal("apm at %s", pnp);

	return (UNCONF);
}

int
apm_match(void)
{
	static int got;
	return !got++;
}

void
apm_attach(struct apm_softc *sc)
{
	struct apm_power_info pinfo;
	u_int numbatts, capflags;
	int error;

	aprint_naive("\n");
	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pinfo);
	if (error == 0) {
#ifdef APM_POWER_PRINT
		apm_power_print(sc, &pinfo);
#endif
	} else
		apm_perror("get power status", error);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}
}

void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 * The (8 * hz) / 7 tick timeout rechecks roughly every
	 * 1.1 seconds.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);
	}
}

int
apmdevopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmdevclose(dev_t dev, int flag, int mode,
	    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

int
apmdevioctl(dev_t dev, u_long cmd, void *data, int flag,
	    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}
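
/*
 * For illustration only: a minimal userland sketch of the GETPOWER ioctl
 * handled above.  The device path and header location are assumptions
 * (they vary by port and configuration); apm(8) is the traditional
 * consumer of this interface.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <machine/apmvar.h>		// assumed header location
 *
 *	int
 *	main(void)
 *	{
 *		struct apm_power_info pi;
 *		int fd = open("/dev/apm", O_RDONLY);	// assumed device node
 *
 *		if (fd == -1 || ioctl(fd, APM_IOC_GETPOWER, &pi) == -1)
 *			return 1;
 *		printf("battery %d%%, A/C state %d\n",
 *		    pi.battery_life, pi.ac_state);
 *		return 0;
 *	}
 */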

int
apmdevpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}
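
/*
 * For illustration only: how a userland monitor might combine the poll
 * entry point above with APM_IOC_NEXTEVENT to drain queued events.  The
 * file descriptor setup and headers are the same assumptions as in the
 * sketch following apmdevioctl(), plus <poll.h> for poll(2) and INFTIM.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct apm_event_info ev;
 *
 *	for (;;) {
 *		if (poll(&pfd, 1, INFTIM) == -1)
 *			break;
 *		while (ioctl(fd, APM_IOC_NEXTEVENT, &ev) == 0)
 *			printf("event type %u index %u\n", ev.type, ev.index);
 *		// the ioctl fails with EAGAIN once the queue is empty
 *	}
 */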

static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	selremove_knote(&sc->sc_rsel, kn);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_apmrdetach,
	.f_event = filt_apmread,
};

int
apmdevkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	selrecord_knote(&sc->sc_rsel, kn);
	APM_UNLOCK(sc);

	return (0);
}
956