xref: /openbsd-src/sys/dev/acpi/acpi.c (revision a28daedfc357b214be5c701aa8ba8adb29a7f1c2)
1 /* $OpenBSD: acpi.c,v 1.136 2009/04/19 21:33:43 krw Exp $ */
2 /*
3  * Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
4  * Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/device.h>
22 #include <sys/malloc.h>
23 #include <sys/fcntl.h>
24 #include <sys/ioccom.h>
25 #include <sys/event.h>
26 #include <sys/signalvar.h>
27 #include <sys/proc.h>
28 #include <sys/kthread.h>
29 #include <sys/workq.h>
30 
31 #include <machine/conf.h>
32 #include <machine/cpufunc.h>
33 #include <machine/bus.h>
34 
35 #include <dev/pci/pcivar.h>
36 #include <dev/acpi/acpireg.h>
37 #include <dev/acpi/acpivar.h>
38 #include <dev/acpi/amltypes.h>
39 #include <dev/acpi/acpidev.h>
40 #include <dev/acpi/dsdt.h>
41 
42 #include <machine/apmvar.h>
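/*
 * Reuse the apm(4) minor-device layout so the existing /dev/apm
 * userland (e.g. apmd) keeps working; acpiioctl() below emulates the
 * APM ioctls on top of ACPI battery/AC data.
 */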
43 #define APMUNIT(dev)	(minor(dev)&0xf0)
44 #define APMDEV(dev)	(minor(dev)&0x0f)
45 #define APMDEV_NORMAL	0
46 #define APMDEV_CTL	8
47 
48 #ifdef ACPI_DEBUG
49 int acpi_debug = 16;
50 #endif
51 int acpi_enabled;
52 int acpi_poll_enabled;
53 int acpi_hasprocfvs;
54 
55 #define ACPIEN_RETRIES 15
56 
57 void	acpi_isr_thread(void *);
58 void	acpi_create_thread(void *);
59 
60 int	acpi_match(struct device *, void *, void *);
61 void	acpi_attach(struct device *, struct device *, void *);
62 int	acpi_submatch(struct device *, void *, void *);
63 int	acpi_print(void *, const char *);
64 
65 void	acpi_map_pmregs(struct acpi_softc *);
66 
67 int	acpi_founddock(struct aml_node *, void *);
68 int	acpi_foundpss(struct aml_node *, void *);
69 int	acpi_foundhid(struct aml_node *, void *);
70 int	acpi_foundec(struct aml_node *, void *);
71 int	acpi_foundtmp(struct aml_node *, void *);
72 int	acpi_foundprt(struct aml_node *, void *);
73 int	acpi_foundprw(struct aml_node *, void *);
74 int	acpi_foundvideo(struct aml_node *, void *);
75 int	acpi_inidev(struct aml_node *, void *);
76 
77 int	acpi_loadtables(struct acpi_softc *, struct acpi_rsdp *);
78 void	acpi_load_table(paddr_t, size_t, acpi_qhead_t *);
79 void	acpi_load_dsdt(paddr_t, struct acpi_q **);
80 
81 void	acpi_init_states(struct acpi_softc *);
82 void	acpi_init_gpes(struct acpi_softc *);
83 void	acpi_init_pm(struct acpi_softc *);
84 
85 void	acpi_dev_sort(void);
86 void	acpi_dev_free(void);
87 
88 #ifdef ACPI_SLEEP_ENABLED
89 void acpi_sleep_walk(struct acpi_softc *, int);
90 #endif /* ACPI_SLEEP_ENABLED */
91 
92 #ifndef SMALL_KERNEL
93 int acpi_add_device(struct aml_node *node, void *arg);
94 #endif /* SMALL_KERNEL */
95 
96 void	acpi_enable_onegpe(struct acpi_softc *, int, int);
97 int	acpi_gpe_level(struct acpi_softc *, int, void *);
98 int	acpi_gpe_edge(struct acpi_softc *, int, void *);
99 
100 struct gpe_block *acpi_find_gpe(struct acpi_softc *, int);
101 
102 #define	ACPI_LOCK(sc)
103 #define	ACPI_UNLOCK(sc)
104 
105 /* XXX move this into dsdt softc at some point */
106 extern struct aml_node aml_root;
107 
108 /* XXX do we need this? */
109 void	acpi_filtdetach(struct knote *);
110 int	acpi_filtread(struct knote *, long);
111 
112 struct filterops acpiread_filtops = {
113 	1, NULL, acpi_filtdetach, acpi_filtread
114 };
115 
116 struct cfattach acpi_ca = {
117 	sizeof(struct acpi_softc), acpi_match, acpi_attach
118 };
119 
120 struct cfdriver acpi_cd = {
121 	NULL, "acpi", DV_DULL
122 };
123 
124 struct acpi_softc *acpi_softc;
125 int acpi_evindex;
126 
127 #define acpi_bus_space_map	_bus_space_map
128 #define acpi_bus_space_unmap	_bus_space_unmap
129 
130 #define pch(x) (((x)>=' ' && (x)<='z') ? (x) : ' ')
131 
132 #if 0
133 void
134 acpi_delay(struct acpi_softc *sc, int64_t uSecs)
135 {
136 	/* XXX this needs to become a tsleep later */
137 	delay(uSecs);
138 }
139 #endif
140 
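/*
 * Generic Address Space I/O: copy "len" bytes to or from "address" in
 * the given address space (system memory, I/O ports, PCI config space
 * or the embedded controller), "access_size" bytes at a time.  Used by
 * acpi_reset() below and, presumably, by the AML interpreter for
 * OperationRegion accesses.
 */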
141 int
142 acpi_gasio(struct acpi_softc *sc, int iodir, int iospace, uint64_t address,
143     int access_size, int len, void *buffer)
144 {
145 	u_int8_t *pb;
146 	bus_space_handle_t ioh;
147 	struct acpi_mem_map mh;
148 	pci_chipset_tag_t pc;
149 	pcitag_t tag;
150 	bus_addr_t ioaddr;
151 	int reg, idx, ival, sval;
152 
153 	dnprintf(50, "gasio: %.2x 0x%.8llx %s\n",
154 	    iospace, address, (iodir == ACPI_IOWRITE) ? "write" : "read");
155 
156 	pb = (u_int8_t *)buffer;
157 	switch (iospace) {
158 	case GAS_SYSTEM_MEMORY:
159 		/* copy to/from system memory */
160 		acpi_map(address, len, &mh);
161 		if (iodir == ACPI_IOREAD)
162 			memcpy(buffer, mh.va, len);
163 		else
164 			memcpy(mh.va, buffer, len);
165 		acpi_unmap(&mh);
166 		break;
167 
168 	case GAS_SYSTEM_IOSPACE:
169 		/* read/write from I/O registers */
170 		ioaddr = address;
171 		if (acpi_bus_space_map(sc->sc_iot, ioaddr, len, 0, &ioh) != 0) {
172 			printf("unable to map iospace\n");
173 			return (-1);
174 		}
175 		for (reg = 0; reg < len; reg += access_size) {
176 			if (iodir == ACPI_IOREAD) {
177 				switch (access_size) {
178 				case 1:
179 					*(uint8_t *)(pb+reg) = bus_space_read_1(
180 					    sc->sc_iot, ioh, reg);
181 					dnprintf(80, "os_in8(%llx) = %x\n",
182 					    reg+address, *(uint8_t *)(pb+reg));
183 					break;
184 				case 2:
185 					*(uint16_t *)(pb+reg) = bus_space_read_2(
186 					    sc->sc_iot, ioh, reg);
187 					dnprintf(80, "os_in16(%llx) = %x\n",
188 					    reg+address, *(uint16_t *)(pb+reg));
189 					break;
190 				case 4:
191 					*(uint32_t *)(pb+reg) = bus_space_read_4(
192 					    sc->sc_iot, ioh, reg);
193 					break;
194 				default:
195 					printf("rdio: invalid size %d\n", access_size);
196 					break;
197 				}
198 			} else {
199 				switch (access_size) {
200 				case 1:
201 					bus_space_write_1(sc->sc_iot, ioh, reg,
202 					    *(uint8_t *)(pb+reg));
203 					dnprintf(80, "os_out8(%llx,%x)\n",
204 					    reg+address, *(uint8_t *)(pb+reg));
205 					break;
206 				case 2:
207 					bus_space_write_2(sc->sc_iot, ioh, reg,
208 					    *(uint16_t *)(pb+reg));
209 					dnprintf(80, "os_out16(%llx,%x)\n",
210 					    reg+address, *(uint16_t *)(pb+reg));
211 					break;
212 				case 4:
213 					bus_space_write_4(sc->sc_iot, ioh, reg,
214 					    *(uint32_t *)(pb+reg));
215 					break;
216 				default:
217 					printf("wrio: invalid size %d\n", access_size);
218 					break;
219 				}
220 			}
221 
222 			/* During autoconf some devices are still gathering
223 			 * information.  Delay here to give them an opportunity
224 			 * to finish.  During runtime we simply need to ignore
225 			 * transient values.
226 			 */
227 			if (cold)
228 				delay(10000);
229 		}
230 		acpi_bus_space_unmap(sc->sc_iot, ioh, len, &ioaddr);
231 		break;
232 
233 	case GAS_PCI_CFG_SPACE:
234 		/* format of address:
235 		 *    bits 00..15 = register
236 		 *    bits 16..31 = function
237 		 *    bits 32..47 = device
238 		 *    bits 48..63 = bus
239 		 */
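		/*
		 * For example, address 0x0000000200030044 selects bus 0,
		 * device 2, function 3, register 0x44 once split up by
		 * the ACPI_PCI_BUS/DEV/FN/REG macros below.
		 */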
240 		pc = NULL;
241 		tag = pci_make_tag(pc,
242 		    ACPI_PCI_BUS(address), ACPI_PCI_DEV(address),
243 		    ACPI_PCI_FN(address));
244 
245 		/* XXX: This is ugly. read-modify-write does a byte at a time */
246 		reg = ACPI_PCI_REG(address);
247 		for (idx = reg; idx < reg+len; idx++) {
248 			ival = pci_conf_read(pc, tag, idx & ~0x3);
249 			if (iodir == ACPI_IOREAD) {
250 				*pb = ival >> (8 * (idx & 0x3));
251 			} else {
252 				sval = *pb;
253 				ival &= ~(0xFF << (8* (idx & 0x3)));
254 				ival |= sval << (8* (idx & 0x3));
255 				pci_conf_write(pc, tag, idx & ~0x3, ival);
256 			}
257 			pb++;
258 		}
259 		break;
260 	case GAS_EMBEDDED:
261 		if (sc->sc_ec == NULL)
262 			break;
263 #ifndef SMALL_KERNEL
264 		if (iodir == ACPI_IOREAD)
265 			acpiec_read(sc->sc_ec, (u_int8_t)address, len, buffer);
266 		else
267 			acpiec_write(sc->sc_ec, (u_int8_t)address, len, buffer);
268 #endif
269 		break;
270 	}
271 	return (0);
272 }
273 
274 int
275 acpi_inidev(struct aml_node *node, void *arg)
276 {
277 	struct acpi_softc	*sc = (struct acpi_softc *)arg;
278 	int64_t st;
279 
280 	/*
281 	 * Per ACPI spec section 6.5.1, only run _INI when the device is
282 	 * present, or when it has no _STA method.  We terminate the tree
283 	 * walk early (by returning 1) if necessary.
284 	 */
285 
286 	/* Evaluate _STA to decide _INI fate and walk fate */
287 	if (aml_evalinteger(sc, node->parent, "_STA", 0, NULL, &st))
288 		st = STA_PRESENT | STA_ENABLED | STA_DEV_OK | 0x1000;
289 
290 	/* Evaluate _INI if we are present */
291 	if (st & STA_PRESENT)
292 		aml_evalnode(sc, node, 0, NULL, NULL);
293 
294 	/* If we are functioning, we walk/search our children */
295 	if (st & STA_DEV_OK)
296 		return 0;
297 
298 	/* If we are not enabled, or not present, terminate search */
299 	if (!(st & (STA_PRESENT|STA_ENABLED)))
300 		return 1;
301 
302 	/* Default just continue search */
303 	return 0;
304 }
305 
306 int
307 acpi_foundprt(struct aml_node *node, void *arg)
308 {
309 	struct acpi_softc	*sc = (struct acpi_softc *)arg;
310 	struct device		*self = (struct device *)arg;
311 	struct acpi_attach_args	aaa;
312 	int64_t st = 0;
313 
314 	dnprintf(10, "found prt entry: %s\n", node->parent->name);
315 
316 	/* Evaluate _STA to decide _PRT fate and walk fate */
317 	if (aml_evalinteger(sc, node->parent, "_STA", 0, NULL, &st))
318 		st = STA_PRESENT | STA_ENABLED | STA_DEV_OK | 0x1000;
319 
320 	if (st & STA_PRESENT) {
321 		memset(&aaa, 0, sizeof(aaa));
322 		aaa.aaa_iot = sc->sc_iot;
323 		aaa.aaa_memt = sc->sc_memt;
324 		aaa.aaa_node = node;
325 		aaa.aaa_name = "acpiprt";
326 
327 		config_found(self, &aaa, acpi_print);
328 	}
329 
330 	/* If we are functioning, we walk/search our children */
331 	if (st & STA_DEV_OK)
332 		return 0;
333 
334 	/* If we are not enabled, or not present, terminate search */
335 	if (!(st & (STA_PRESENT|STA_ENABLED)))
336 		return 1;
337 
338 	/* Default just continue search */
339 	return 0;
340 }
341 
342 int
343 acpi_match(struct device *parent, void *match, void *aux)
344 {
345 	struct bios_attach_args	*ba = aux;
346 	struct cfdata		*cf = match;
347 
348 	/* sanity */
349 	if (strcmp(ba->ba_name, cf->cf_driver->cd_name))
350 		return (0);
351 
352 	if (!acpi_probe(parent, cf, ba))
353 		return (0);
354 
355 	return (1);
356 }
357 
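/*
 * Attach: map the RSDP, load the tables referenced by the RSDT/XSDT,
 * parse the DSDT and any SSDTs, map the PM registers, switch the
 * machine into ACPI mode via the SMI command port, and finally attach
 * the child devices found in the AML namespace.
 */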
358 void
359 acpi_attach(struct device *parent, struct device *self, void *aux)
360 {
361 	struct bios_attach_args *ba = aux;
362 	struct acpi_softc *sc = (struct acpi_softc *)self;
363 	struct acpi_mem_map handle;
364 	struct acpi_rsdp *rsdp;
365 	struct acpi_q *entry;
366 	struct acpi_dsdt *p_dsdt;
367 	int idx;
368 #ifndef SMALL_KERNEL
369 	struct acpi_wakeq *wentry;
370 	struct device *dev;
371 	struct acpi_ac *ac;
372 	struct acpi_bat *bat;
373 #endif /* SMALL_KERNEL */
374 	paddr_t facspa;
375 
376 	sc->sc_iot = ba->ba_iot;
377 	sc->sc_memt = ba->ba_memt;
378 
379 	if (acpi_map(ba->ba_acpipbase, sizeof(struct acpi_rsdp), &handle)) {
380 		printf(": can't map memory\n");
381 		return;
382 	}
383 
384 	rsdp = (struct acpi_rsdp *)handle.va;
385 	sc->sc_revision = (int)rsdp->rsdp_revision;
386 	printf(": rev %d", sc->sc_revision);
387 
388 	SIMPLEQ_INIT(&sc->sc_tables);
389 	SIMPLEQ_INIT(&sc->sc_wakedevs);
390 
391 #ifndef SMALL_KERNEL
392 	sc->sc_note = malloc(sizeof(struct klist), M_DEVBUF, M_NOWAIT | M_ZERO);
393 	if (sc->sc_note == NULL) {
394 		printf(", can't allocate memory\n");
395 		acpi_unmap(&handle);
396 		return;
397 	}
398 #endif /* SMALL_KERNEL */
399 
400 	if (acpi_loadtables(sc, rsdp)) {
401 		printf(", can't load tables\n");
402 		acpi_unmap(&handle);
403 		return;
404 	}
405 
406 	acpi_unmap(&handle);
407 
408 	/*
409 	 * Find the FADT
410 	 */
411 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
412 		if (memcmp(entry->q_table, FADT_SIG,
413 		    sizeof(FADT_SIG) - 1) == 0) {
414 			sc->sc_fadt = entry->q_table;
415 			break;
416 		}
417 	}
418 	if (sc->sc_fadt == NULL) {
419 		printf(", no FADT\n");
420 		return;
421 	}
422 
423 	/*
424 	 * Check if we are able to enable ACPI control
425 	 */
426 	if (!sc->sc_fadt->smi_cmd ||
427 	    (!sc->sc_fadt->acpi_enable && !sc->sc_fadt->acpi_disable)) {
428 		printf(", ACPI control unavailable\n");
429 		return;
430 	}
431 
432 	/*
433 	 * Set up a pointer to the firmware control structure
434 	 */
435 	if (sc->sc_fadt->hdr_revision < 3 || sc->sc_fadt->x_firmware_ctl == 0)
436 		facspa = sc->sc_fadt->firmware_ctl;
437 	else
438 		facspa = sc->sc_fadt->x_firmware_ctl;
439 
440 	if (acpi_map(facspa, sizeof(struct acpi_facs), &handle))
441 		printf(" !FACS");
442 	else
443 		sc->sc_facs = (struct acpi_facs *)handle.va;
444 
445 	acpi_enabled = 1;
446 
447 	/* Create opcode hashtable */
448 	aml_hashopcodes();
449 
450 	/* Create Default AML objects */
451 	aml_create_defaultobjects();
452 
453 	/*
454 	 * Load the DSDT from the FADT pointer -- use the
455 	 * extended (64-bit) pointer if it exists
456 	 */
457 	if (sc->sc_fadt->hdr_revision < 3 || sc->sc_fadt->x_dsdt == 0)
458 		acpi_load_dsdt(sc->sc_fadt->dsdt, &entry);
459 	else
460 		acpi_load_dsdt(sc->sc_fadt->x_dsdt, &entry);
461 
462 	if (entry == NULL) {
463 		printf(" !DSDT\n");
		return;
	}
464 	SIMPLEQ_INSERT_HEAD(&sc->sc_tables, entry, q_next);
465 
466 	p_dsdt = entry->q_table;
467 	acpi_parse_aml(sc, p_dsdt->aml, p_dsdt->hdr_length -
468 	    sizeof(p_dsdt->hdr));
469 
470 	/* Load SSDT's */
471 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
472 		if (memcmp(entry->q_table, SSDT_SIG,
473 		    sizeof(SSDT_SIG) - 1) == 0) {
474 			p_dsdt = entry->q_table;
475 			acpi_parse_aml(sc, p_dsdt->aml, p_dsdt->hdr_length -
476 			    sizeof(p_dsdt->hdr));
477 		}
478 	}
479 
480 	/* Perform post-parsing fixups */
481 	aml_postparse();
482 
483 #ifndef SMALL_KERNEL
484 	/* Find available sleeping states */
485 	acpi_init_states(sc);
486 
487 	/* Find available sleep/resume related methods. */
488 	acpi_init_pm(sc);
489 #endif /* SMALL_KERNEL */
490 
491 	/* Map Power Management registers */
492 	acpi_map_pmregs(sc);
493 
494 #ifndef SMALL_KERNEL
495 	/* Initialize GPE handlers */
496 	acpi_init_gpes(sc);
497 
498 	/* some devices require periodic polling */
499 	timeout_set(&sc->sc_dev_timeout, acpi_poll, sc);
500 #endif /* SMALL_KERNEL */
501 
502 	/*
503 	 * Take over ACPI control.  Note that once we do this, we
504 	 * effectively tell the system that we have ownership of
505 	 * the ACPI hardware registers, and that SMI should leave
506 	 * them alone
507 	 *
508 	 * This may prevent thermal control on some systems where
509 	 * that actually does work
510 	 */
511 	acpi_write_pmreg(sc, ACPIREG_SMICMD, 0, sc->sc_fadt->acpi_enable);
512 	idx = 0;
513 	do {
514 		if (idx++ > ACPIEN_RETRIES) {
515 			printf(", can't enable ACPI\n");
516 			return;
517 		}
518 	} while (!(acpi_read_pmreg(sc, ACPIREG_PM1_CNT, 0) & ACPI_PM1_SCI_EN));
519 
520 	printf("\n%s: tables", DEVNAME(sc));
521 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
522 		printf(" %.4s", entry->q_table);
523 	}
524 	printf("\n");
525 
526 #ifndef SMALL_KERNEL
527 	/* Display wakeup devices and lowest S-state */
528 	printf("%s: wakeup devices", DEVNAME(sc));
529 	SIMPLEQ_FOREACH(wentry, &sc->sc_wakedevs, q_next) {
530 		printf(" %.4s(S%d)", wentry->q_node->name,
531 		    wentry->q_state);
532 	}
533 	printf("\n");
534 
535 
536 	/*
537 	 * ACPI is enabled now -- attach timer
538 	 */
539 	{
540 		struct acpi_attach_args aaa;
541 
542 		memset(&aaa, 0, sizeof(aaa));
543 		aaa.aaa_name = "acpitimer";
544 		aaa.aaa_iot = sc->sc_iot;
545 		aaa.aaa_memt = sc->sc_memt;
546 #if 0
547 		aaa.aaa_pcit = sc->sc_pcit;
548 		aaa.aaa_smbust = sc->sc_smbust;
549 #endif
550 		config_found(self, &aaa, acpi_print);
551 	}
552 #endif /* SMALL_KERNEL */
553 
554 	/*
555 	 * Attach table-defined devices
556 	 */
557 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
558 		struct acpi_attach_args aaa;
559 
560 		memset(&aaa, 0, sizeof(aaa));
561 		aaa.aaa_iot = sc->sc_iot;
562 		aaa.aaa_memt = sc->sc_memt;
563 #if 0
564 		aaa.aaa_pcit = sc->sc_pcit;
565 		aaa.aaa_smbust = sc->sc_smbust;
566 #endif
567 		aaa.aaa_table = entry->q_table;
568 		config_found_sm(self, &aaa, acpi_print, acpi_submatch);
569 	}
570 
571 	acpi_softc = sc;
572 
573 	/* initialize runtime environment */
574 	aml_find_node(&aml_root, "_INI", acpi_inidev, sc);
575 
576 	/* attach pci interrupt routing tables */
577 	aml_find_node(&aml_root, "_PRT", acpi_foundprt, sc);
578 
579 #ifndef SMALL_KERNEL
580 	 /* XXX EC needs to be attached first on some systems */
581 	aml_find_node(&aml_root, "_HID", acpi_foundec, sc);
582 
583 	aml_walknodes(&aml_root, AML_WALK_PRE, acpi_add_device, sc);
584 
585 	/* attach battery, power supply and button devices */
586 	aml_find_node(&aml_root, "_HID", acpi_foundhid, sc);
587 
588 	/* attach docks */
589 	aml_find_node(&aml_root, "_DCK", acpi_founddock, sc);
590 
591 	/* attach video */
592 	aml_find_node(&aml_root, "_DOS", acpi_foundvideo, sc);
593 
594 	/* create list of devices we want to query when APM ioctls come in */
595 	SLIST_INIT(&sc->sc_ac);
596 	SLIST_INIT(&sc->sc_bat);
597 	TAILQ_FOREACH(dev, &alldevs, dv_list) {
598 		if (!strncmp(dev->dv_xname, "acpiac", strlen("acpiac"))) {
599 			ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
600 			ac->aac_softc = (struct acpiac_softc *)dev;
601 			SLIST_INSERT_HEAD(&sc->sc_ac, ac, aac_link);
602 		}
603 		if (!strncmp(dev->dv_xname, "acpibat", strlen("acpibat"))) {
604 			bat = malloc(sizeof(*bat), M_DEVBUF, M_WAITOK | M_ZERO);
605 			bat->aba_softc = (struct acpibat_softc *)dev;
606 			SLIST_INSERT_HEAD(&sc->sc_bat, bat, aba_link);
607 		}
608 	}
609 
610 	/* Setup threads */
611 	sc->sc_thread = malloc(sizeof(struct acpi_thread), M_DEVBUF, M_WAITOK);
612 	sc->sc_thread->sc = sc;
613 	sc->sc_thread->running = 1;
614 
615 	acpi_attach_machdep(sc);
616 
617 	kthread_create_deferred(acpi_create_thread, sc);
618 #endif /* SMALL_KERNEL */
619 }
620 
621 int
622 acpi_submatch(struct device *parent, void *match, void *aux)
623 {
624 	struct acpi_attach_args *aaa = (struct acpi_attach_args *)aux;
625 	struct cfdata *cf = match;
626 
627 	if (aaa->aaa_table == NULL)
628 		return (0);
629 	return ((*cf->cf_attach->ca_match)(parent, match, aux));
630 }
631 
632 int
633 acpi_print(void *aux, const char *pnp)
634 {
635 	struct acpi_attach_args *aa = aux;
636 
637 	if (pnp) {
638 		if (aa->aaa_name)
639 			printf("%s at %s", aa->aaa_name, pnp);
640 		else
641 			return (QUIET);
642 	}
643 
644 	return (UNCONF);
645 }
646 
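/*
 * Copy every table referenced by the XSDT (ACPI 2.0+, if present) or
 * the RSDT into kernel memory and queue them on sc_tables.
 */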
647 int
648 acpi_loadtables(struct acpi_softc *sc, struct acpi_rsdp *rsdp)
649 {
650 	struct acpi_mem_map hrsdt, handle;
651 	struct acpi_table_header *hdr;
652 	int i, ntables;
653 	size_t len;
654 
655 	if (rsdp->rsdp_revision == 2 && rsdp->rsdp_xsdt) {
656 		struct acpi_xsdt *xsdt;
657 
658 		if (acpi_map(rsdp->rsdp_xsdt, sizeof(*hdr), &handle)) {
659 			printf("couldn't map rsdt\n");
660 			return (ENOMEM);
661 		}
662 
663 		hdr = (struct acpi_table_header *)handle.va;
664 		len = hdr->length;
665 		acpi_unmap(&handle);
666 		hdr = NULL;
667 
668 		acpi_map(rsdp->rsdp_xsdt, len, &hrsdt);
669 		xsdt = (struct acpi_xsdt *)hrsdt.va;
670 
671 		ntables = (len - sizeof(struct acpi_table_header)) /
672 		    sizeof(xsdt->table_offsets[0]);
673 
674 		for (i = 0; i < ntables; i++) {
675 			acpi_map(xsdt->table_offsets[i], sizeof(*hdr), &handle);
676 			hdr = (struct acpi_table_header *)handle.va;
677 			acpi_load_table(xsdt->table_offsets[i], hdr->length,
678 			    &sc->sc_tables);
679 			acpi_unmap(&handle);
680 		}
681 		acpi_unmap(&hrsdt);
682 	} else {
683 		struct acpi_rsdt *rsdt;
684 
685 		if (acpi_map(rsdp->rsdp_rsdt, sizeof(*hdr), &handle)) {
686 			printf("couldn't map rsdt\n");
687 			return (ENOMEM);
688 		}
689 
690 		hdr = (struct acpi_table_header *)handle.va;
691 		len = hdr->length;
692 		acpi_unmap(&handle);
693 		hdr = NULL;
694 
695 		acpi_map(rsdp->rsdp_rsdt, len, &hrsdt);
696 		rsdt = (struct acpi_rsdt *)hrsdt.va;
697 
698 		ntables = (len - sizeof(struct acpi_table_header)) /
699 		    sizeof(rsdt->table_offsets[0]);
700 
701 		for (i = 0; i < ntables; i++) {
702 			acpi_map(rsdt->table_offsets[i], sizeof(*hdr), &handle);
703 			hdr = (struct acpi_table_header *)handle.va;
704 			acpi_load_table(rsdt->table_offsets[i], hdr->length,
705 			    &sc->sc_tables);
706 			acpi_unmap(&handle);
707 		}
708 		acpi_unmap(&hrsdt);
709 	}
710 
711 	return (0);
712 }
713 
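/*
 * Copy a single table at physical address "pa" into an acpi_q entry
 * and append it to the given queue; silently skipped on allocation or
 * mapping failure.
 */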
714 void
715 acpi_load_table(paddr_t pa, size_t len, acpi_qhead_t *queue)
716 {
717 	struct acpi_mem_map handle;
718 	struct acpi_q *entry;
719 
720 	entry = malloc(len + sizeof(struct acpi_q), M_DEVBUF, M_NOWAIT);
721 
722 	if (entry != NULL) {
723 		if (acpi_map(pa, len, &handle)) {
724 			free(entry, M_DEVBUF);
725 			return;
726 		}
727 		memcpy(entry->q_data, handle.va, len);
728 		entry->q_table = entry->q_data;
729 		acpi_unmap(&handle);
730 		SIMPLEQ_INSERT_TAIL(queue, entry, q_next);
731 	}
732 }
733 
734 void
735 acpi_load_dsdt(paddr_t pa, struct acpi_q **dsdt)
736 {
737 	struct acpi_mem_map handle;
738 	struct acpi_table_header *hdr;
739 	size_t len;
740 
741 	if (acpi_map(pa, sizeof(*hdr), &handle))
742 		return;
743 	hdr = (struct acpi_table_header *)handle.va;
744 	len = hdr->length;
745 	acpi_unmap(&handle);
746 
747 	*dsdt = malloc(len + sizeof(struct acpi_q), M_DEVBUF, M_NOWAIT);
748 
749 	if (*dsdt != NULL) {
750 		if (acpi_map(pa, len, &handle)) {
751 			free(*dsdt, M_DEVBUF);
752 			*dsdt = NULL;
753 			return;
754 		}
755 		memcpy((*dsdt)->q_data, handle.va, len);
756 		(*dsdt)->q_table = (*dsdt)->q_data;
757 		acpi_unmap(&handle);
758 	}
759 }
760 
761 int
762 acpiopen(dev_t dev, int flag, int mode, struct proc *p)
763 {
764 	int error = 0;
765 #ifndef SMALL_KERNEL
766 	struct acpi_softc *sc;
767 
768 	if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 ||
769 	    !(sc = acpi_cd.cd_devs[APMUNIT(dev)]))
770 		return (ENXIO);
771 
772 	switch (APMDEV(dev)) {
773 	case APMDEV_CTL:
774 		if (!(flag & FWRITE)) {
775 			error = EINVAL;
776 			break;
777 		}
778 		break;
779 	case APMDEV_NORMAL:
780 		if (!(flag & FREAD) || (flag & FWRITE)) {
781 			error = EINVAL;
782 			break;
783 		}
784 		break;
785 	default:
786 		error = ENXIO;
787 		break;
788 	}
789 #else
790 	error = ENXIO;
791 #endif
792 	return (error);
793 }
794 
795 int
796 acpiclose(dev_t dev, int flag, int mode, struct proc *p)
797 {
798 	int error = 0;
799 #ifndef SMALL_KERNEL
800 	struct acpi_softc *sc;
801 
802 	if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 ||
803 	    !(sc = acpi_cd.cd_devs[APMUNIT(dev)]))
804 		return (ENXIO);
805 	switch (APMDEV(dev)) {
806 	case APMDEV_CTL:
807 	case APMDEV_NORMAL:
808 		break;
809 	default:
810 		error = ENXIO;
811 		break;
812 	}
813 #else
814 	error = ENXIO;
815 #endif
816 	return (error);
817 }
818 
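/*
 * Emulate the apm(4) ioctls: suspend/standby requests queue
 * acpi_sleep_state() on a workq, and APM_IOC_GETPOWER is synthesized
 * from the attached acpiac(4) and acpibat(4) devices.
 */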
819 int
820 acpiioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
821 {
822 	int error = 0;
823 #ifndef SMALL_KERNEL
824 	struct acpi_softc *sc;
825 	struct acpi_ac *ac;
826 	struct acpi_bat *bat;
827 	struct apm_power_info *pi = (struct apm_power_info *)data;
828 	int bats;
829 	unsigned int remaining, rem, minutes, rate;
830 
831 	if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 ||
832 	    !(sc = acpi_cd.cd_devs[APMUNIT(dev)]))
833 		return (ENXIO);
834 
835 	ACPI_LOCK(sc);
836 	/* fake APM */
837 	switch (cmd) {
838 #ifdef ACPI_SLEEP_ENABLED
839 	case APM_IOC_SUSPEND:
840 	case APM_IOC_STANDBY:
841 		workq_add_task(NULL, 0, (workq_fn)acpi_sleep_state,
842 		acpi_softc, (void *)ACPI_STATE_S3);
843 		break;
844 #endif /* ACPI_SLEEP_ENABLED */
845 	case APM_IOC_GETPOWER:
846 		/* A/C */
847 		pi->ac_state = APM_AC_UNKNOWN;
848 		SLIST_FOREACH(ac, &sc->sc_ac, aac_link) {
849 			if (ac->aac_softc->sc_ac_stat == PSR_ONLINE)
850 				pi->ac_state = APM_AC_ON;
851 			else if (ac->aac_softc->sc_ac_stat == PSR_OFFLINE)
852 				if (pi->ac_state == APM_AC_UNKNOWN)
853 					pi->ac_state = APM_AC_OFF;
854 		}
855 
856 		/* battery */
857 		pi->battery_state = APM_BATT_UNKNOWN;
858 		pi->battery_life = 0;
859 		pi->minutes_left = 0;
860 		bats = 0;
861 		remaining = rem = 0;
862 		minutes = 0;
863 		rate = 0;
864 		SLIST_FOREACH(bat, &sc->sc_bat, aba_link) {
865 			if (bat->aba_softc->sc_bat_present == 0)
866 				continue;
867 
868 			if (bat->aba_softc->sc_bif.bif_last_capacity == 0)
869 				continue;
870 
871 			bats++;
872 			rem = (bat->aba_softc->sc_bst.bst_capacity * 100) /
873 			    bat->aba_softc->sc_bif.bif_last_capacity;
874 			if (rem > 100)
875 				rem = 100;
876 			remaining += rem;
877 
878 			if (bat->aba_softc->sc_bst.bst_rate == BST_UNKNOWN)
879 				continue;
880 			else if (bat->aba_softc->sc_bst.bst_rate > 1)
881 				rate = bat->aba_softc->sc_bst.bst_rate;
882 
883 			minutes += bat->aba_softc->sc_bst.bst_capacity;
884 		}
885 
886 		if (bats == 0) {
887 			pi->battery_state = APM_BATTERY_ABSENT;
888 			pi->battery_life = 0;
889 			pi->minutes_left = (unsigned int)-1;
890 			break;
891 		}
892 
893 		if (pi->ac_state == APM_AC_ON || rate == 0)
894 			pi->minutes_left = (unsigned int)-1;
895 		else
896 			pi->minutes_left = 100 * minutes / rate;
897 
898 		/* running on battery */
899 		pi->battery_life = remaining / bats;
900 		if (pi->battery_life > 50)
901 			pi->battery_state = APM_BATT_HIGH;
902 		else if (pi->battery_life > 25)
903 			pi->battery_state = APM_BATT_LOW;
904 		else
905 			pi->battery_state = APM_BATT_CRITICAL;
906 
907 		break;
908 
909 	default:
910 		error = ENOTTY;
911 	}
912 
913 	ACPI_UNLOCK(sc);
914 #else
915 	error = ENXIO;
916 #endif /* SMALL_KERNEL */
917 	return (error);
918 }
919 
920 void
921 acpi_filtdetach(struct knote *kn)
922 {
923 #ifndef SMALL_KERNEL
924 	struct acpi_softc *sc = kn->kn_hook;
925 
926 	ACPI_LOCK(sc);
927 	SLIST_REMOVE(sc->sc_note, kn, knote, kn_selnext);
928 	ACPI_UNLOCK(sc);
929 #endif
930 }
931 
932 int
933 acpi_filtread(struct knote *kn, long hint)
934 {
935 #ifndef SMALL_KERNEL
936 	/* XXX weird kqueue_scan() semantics */
937 	if (hint & !kn->kn_data)
938 		kn->kn_data = hint;
939 #endif
940 	return (1);
941 }
942 
943 int
944 acpikqfilter(dev_t dev, struct knote *kn)
945 {
946 #ifndef SMALL_KERNEL
947 	struct acpi_softc *sc;
948 
949 	if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 ||
950 	    !(sc = acpi_cd.cd_devs[APMUNIT(dev)]))
951 		return (ENXIO);
952 
953 	switch (kn->kn_filter) {
954 	case EVFILT_READ:
955 		kn->kn_fop = &acpiread_filtops;
956 		break;
957 	default:
958 		return (1);
959 	}
960 
961 	kn->kn_hook = sc;
962 
963 	ACPI_LOCK(sc);
964 	SLIST_INSERT_HEAD(sc->sc_note, kn, kn_selnext);
965 	ACPI_UNLOCK(sc);
966 
967 	return (0);
968 #else
969 	return (1);
970 #endif
971 }
972 
973 /* Read from power management register */
974 int
975 acpi_read_pmreg(struct acpi_softc *sc, int reg, int offset)
976 {
977 	bus_space_handle_t ioh;
978 	bus_size_t size, __size;
979 	int regval;
980 
981 	__size = 0;
982 	/* Special cases: 1A/1B blocks can be OR'ed together */
983 	switch (reg) {
984 	case ACPIREG_PM1_EN:
985 		return (acpi_read_pmreg(sc, ACPIREG_PM1A_EN, offset) |
986 		    acpi_read_pmreg(sc, ACPIREG_PM1B_EN, offset));
987 	case ACPIREG_PM1_STS:
988 		return (acpi_read_pmreg(sc, ACPIREG_PM1A_STS, offset) |
989 		    acpi_read_pmreg(sc, ACPIREG_PM1B_STS, offset));
990 	case ACPIREG_PM1_CNT:
991 		return (acpi_read_pmreg(sc, ACPIREG_PM1A_CNT, offset) |
992 		    acpi_read_pmreg(sc, ACPIREG_PM1B_CNT, offset));
993 	case ACPIREG_GPE_STS:
994 		__size = 1;
995 		dnprintf(50, "read GPE_STS  offset: %.2x %.2x %.2x\n", offset,
996 		    sc->sc_fadt->gpe0_blk_len>>1, sc->sc_fadt->gpe1_blk_len>>1);
997 		if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) {
998 			reg = ACPIREG_GPE0_STS;
999 		}
1000 		break;
1001 	case ACPIREG_GPE_EN:
1002 		__size = 1;
1003 		dnprintf(50, "read GPE_EN   offset: %.2x %.2x %.2x\n",
1004 		    offset, sc->sc_fadt->gpe0_blk_len>>1,
1005 		    sc->sc_fadt->gpe1_blk_len>>1);
1006 		if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) {
1007 			reg = ACPIREG_GPE0_EN;
1008 		}
1009 		break;
1010 	}
1011 
1012 	if (reg >= ACPIREG_MAXREG || sc->sc_pmregs[reg].size == 0)
1013 		return (0);
1014 
1015 	regval = 0;
1016 	ioh = sc->sc_pmregs[reg].ioh;
1017 	size = sc->sc_pmregs[reg].size;
1018 	if (__size)
1019 		size = __size;
1020 	if (size > 4)
1021 		size = 4;
1022 
1023 	switch (size) {
1024 	case 1:
1025 		regval = bus_space_read_1(sc->sc_iot, ioh, offset);
1026 		break;
1027 	case 2:
1028 		regval = bus_space_read_2(sc->sc_iot, ioh, offset);
1029 		break;
1030 	case 4:
1031 		regval = bus_space_read_4(sc->sc_iot, ioh, offset);
1032 		break;
1033 	}
1034 
1035 	dnprintf(30, "acpi_readpm: %s = %.4x:%.4x %x\n",
1036 	    sc->sc_pmregs[reg].name,
1037 	    sc->sc_pmregs[reg].addr, offset, regval);
1038 	return (regval);
1039 }
1040 
1041 /* Write to power management register */
1042 void
1043 acpi_write_pmreg(struct acpi_softc *sc, int reg, int offset, int regval)
1044 {
1045 	bus_space_handle_t ioh;
1046 	bus_size_t size, __size;
1047 
1048 	__size = 0;
1049 	/* Special cases: 1A/1B blocks can be written with same value */
1050 	switch (reg) {
1051 	case ACPIREG_PM1_EN:
1052 		acpi_write_pmreg(sc, ACPIREG_PM1A_EN, offset, regval);
1053 		acpi_write_pmreg(sc, ACPIREG_PM1B_EN, offset, regval);
1054 		break;
1055 	case ACPIREG_PM1_STS:
1056 		acpi_write_pmreg(sc, ACPIREG_PM1A_STS, offset, regval);
1057 		acpi_write_pmreg(sc, ACPIREG_PM1B_STS, offset, regval);
1058 		break;
1059 	case ACPIREG_PM1_CNT:
1060 		acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, offset, regval);
1061 		acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, offset, regval);
1062 		break;
1063 	case ACPIREG_GPE_STS:
1064 		__size = 1;
1065 		dnprintf(50, "write GPE_STS offset: %.2x %.2x %.2x %.2x\n",
1066 		    offset, sc->sc_fadt->gpe0_blk_len>>1,
1067 		    sc->sc_fadt->gpe1_blk_len>>1, regval);
1068 		if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) {
1069 			reg = ACPIREG_GPE0_STS;
1070 		}
1071 		break;
1072 	case ACPIREG_GPE_EN:
1073 		__size = 1;
1074 		dnprintf(50, "write GPE_EN  offset: %.2x %.2x %.2x %.2x\n",
1075 		    offset, sc->sc_fadt->gpe0_blk_len>>1,
1076 		    sc->sc_fadt->gpe1_blk_len>>1, regval);
1077 		if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) {
1078 			reg = ACPIREG_GPE0_EN;
1079 		}
1080 		break;
1081 	}
1082 
1083 	/* All special case return here */
1084 	if (reg >= ACPIREG_MAXREG)
1085 		return;
1086 
1087 	ioh = sc->sc_pmregs[reg].ioh;
1088 	size = sc->sc_pmregs[reg].size;
1089 	if (__size)
1090 		size = __size;
1091 	if (size > 4)
1092 		size = 4;
1093 	switch (size) {
1094 	case 1:
1095 		bus_space_write_1(sc->sc_iot, ioh, offset, regval);
1096 		break;
1097 	case 2:
1098 		bus_space_write_2(sc->sc_iot, ioh, offset, regval);
1099 		break;
1100 	case 4:
1101 		bus_space_write_4(sc->sc_iot, ioh, offset, regval);
1102 		break;
1103 	}
1104 
1105 	dnprintf(30, "acpi_writepm: %s = %.4x:%.4x %x\n",
1106 	    sc->sc_pmregs[reg].name, sc->sc_pmregs[reg].addr, offset, regval);
1107 }
1108 
1109 /* Map Power Management registers */
1110 void
1111 acpi_map_pmregs(struct acpi_softc *sc)
1112 {
1113 	bus_addr_t addr;
1114 	bus_size_t size;
1115 	const char *name;
1116 	int reg;
1117 
1118 	for (reg = 0; reg < ACPIREG_MAXREG; reg++) {
1119 		size = 0;
1120 		switch (reg) {
1121 		case ACPIREG_SMICMD:
1122 			name = "smi";
1123 			size = 1;
1124 			addr = sc->sc_fadt->smi_cmd;
1125 			break;
1126 		case ACPIREG_PM1A_STS:
1127 		case ACPIREG_PM1A_EN:
1128 			name = "pm1a_sts";
1129 			size = sc->sc_fadt->pm1_evt_len >> 1;
1130 			addr = sc->sc_fadt->pm1a_evt_blk;
1131 			if (reg == ACPIREG_PM1A_EN && addr) {
1132 				addr += size;
1133 				name = "pm1a_en";
1134 			}
1135 			break;
1136 		case ACPIREG_PM1A_CNT:
1137 			name = "pm1a_cnt";
1138 			size = sc->sc_fadt->pm1_cnt_len;
1139 			addr = sc->sc_fadt->pm1a_cnt_blk;
1140 			break;
1141 		case ACPIREG_PM1B_STS:
1142 		case ACPIREG_PM1B_EN:
1143 			name = "pm1b_sts";
1144 			size = sc->sc_fadt->pm1_evt_len >> 1;
1145 			addr = sc->sc_fadt->pm1b_evt_blk;
1146 			if (reg == ACPIREG_PM1B_EN && addr) {
1147 				addr += size;
1148 				name = "pm1b_en";
1149 			}
1150 			break;
1151 		case ACPIREG_PM1B_CNT:
1152 			name = "pm1b_cnt";
1153 			size = sc->sc_fadt->pm1_cnt_len;
1154 			addr = sc->sc_fadt->pm1b_cnt_blk;
1155 			break;
1156 		case ACPIREG_PM2_CNT:
1157 			name = "pm2_cnt";
1158 			size = sc->sc_fadt->pm2_cnt_len;
1159 			addr = sc->sc_fadt->pm2_cnt_blk;
1160 			break;
1161 #if 0
1162 		case ACPIREG_PM_TMR:
1163 			/* Allocated in acpitimer */
1164 			name = "pm_tmr";
1165 			size = sc->sc_fadt->pm_tmr_len;
1166 			addr = sc->sc_fadt->pm_tmr_blk;
1167 			break;
1168 #endif
1169 		case ACPIREG_GPE0_STS:
1170 		case ACPIREG_GPE0_EN:
1171 			name = "gpe0_sts";
1172 			size = sc->sc_fadt->gpe0_blk_len >> 1;
1173 			addr = sc->sc_fadt->gpe0_blk;
1174 
1175 			dnprintf(20, "gpe0 block len : %x\n",
1176 			    sc->sc_fadt->gpe0_blk_len >> 1);
1177 			dnprintf(20, "gpe0 block addr: %x\n",
1178 			    sc->sc_fadt->gpe0_blk);
1179 			if (reg == ACPIREG_GPE0_EN && addr) {
1180 				addr += size;
1181 				name = "gpe0_en";
1182 			}
1183 			break;
1184 		case ACPIREG_GPE1_STS:
1185 		case ACPIREG_GPE1_EN:
1186 			name = "gpe1_sts";
1187 			size = sc->sc_fadt->gpe1_blk_len >> 1;
1188 			addr = sc->sc_fadt->gpe1_blk;
1189 
1190 			dnprintf(20, "gpe1 block len : %x\n",
1191 			    sc->sc_fadt->gpe1_blk_len >> 1);
1192 			dnprintf(20, "gpe1 block addr: %x\n",
1193 			    sc->sc_fadt->gpe1_blk);
1194 			if (reg == ACPIREG_GPE1_EN && addr) {
1195 				addr += size;
1196 				name = "gpe1_en";
1197 			}
1198 			break;
1199 		}
1200 		if (size && addr) {
1201 			dnprintf(50, "mapping: %.4x %.4x %s\n",
1202 			    addr, size, name);
1203 
1204 			/* Size and address exist; map register space */
1205 			bus_space_map(sc->sc_iot, addr, size, 0,
1206 			    &sc->sc_pmregs[reg].ioh);
1207 
1208 			sc->sc_pmregs[reg].name = name;
1209 			sc->sc_pmregs[reg].size = size;
1210 			sc->sc_pmregs[reg].addr = addr;
1211 		}
1212 	}
1213 }
1214 
1215 /* move all stuff that doesn't go on the boot media in here */
1216 #ifndef SMALL_KERNEL
1217 void
1218 acpi_reset(void)
1219 {
1220 	struct acpi_fadt	*fadt;
1221 	u_int32_t		 reset_as, reset_len;
1222 	u_int32_t		 value;
1223 
1224 	fadt = acpi_softc->sc_fadt;
1225 
1226 	/*
1227 	 * RESET_REG_SUP is not properly set in some implementations,
1228 	 * but not testing against it breaks more machines than it fixes
1229 	 */
1230 	if (acpi_softc->sc_revision <= 1 ||
1231 	    !(fadt->flags & FADT_RESET_REG_SUP) || fadt->reset_reg.address == 0)
1232 		return;
1233 
1234 	value = fadt->reset_value;
1235 
1236 	reset_as = fadt->reset_reg.register_bit_width / 8;
1237 	if (reset_as == 0)
1238 		reset_as = 1;
1239 
1240 	reset_len = fadt->reset_reg.access_size;
1241 	if (reset_len == 0)
1242 		reset_len = reset_as;
1243 
1244 	acpi_gasio(acpi_softc, ACPI_IOWRITE,
1245 	    fadt->reset_reg.address_space_id,
1246 	    fadt->reset_reg.address, reset_as, reset_len, &value);
1247 
1248 	delay(100000);
1249 }
1250 
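/*
 * Interrupt handler: scan the GPE status/enable registers eight GPEs
 * at a time and the PM1 fixed-event registers (power/sleep button),
 * mark pending GPEs active and wake the ACPI thread to service them.
 */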
1251 int
1252 acpi_interrupt(void *arg)
1253 {
1254 	struct acpi_softc *sc = (struct acpi_softc *)arg;
1255 	u_int32_t processed, sts, en, idx, jdx;
1256 
1257 	processed = 0;
1258 
1259 #if 0
1260 	acpi_add_gpeblock(sc, sc->sc_fadt->gpe0_blk, sc->sc_fadt->gpe0_blk_len>>1, 0);
1261 	acpi_add_gpeblock(sc, sc->sc_fadt->gpe1_blk, sc->sc_fadt->gpe1_blk_len>>1,
1262 	    sc->sc_fadt->gpe1_base);
1263 #endif
1264 
1265 	dnprintf(40, "ACPI Interrupt\n");
1266 	for (idx = 0; idx < sc->sc_lastgpe; idx += 8) {
1267 		sts = acpi_read_pmreg(sc, ACPIREG_GPE_STS, idx>>3);
1268 		en  = acpi_read_pmreg(sc, ACPIREG_GPE_EN,  idx>>3);
1269 		if (en & sts) {
1270 			dnprintf(10, "GPE block: %.2x %.2x %.2x\n", idx, sts,
1271 			    en);
1272 			acpi_write_pmreg(sc, ACPIREG_GPE_EN, idx>>3, en & ~sts);
1273 			for (jdx = 0; jdx < 8; jdx++) {
1274 				if (en & sts & (1L << jdx)) {
1275 					/* Signal this GPE */
1276 					sc->gpe_table[idx+jdx].active = 1;
1277 					processed = 1;
1278 				}
1279 			}
1280 		}
1281 	}
1282 
1283 	sts = acpi_read_pmreg(sc, ACPIREG_PM1_STS, 0);
1284 	en  = acpi_read_pmreg(sc, ACPIREG_PM1_EN, 0);
1285 	if (sts & en) {
1286 		dnprintf(10,"GEN interrupt: %.4x\n", sts & en);
1287 		acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, en & ~sts);
1288 		acpi_write_pmreg(sc, ACPIREG_PM1_STS, 0, en);
1289 		acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, en);
1290 		if (sts & ACPI_PM1_PWRBTN_STS)
1291 			sc->sc_powerbtn = 1;
1292 		if (sts & ACPI_PM1_SLPBTN_STS)
1293 			sc->sc_sleepbtn = 1;
1294 		processed = 1;
1295 	}
1296 
1297 	if (processed) {
1298 		sc->sc_wakeup = 0;
1299 		wakeup(sc);
1300 	}
1301 
1302 	return (processed);
1303 }
1304 
1305 int
1306 acpi_add_device(struct aml_node *node, void *arg)
1307 {
1308 	static int nacpicpus = 0;
1309 	struct device *self = arg;
1310 	struct acpi_softc *sc = arg;
1311 	struct acpi_attach_args aaa;
1312 #ifdef MULTIPROCESSOR
1313 	struct aml_value res;
1314 	int proc_id = -1;
1315 #endif
1316 
1317 	memset(&aaa, 0, sizeof(aaa));
1318 	aaa.aaa_node = node;
1319 	aaa.aaa_iot = sc->sc_iot;
1320 	aaa.aaa_memt = sc->sc_memt;
1321 	if (node == NULL || node->value == NULL)
1322 		return 0;
1323 
1324 	switch (node->value->type) {
1325 	case AML_OBJTYPE_PROCESSOR:
1326 		if (nacpicpus >= ncpus)
1327 			return 0;
1328 #ifdef MULTIPROCESSOR
1329 		if (aml_evalnode(sc, aaa.aaa_node, 0, NULL, &res) == 0) {
1330 			if (res.type == AML_OBJTYPE_PROCESSOR)
1331 				proc_id = res.v_processor.proc_id;
1332 			aml_freevalue(&res);
1333 		}
1334 		if (proc_id < -1 || proc_id >= LAPIC_MAP_SIZE ||
1335 		    (acpi_lapic_flags[proc_id] & ACPI_PROC_ENABLE) == 0)
1336 			return 0;
1337 #endif
1338 		nacpicpus++;
1339 
1340 		aaa.aaa_name = "acpicpu";
1341 		break;
1342 	case AML_OBJTYPE_THERMZONE:
1343 		aaa.aaa_name = "acpitz";
1344 		break;
1345 	default:
1346 		return 0;
1347 	}
1348 	config_found(self, &aaa, acpi_print);
1349 	return 0;
1350 }
1351 
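/* Set or clear the enable bit for a single GPE. */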
1352 void
1353 acpi_enable_onegpe(struct acpi_softc *sc, int gpe, int enable)
1354 {
1355 	uint8_t mask = (1L << (gpe & 7));
1356 	uint8_t en;
1357 
1358 	/* Read enabled register */
1359 	en = acpi_read_pmreg(sc, ACPIREG_GPE_EN, gpe>>3);
1360 	dnprintf(50, "%sabling GPE %.2x (current: %sabled) %.2x\n",
1361 	    enable ? "en" : "dis", gpe, (en & mask) ? "en" : "dis", en);
1362 	if (enable)
1363 		en |= mask;
1364 	else
1365 		en &= ~mask;
1366 	acpi_write_pmreg(sc, ACPIREG_GPE_EN, gpe>>3, en);
1367 }
1368 
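/*
 * Register a handler for a GPE; fails if the GPE is out of range or
 * already has a handler.  The handler runs from the ACPI thread, not
 * at interrupt level.
 */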
1369 int
1370 acpi_set_gpehandler(struct acpi_softc *sc, int gpe, int (*handler)
1371     (struct acpi_softc *, int, void *), void *arg, const char *label)
1372 {
1373 	struct gpe_block *ptbl;
1374 
1375 	ptbl = acpi_find_gpe(sc, gpe);
1376 	if (ptbl == NULL || handler == NULL)
1377 		return -EINVAL;
1378 	if (ptbl->handler != NULL) {
1379 		dnprintf(10, "error: GPE %.2x already enabled\n", gpe);
1380 		return -EBUSY;
1381 	}
1382 	dnprintf(50, "Adding GPE handler %.2x (%s)\n", gpe, label);
1383 	ptbl->handler = handler;
1384 	ptbl->arg = arg;
1385 
1386 	return (0);
1387 }
1388 
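/*
 * Default GPE handlers: evaluate the matching \_GPE._Lxx or _Exx
 * method, then clear the GPE status bit and re-enable the GPE
 * (acpi_interrupt() masked it off).
 */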
1389 int
1390 acpi_gpe_level(struct acpi_softc *sc, int gpe, void *arg)
1391 {
1392 	struct aml_node *node = arg;
1393 	uint8_t mask;
1394 
1395 	dnprintf(10, "handling Level-sensitive GPE %.2x\n", gpe);
1396 	mask = (1L << (gpe & 7));
1397 
1398 	aml_evalnode(sc, node, 0, NULL, NULL);
1399 	acpi_write_pmreg(sc, ACPIREG_GPE_STS, gpe>>3, mask);
1400 	acpi_write_pmreg(sc, ACPIREG_GPE_EN,  gpe>>3, mask);
1401 
1402 	return (0);
1403 }
1404 
1405 int
1406 acpi_gpe_edge(struct acpi_softc *sc, int gpe, void *arg)
1407 {
1408 
1409 	struct aml_node *node = arg;
1410 	uint8_t mask;
1411 
1412 	dnprintf(10, "handling Edge-sensitive GPE %.2x\n", gpe);
1413 	mask = (1L << (gpe & 7));
1414 
1415 	aml_evalnode(sc, node, 0, NULL, NULL);
1416 	acpi_write_pmreg(sc, ACPIREG_GPE_STS, gpe>>3, mask);
1417 	acpi_write_pmreg(sc, ACPIREG_GPE_EN,  gpe>>3, mask);
1418 
1419 	return (0);
1420 }
1421 
1422 /* Discover devices that can wake up the system
1423  * _PRW returns a package
1424  *  pkg[0] = integer (FADT gpe bit) or package (gpe block,gpe bit)
1425  *  pkg[1] = lowest sleep state
1426  *  pkg[2+] = power resource devices (optional)
1427  *
1428  * To enable wakeup devices:
1429  *    Evaluate _ON method in each power resource device
1430  *    Evaluate _PSW method
1431  */
1432 int
1433 acpi_foundprw(struct aml_node *node, void *arg)
1434 {
1435 	struct acpi_softc *sc = arg;
1436 	struct acpi_wakeq *wq;
1437 
1438 	wq = malloc(sizeof(struct acpi_wakeq), M_DEVBUF, M_NOWAIT | M_ZERO);
1439 	if (wq == NULL) {
1440 		return 0;
1441 	}
1442 
1443 	wq->q_wakepkg = malloc(sizeof(struct aml_value), M_DEVBUF,
1444 	    M_NOWAIT | M_ZERO);
1445 	if (wq->q_wakepkg == NULL) {
1446 		free(wq, M_DEVBUF);
1447 		return 0;
1448 	}
1449 	dnprintf(10, "Found _PRW (%s)\n", node->parent->name);
1450 	aml_evalnode(sc, node, 0, NULL, wq->q_wakepkg);
1451 	wq->q_node = node->parent;
1452 	wq->q_gpe = -1;
1453 
1454 	/* Get GPE of wakeup device, and lowest sleep level */
1455 	if (wq->q_wakepkg->type == AML_OBJTYPE_PACKAGE &&
1456 	    wq->q_wakepkg->length >= 2) {
1457 		if (wq->q_wakepkg->v_package[0]->type == AML_OBJTYPE_INTEGER)
1458 			wq->q_gpe = wq->q_wakepkg->v_package[0]->v_integer;
1459 		if (wq->q_wakepkg->v_package[1]->type == AML_OBJTYPE_INTEGER)
1460 			wq->q_state = wq->q_wakepkg->v_package[1]->v_integer;
1461 	}
1463 	SIMPLEQ_INSERT_TAIL(&sc->sc_wakedevs, wq, q_next);
1464 	return 0;
1465 }
1466 
1467 struct gpe_block *
1468 acpi_find_gpe(struct acpi_softc *sc, int gpe)
1469 {
1470 #if 1
1471 	if (gpe >= sc->sc_lastgpe)
1472 		return NULL;
1473 	return &sc->gpe_table[gpe];
1474 #else
1475 	SIMPLEQ_FOREACH(pgpe, &sc->sc_gpes, gpe_link) {
1476 		if (gpe >= pgpe->start && gpe <= (pgpe->start+7))
1477 			return &pgpe->table[gpe & 7];
1478 	}
1479 	return NULL;
1480 #endif
1481 }
1482 
1483 #if 0
1484 /* New GPE handling code: Create GPE block */
1485 void
1486 acpi_init_gpeblock(struct acpi_softc *sc, int reg, int len, int base)
1487 {
1488 	int i, j;
1489 
1490 	if (!reg || !len)
1491 		return;
1492 	for (i=0; i<len; i++) {
1493 		pgpe = acpi_os_malloc(sizeof(gpeblock));
1494 		if (pgpe == NULL)
1495 			return;
1496 
1497 		/* Allocate GPE Handler Block */
1498 		pgpe->start = base + i;
1499 		acpi_bus_space_map(sc->sc_iot, reg+i,     1, 0, &pgpe->sts_ioh);
1500 		acpi_bus_space_map(sc->sc_iot, reg+i+len, 1, 0, &pgpe->en_ioh);
1501 		SIMPLEQ_INSERT_TAIL(&sc->sc_gpes, gpe, gpe_link);
1502 
1503 		/* Clear pending GPEs */
1504 		bus_space_write_1(sc->sc_iot, pgpe->sts_ioh, 0, 0xFF);
1505 		bus_space_write_1(sc->sc_iot, pgpe->en_ioh,  0, 0x00);
1506 	}
1507 
1508 	/* Search for GPE handlers */
1509 	for (i=0; i<len*8; i++) {
1510 		char gpestr[32];
1511 		struct aml_node *h;
1512 
1513 		snprintf(gpestr, sizeof(gpestr), "\\_GPE._L%.2X", base+i);
1514 		h = aml_searchnode(&aml_root, gpestr);
1515 		if (acpi_set_gpehandler(sc, base+i, acpi_gpe_level, h, "level") != 0) {
1516 			snprintf(gpestr, sizeof(gpestr), "\\_GPE._E%.2X", base+i);
1517 			h = aml_searchnode(&aml_root, gpestr);
1518 			acpi_set_gpehandler(sc, base+i, acpi_gpe_edge, h, "edge");
1519 		}
1520 	}
1521 }
1522 
1523 /* Process GPE interrupts */
1524 int
1525 acpi_handle_gpes(struct acpi_softc *sc)
1526 {
1527 	uint8_t en, sts;
1528 	int processed, i;
1529 
1530 	processed=0;
1531 	SIMPLEQ_FOREACH(pgpe, &sc->sc_gpes, gpe_link) {
1532 		sts = bus_space_read_1(sc->sc_iot, pgpe->sts_ioh, 0);
1533 		en = bus_space_read_1(sc->sc_iot, pgpe->en_ioh, 0);
1534 		for (i=0; i<8; i++) {
1535 			if (en & sts & (1L << i)) {
1536 				pgpe->table[i].active = 1;
1537 				processed=1;
1538 			}
1539 		}
1540 	}
1541 	return processed;
1542 }
1543 #endif
1544 
1545 #if 0
1546 void
1547 acpi_add_gpeblock(struct acpi_softc *sc, int reg, int len, int gpe)
1548 {
1549 	int idx, jdx;
1550 	u_int8_t en, sts;
1551 
1552 	if (!reg || !len)
1553 		return;
1554 	for (idx=0; idx<len; idx++) {
1555 		sts = inb(reg + idx);
1556 		en  = inb(reg + len + idx);
1557 		printf("-- gpe %.2x-%.2x : en:%.2x sts:%.2x  %.2x\n",
1558 		    gpe+idx*8, gpe+idx*8+7, en, sts, en&sts);
1559 		for (jdx=0; jdx<8; jdx++) {
1560 			char gpestr[32];
1561 			struct aml_node *l, *e;
1562 
1563 			if (en & sts & (1L << jdx)) {
1564 				snprintf(gpestr,sizeof(gpestr), "\\_GPE._L%.2X", gpe+idx*8+jdx);
1565 				l = aml_searchname(&aml_root, gpestr);
1566 				snprintf(gpestr,sizeof(gpestr), "\\_GPE._E%.2X", gpe+idx*8+jdx);
1567 				e = aml_searchname(&aml_root, gpestr);
1568 				printf("  GPE %.2x active L%x E%x\n", gpe+idx*8+jdx, l, e);
1569 			}
1570 		}
1571 	}
1572 }
1573 #endif
1574 
1575 void
1576 acpi_init_gpes(struct acpi_softc *sc)
1577 {
1578 	struct aml_node *gpe;
1579 	char name[12];
1580 	int  idx, ngpe;
1581 
1582 #if 0
1583 	acpi_add_gpeblock(sc, sc->sc_fadt->gpe0_blk, sc->sc_fadt->gpe0_blk_len>>1, 0);
1584 	acpi_add_gpeblock(sc, sc->sc_fadt->gpe1_blk, sc->sc_fadt->gpe1_blk_len>>1,
1585 	    sc->sc_fadt->gpe1_base);
1586 #endif
1587 
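	/*
	 * The GPE0 block is split evenly between status and enable
	 * registers, so with one GPE per bit the block length in bytes
	 * gives (len / 2) * 8 == len << 2 GPEs.
	 */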
1588 	sc->sc_lastgpe = sc->sc_fadt->gpe0_blk_len << 2;
1589 	if (sc->sc_fadt->gpe1_blk_len) {
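		/* XXX GPE1 block not handled yet */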
1590 	}
1591 	dnprintf(50, "Last GPE: %.2x\n", sc->sc_lastgpe);
1592 
1593 	/* Allocate GPE table */
1594 	sc->gpe_table = malloc(sc->sc_lastgpe * sizeof(struct gpe_block),
1595 	    M_DEVBUF, M_WAITOK | M_ZERO);
1596 
1597 	ngpe = 0;
1598 
1599 	/* Clear GPE status */
1600 	for (idx = 0; idx < sc->sc_lastgpe; idx += 8) {
1601 		acpi_write_pmreg(sc, ACPIREG_GPE_EN,  idx>>3, 0);
1602 		acpi_write_pmreg(sc, ACPIREG_GPE_STS, idx>>3, -1);
1603 	}
1604 	for (idx = 0; idx < sc->sc_lastgpe; idx++) {
1605 		/* Search Level-sensitive GPES */
1606 		snprintf(name, sizeof(name), "\\_GPE._L%.2X", idx);
1607 		gpe = aml_searchname(&aml_root, name);
1608 		if (gpe != NULL)
1609 			acpi_set_gpehandler(sc, idx, acpi_gpe_level, gpe,
1610 			    "level");
1611 		if (gpe == NULL) {
1612 			/* Search Edge-sensitive GPES */
1613 			snprintf(name, sizeof(name), "\\_GPE._E%.2X", idx);
1614 			gpe = aml_searchname(&aml_root, name);
1615 			if (gpe != NULL)
1616 				acpi_set_gpehandler(sc, idx, acpi_gpe_edge, gpe,
1617 				    "edge");
1618 		}
1619 	}
1620 	aml_find_node(&aml_root, "_PRW", acpi_foundprw, sc);
1621 	sc->sc_maxgpe = ngpe;
1622 }
1623 
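/*
 * Evaluate the \_Sx_ packages to find the SLP_TYPa/SLP_TYPb values for
 * each sleep state; -1 marks a state the firmware does not offer.
 */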
1624 void
1625 acpi_init_states(struct acpi_softc *sc)
1626 {
1627 	struct aml_value res;
1628 	char name[8];
1629 	int i;
1630 
1631 	for (i = ACPI_STATE_S0; i <= ACPI_STATE_S5; i++) {
1632 		snprintf(name, sizeof(name), "_S%d_", i);
1633 		sc->sc_sleeptype[i].slp_typa = -1;
1634 		sc->sc_sleeptype[i].slp_typb = -1;
1635 		if (aml_evalname(sc, &aml_root, name, 0, NULL, &res) == 0) {
1636 			if (res.type == AML_OBJTYPE_PACKAGE) {
1637 				sc->sc_sleeptype[i].slp_typa = aml_val2int(res.v_package[0]);
1638 				sc->sc_sleeptype[i].slp_typb = aml_val2int(res.v_package[1]);
1639 			}
1640 			aml_freevalue(&res);
1641 		}
1642 	}
1643 }
1644 
1645 void
1646 acpi_init_pm(struct acpi_softc *sc)
1647 {
1648 	sc->sc_tts = aml_searchname(&aml_root, "_TTS");
1649 	sc->sc_pts = aml_searchname(&aml_root, "_PTS");
1650 	sc->sc_wak = aml_searchname(&aml_root, "_WAK");
1651 	sc->sc_bfs = aml_searchname(&aml_root, "_BFS");
1652 	sc->sc_gts = aml_searchname(&aml_root, "_GTS");
1653 }
1654 
1655 #ifndef SMALL_KERNEL
1656 void
1657 acpi_sleep_walk(struct acpi_softc *sc, int state)
1658 {
1659 	struct acpi_wakeq *wentry;
1660 	int idx;
1661 
1662 	/* Clear GPE status */
1663 	for (idx = 0; idx < sc->sc_lastgpe; idx += 8) {
1664 		acpi_write_pmreg(sc, ACPIREG_GPE_EN,  idx>>3, 0);
1665 		acpi_write_pmreg(sc, ACPIREG_GPE_STS, idx>>3, -1);
1666 	}
1667 
1668 	SIMPLEQ_FOREACH(wentry, &sc->sc_wakedevs, q_next) {
1669 		dnprintf(10, "%.4s(S%d) gpe %.2x\n", wentry->q_node->name,
1670 		    wentry->q_state,
1671 		    wentry->q_gpe);
1672 
1673 		if (state <= wentry->q_state)
1674 			acpi_enable_onegpe(sc, wentry->q_gpe, 1);
1675 	}
1676 }
1677 #endif /* ! SMALL_KERNEL */
1678 
1679 int
1680 acpi_sleep_state(struct acpi_softc *sc, int state)
1681 {
1682 	int ret;
1683 
1684 	switch (state) {
1685 	case ACPI_STATE_S0:
1686 		return (0);
1687 	case ACPI_STATE_S4:
1688 		return (EOPNOTSUPP);
1689 	case ACPI_STATE_S5:
1690 		break;
1691 	case ACPI_STATE_S1:
1692 	case ACPI_STATE_S2:
1693 	case ACPI_STATE_S3:
1694 		if (sc->sc_sleeptype[state].slp_typa == -1 ||
1695 		    sc->sc_sleeptype[state].slp_typb == -1)
1696 			return (EOPNOTSUPP);
1697 	}
1698 
1699 	acpi_sleep_walk(sc, state);
1700 
1701 	if ((ret = acpi_prepare_sleep_state(sc, state)) != 0)
1702 		return (ret);
1703 
1704 	if (state != ACPI_STATE_S1)
1705 		ret = acpi_sleep_machdep(sc, state);
1706 	else
1707 		ret = acpi_enter_sleep_state(sc, state);
1708 
1709 #ifndef SMALL_KERNEL
1710 	acpi_resume(sc);
1711 #endif /* ! SMALL_KERNEL */
1712 	return (ret);
1713 }
1714 
1715 int
1716 acpi_enter_sleep_state(struct acpi_softc *sc, int state)
1717 {
1718 	uint16_t rega, regb;
1719 	int retries;
1720 
1721 	/* Clear WAK_STS bit */
1722 	acpi_write_pmreg(sc, ACPIREG_PM1_STS, 1, ACPI_PM1_WAK_STS);
1723 
1724 	/* Disable BM arbitration */
1725 	acpi_write_pmreg(sc, ACPIREG_PM2_CNT, 1, ACPI_PM2_ARB_DIS);
1726 
1727 	/* Write SLP_TYPx values */
1728 	rega = acpi_read_pmreg(sc, ACPIREG_PM1A_CNT, 0);
1729 	regb = acpi_read_pmreg(sc, ACPIREG_PM1B_CNT, 0);
1730 	rega &= ~(ACPI_PM1_SLP_TYPX_MASK | ACPI_PM1_SLP_EN);
1731 	regb &= ~(ACPI_PM1_SLP_TYPX_MASK | ACPI_PM1_SLP_EN);
1732 	rega |= ACPI_PM1_SLP_TYPX(sc->sc_sleeptype[state].slp_typa);
1733 	regb |= ACPI_PM1_SLP_TYPX(sc->sc_sleeptype[state].slp_typb);
1734 	acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, 0, rega);
1735 	acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, 0, regb);
1736 
1737 	/* Set SLP_EN bit */
1738 	rega |= ACPI_PM1_SLP_EN;
1739 	regb |= ACPI_PM1_SLP_EN;
1740 
1741 	/*
1742 	 * Let the machdep code flush caches and do any other necessary
1743 	 * tasks before going away.
1744 	 */
1745 	acpi_cpu_flush(sc, state);
1746 
1747 	acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, 0, rega);
1748 	acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, 0, regb);
1749 	/* Loop on WAK_STS */
1750 	for (retries = 1000; retries > 0; retries--) {
1751 		rega = acpi_read_pmreg(sc, ACPIREG_PM1A_STS, 0);
1752 		regb = acpi_read_pmreg(sc, ACPIREG_PM1B_STS, 0);
1753 		if (rega & ACPI_PM1_WAK_STS ||
1754 		    regb & ACPI_PM1_WAK_STS)
1755 			break;
1756 		DELAY(10);
1757 	}
1758 
1759 	return (-1);
1760 }
1761 
1762 #ifndef SMALL_KERNEL
1763 void
1764 acpi_resume(struct acpi_softc *sc)
1765 {
1766 	struct aml_value env;
1767 
1768 	memset(&env, 0, sizeof(env));
1769 	env.type = AML_OBJTYPE_INTEGER;
1770 	env.v_integer = sc->sc_state;
1771 
1772 	if (sc->sc_bfs)
1773 		if (aml_evalnode(sc, sc->sc_bfs, 1, &env, NULL) != 0) {
1774 			dnprintf(10, "%s evaluating method _BFS failed.\n",
1775 			    DEVNAME(sc));
1776 		}
1777 
1778 	dopowerhooks(PWR_RESUME);
1779 	inittodr(0);
1780 
1781 	if (sc->sc_wak)
1782 		if (aml_evalnode(sc, sc->sc_wak, 1, &env, NULL) != 0) {
1783 			dnprintf(10, "%s evaluating method _WAK failed.\n",
1784 			    DEVNAME(sc));
1785 		}
1786 
1787 	sc->sc_state = ACPI_STATE_S0;
1788 	if (sc->sc_tts) {
1789 		env.v_integer = sc->sc_state;
1790 		if (aml_evalnode(sc, sc->sc_tts, 1, &env, NULL) != 0) {
1791 			dnprintf(10, "%s evaluating method _TTS failed.\n",
1792 			    DEVNAME(sc));
1793 		}
1794 	}
1795 }
1796 #endif /* ! SMALL_KERNEL */
1797 
1798 int
1799 acpi_prepare_sleep_state(struct acpi_softc *sc, int state)
1800 {
1801 	struct aml_value env;
1802 
1803 	if (sc == NULL || state == ACPI_STATE_S0)
1804 		return (0);
1805 
1806 	if (sc->sc_sleeptype[state].slp_typa == -1 ||
1807 	    sc->sc_sleeptype[state].slp_typb == -1) {
1808 		printf("%s: state S%d unavailable\n",
1809 		    sc->sc_dev.dv_xname, state);
1810 		return (ENXIO);
1811 	}
1812 
1813 	memset(&env, 0, sizeof(env));
1814 	env.type = AML_OBJTYPE_INTEGER;
1815 	env.v_integer = state;
1816 	/* _TTS(state) */
1817 	if (sc->sc_tts)
1818 		if (aml_evalnode(sc, sc->sc_tts, 1, &env, NULL) != 0) {
1819 			dnprintf(10, "%s evaluating method _TTS failed.\n",
1820 			    DEVNAME(sc));
1821 			return (ENXIO);
1822 		}
1823 
1824 	switch (state) {
1825 	case ACPI_STATE_S1:
1826 	case ACPI_STATE_S2:
1827 		resettodr();
1828 		dopowerhooks(PWR_SUSPEND);
1829 		break;
1830 	case ACPI_STATE_S3:
1831 		resettodr();
1832 		dopowerhooks(PWR_STANDBY);
1833 		break;
1834 	}
1835 
1836 	/* _PTS(state) */
1837 	if (sc->sc_pts)
1838 		if (aml_evalnode(sc, sc->sc_pts, 1, &env, NULL) != 0) {
1839 			dnprintf(10, "%s evaluating method _PTS failed.\n",
1840 			    DEVNAME(sc));
1841 			return (ENXIO);
1842 		}
1843 
1844 	sc->sc_state = state;
1845 	/* _GTS(state) */
1846 	if (sc->sc_gts)
1847 		if (aml_evalnode(sc, sc->sc_gts, 1, &env, NULL) != 0) {
1848 			dnprintf(10, "%s evaluating method _GTS failed.\n",
1849 			    DEVNAME(sc));
1850 			return (ENXIO);
1851 		}
1852 
1853 	disable_intr();
1854 	aml_evalname(sc, &aml_root, "\\_SST", 1, &env, NULL);
1855 	sc->sc_state = state;
1856 
1857 	return (0);
1858 }
1859 
1860 
1861 
1862 void
1863 acpi_powerdown(void)
1864 {
1865 	/*
1866 	 * In case acpi_prepare_sleep fails, we shouldn't try to enter
1867 	 * the sleep state. It might cost us the battery.
1868 	 */
1869 	acpi_sleep_walk(acpi_softc, ACPI_STATE_S5);
1870 	if (acpi_prepare_sleep_state(acpi_softc, ACPI_STATE_S5) == 0)
1871 		acpi_enter_sleep_state(acpi_softc, ACPI_STATE_S5);
1872 }
1873 
1874 
1875 extern int aml_busy;
1876 
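/*
 * Per-softc kernel thread: sleeps until acpi_interrupt() flags work,
 * then runs the registered GPE handlers and forwards power/sleep
 * button presses to userland via the kqueue notes.
 */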
1877 void
1878 acpi_isr_thread(void *arg)
1879 {
1880 	struct acpi_thread *thread = arg;
1881 	struct acpi_softc  *sc = thread->sc;
1882 	u_int32_t gpe;
1883 
1884 	/*
1885 	 * If we have an interrupt handler, we can get notification
1886 	 * when certain status bits change in the ACPI registers,
1887 	 * so let us enable some events we can forward to userland.
1888 	 */
1889 	if (sc->sc_interrupt) {
1890 		int16_t flag;
1891 
1892 		dnprintf(1,"slpbtn:%c  pwrbtn:%c\n",
1893 		    sc->sc_fadt->flags & FADT_SLP_BUTTON ? 'n' : 'y',
1894 		    sc->sc_fadt->flags & FADT_PWR_BUTTON ? 'n' : 'y');
1895 		dnprintf(10, "Enabling acpi interrupts...\n");
1896 		sc->sc_wakeup = 1;
1897 
1898 		/* Enable Sleep/Power buttons if they exist */
1899 		flag = acpi_read_pmreg(sc, ACPIREG_PM1_EN, 0);
1900 		if (!(sc->sc_fadt->flags & FADT_PWR_BUTTON)) {
1901 			flag |= ACPI_PM1_PWRBTN_EN;
1902 		}
1903 		if (!(sc->sc_fadt->flags & FADT_SLP_BUTTON)) {
1904 			flag |= ACPI_PM1_SLPBTN_EN;
1905 		}
1906 		acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, flag);
1907 
1908 		/* Enable handled GPEs here */
1909 		for (gpe = 0; gpe < sc->sc_lastgpe; gpe++) {
1910 			if (sc->gpe_table[gpe].handler)
1911 				acpi_enable_onegpe(sc, gpe, 1);
1912 		}
1913 	}
1914 
1915 	while (thread->running) {
1916 		dnprintf(10, "sleep... %d\n", sc->sc_wakeup);
1917 		while (sc->sc_wakeup)
1918 			tsleep(sc, PWAIT, "acpi_idle", 0);
1919 		sc->sc_wakeup = 1;
1920 		dnprintf(10, "wakeup..\n");
1921 		if (aml_busy)
1922 			continue;
1923 
1924 		for (gpe = 0; gpe < sc->sc_lastgpe; gpe++) {
1925 			struct gpe_block *pgpe = &sc->gpe_table[gpe];
1926 
1927 			if (pgpe->active) {
1928 				pgpe->active = 0;
1929 				dnprintf(50, "softgpe: %.2x\n", gpe);
1930 				if (pgpe->handler)
1931 					pgpe->handler(sc, gpe, pgpe->arg);
1932 			}
1933 		}
1934 		if (sc->sc_powerbtn) {
1935 			sc->sc_powerbtn = 0;
1936 
1937 			aml_notify_dev(ACPI_DEV_PBD, 0x80);
1938 
1939 			acpi_evindex++;
1940 			dnprintf(1,"power button pressed\n");
1941 			KNOTE(sc->sc_note, ACPI_EVENT_COMPOSE(ACPI_EV_PWRBTN,
1942 			    acpi_evindex));
1943 		}
1944 		if (sc->sc_sleepbtn) {
1945 			sc->sc_sleepbtn = 0;
1946 
1947 			aml_notify_dev(ACPI_DEV_SBD, 0x80);
1948 
1949 			acpi_evindex++;
1950 			dnprintf(1,"sleep button pressed\n");
1951 			KNOTE(sc->sc_note, ACPI_EVENT_COMPOSE(ACPI_EV_SLPBTN,
1952 			    acpi_evindex));
1953 		}
1954 
1955 		/* handle polling here to keep code non-concurrent */
1956 		if (sc->sc_poll) {
1957 			sc->sc_poll = 0;
1958 			acpi_poll_notify();
1959 		}
1960 	}
1961 	free(thread, M_DEVBUF);
1962 
1963 	kthread_exit(0);
1964 }
1965 
1966 void
1967 acpi_create_thread(void *arg)
1968 {
1969 	struct acpi_softc *sc = arg;
1970 
1971 	if (kthread_create(acpi_isr_thread, sc->sc_thread, NULL, DEVNAME(sc))
1972 	    != 0) {
1973 		printf("%s: unable to create isr thread, GPEs disabled\n",
1974 		    DEVNAME(sc));
1975 		return;
1976 	}
1977 }
1978 
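/*
 * Map a register block described by an ACPI Generic Address Structure
 * (or a raw I/O address when gas is NULL) and hand back the matching
 * bus space tag and handle.
 */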
1979 int
1980 acpi_map_address(struct acpi_softc *sc, struct acpi_gas *gas, bus_addr_t base,
1981     bus_size_t size, bus_space_handle_t *pioh, bus_space_tag_t *piot)
1982 {
1983 	int iospace = GAS_SYSTEM_IOSPACE;
1984 
1985 	/* No GAS structure, default to I/O space */
1986 	if (gas != NULL) {
1987 		base += gas->address;
1988 		iospace = gas->address_space_id;
1989 	}
1990 	switch (iospace) {
1991 	case GAS_SYSTEM_MEMORY:
1992 		*piot = sc->sc_memt;
1993 		break;
1994 	case GAS_SYSTEM_IOSPACE:
1995 		*piot = sc->sc_iot;
1996 		break;
1997 	default:
1998 		return -1;
1999 	}
2000 	if (bus_space_map(*piot, base, size, 0, pioh))
2001 		return -1;
2002 
2003 	return 0;
2004 }
2005 
2006 int
2007 acpi_foundec(struct aml_node *node, void *arg)
2008 {
2009 	struct acpi_softc	*sc = (struct acpi_softc *)arg;
2010 	struct device		*self = (struct device *)arg;
2011 	const char		*dev;
2012 	struct aml_value	 res;
2013 	struct acpi_attach_args	aaa;
2014 
2015 	if (aml_evalnode(sc, node, 0, NULL, &res) != 0)
2016 		return 0;
2017 
2018 	switch (res.type) {
2019 	case AML_OBJTYPE_STRING:
2020 		dev = res.v_string;
2021 		break;
2022 	case AML_OBJTYPE_INTEGER:
2023 		dev = aml_eisaid(aml_val2int(&res));
2024 		break;
2025 	default:
2026 		dev = "unknown";
2027 		break;
2028 	}
2029 
2030 	if (strcmp(dev, ACPI_DEV_ECD))
2031 		return 0;
2032 
2033 	memset(&aaa, 0, sizeof(aaa));
2034 	aaa.aaa_iot = sc->sc_iot;
2035 	aaa.aaa_memt = sc->sc_memt;
2036 	aaa.aaa_node = node->parent;
2037 	aaa.aaa_dev = dev;
2038 	aaa.aaa_name = "acpiec";
2039 	config_found(self, &aaa, acpi_print);
2040 	aml_freevalue(&res);
2041 
2042 	return 0;
2043 }
2044 
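/*
 * Helper for driver match routines: return 1 if aaa_dev matches one of
 * the _HIDs in the NULL-terminated "hids" list.
 */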
2045 int
2046 acpi_matchhids(struct acpi_attach_args *aa, const char *hids[],
2047     const char *driver)
2048 {
2049 	int i;
2050 
2051 	if (aa->aaa_dev == NULL || aa->aaa_node == NULL)
2052 		return (0);
2053 	for (i = 0; hids[i]; i++) {
2054 		if (!strcmp(aa->aaa_dev, hids[i])) {
2055 			dnprintf(5, "driver %s matches %s\n", driver, hids[i]);
2056 			return (1);
2057 		}
2058 	}
2059 	return (0);
2060 }
2061 
2062 int
2063 acpi_foundhid(struct aml_node *node, void *arg)
2064 {
2065 	struct acpi_softc	*sc = (struct acpi_softc *)arg;
2066 	struct device		*self = (struct device *)arg;
2067 	const char		*dev;
2068 	struct aml_value	 res;
2069 	struct acpi_attach_args	aaa;
2070 
2071 	dnprintf(10, "found hid device: %s ", node->parent->name);
2072 	if (aml_evalnode(sc, node, 0, NULL, &res) != 0)
2073 		return 0;
2074 
2075 	switch (res.type) {
2076 	case AML_OBJTYPE_STRING:
2077 		dev = res.v_string;
2078 		break;
2079 	case AML_OBJTYPE_INTEGER:
2080 		dev = aml_eisaid(aml_val2int(&res));
2081 		break;
2082 	default:
2083 		dev = "unknown";
2084 		break;
2085 	}
2086 	dnprintf(10, "	device: %s\n", dev);
2087 
2088 	memset(&aaa, 0, sizeof(aaa));
2089 	aaa.aaa_iot = sc->sc_iot;
2090 	aaa.aaa_memt = sc->sc_memt;
2091 	aaa.aaa_node = node->parent;
2092 	aaa.aaa_dev = dev;
2093 
2094 	if (!strcmp(dev, ACPI_DEV_AC))
2095 		aaa.aaa_name = "acpiac";
2096 	else if (!strcmp(dev, ACPI_DEV_CMB))
2097 		aaa.aaa_name = "acpibat";
2098 	else if (!strcmp(dev, ACPI_DEV_LD) ||
2099 	    !strcmp(dev, ACPI_DEV_PBD) ||
2100 	    !strcmp(dev, ACPI_DEV_SBD))
2101 		aaa.aaa_name = "acpibtn";
2102 	else if (!strcmp(dev, ACPI_DEV_ASUS))
2103 		aaa.aaa_name = "acpiasus";
2104 	else if (!strcmp(dev, ACPI_DEV_THINKPAD))
2105 		aaa.aaa_name = "acpithinkpad";
2106 
2107 	if (aaa.aaa_name)
2108 		config_found(self, &aaa, acpi_print);
2109 
2110 	aml_freevalue(&res);
2111 
2112 	return 0;
2113 }
2114 
2115 int
2116 acpi_founddock(struct aml_node *node, void *arg)
2117 {
2118 	struct acpi_softc	*sc = (struct acpi_softc *)arg;
2119 	struct device		*self = (struct device *)arg;
2120 	struct acpi_attach_args	aaa;
2121 
2122 	dnprintf(10, "found dock entry: %s\n", node->parent->name);
2123 
2124 	memset(&aaa, 0, sizeof(aaa));
2125 	aaa.aaa_iot = sc->sc_iot;
2126 	aaa.aaa_memt = sc->sc_memt;
2127 	aaa.aaa_node = node->parent;
2128 	aaa.aaa_name = "acpidock";
2129 
2130 	config_found(self, &aaa, acpi_print);
2131 
2132 	return 0;
2133 }
2134 
2135 int
2136 acpi_foundvideo(struct aml_node *node, void *arg)
2137 {
2138 	struct acpi_softc *sc = (struct acpi_softc *)arg;
2139 	struct device *self = (struct device *)arg;
2140 	struct acpi_attach_args	aaa;
2141 
2142 	memset(&aaa, 0, sizeof(aaa));
2143 	aaa.aaa_iot = sc->sc_iot;
2144 	aaa.aaa_memt = sc->sc_memt;
2145 	aaa.aaa_node = node->parent;
2146 	aaa.aaa_name = "acpivideo";
2147 
2148 	config_found(self, &aaa, acpi_print);
2149 
2150 	return (0);
2151 }
2152 
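/*
 * Build acpi_dv_h, a list of all attached devices ordered by their
 * depth in the device tree (rank == number of ancestors), presumably
 * so they can be processed in parent-before-child order.
 */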
2153 TAILQ_HEAD(acpi_dv_hn, acpi_dev_rank) acpi_dv_h;
2154 void
2155 acpi_dev_sort(void)
2156 {
2157 	struct device		*dev, *idev;
2158 	struct acpi_dev_rank	*rentry, *ientry;
2159 	int			rank;
2160 
2161 	TAILQ_INIT(&acpi_dv_h);
2162 
2163 	TAILQ_FOREACH(dev, &alldevs, dv_list) {
2164 		for (rank = -1, idev = dev; idev != NULL;
2165 		    idev = idev->dv_parent, rank++)
2166 			;	/* nothing */
2167 
2168 		rentry = malloc(sizeof(*rentry), M_DEVBUF, M_WAITOK | M_ZERO);
2169 		rentry->rank = rank;
2170 		rentry->dev = dev;
2171 
2172 		if (TAILQ_FIRST(&acpi_dv_h) == NULL)
2173 			TAILQ_INSERT_HEAD(&acpi_dv_h, rentry, link);
2174 		TAILQ_FOREACH_REVERSE(ientry, &acpi_dv_h, acpi_dv_hn, link) {
2175 			if (rentry->rank > ientry->rank) {
2176 				TAILQ_INSERT_AFTER(&acpi_dv_h, ientry, rentry,
2177 				    link);
2178 				break;
2179 			}
2180 		}
2181 	}
2182 }
2183 
2184 void
2185 acpi_dev_free(void)
2186 {
2187 	struct acpi_dev_rank	*dvr;
2188 
2189 	while ((dvr = TAILQ_FIRST(&acpi_dv_h)) != NULL) {
2190 		TAILQ_REMOVE(&acpi_dv_h, dvr, link);
2191 		free(dvr, M_DEVBUF);
2195 	}
2196 }
2197 #endif /* SMALL_KERNEL */
2198