1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26 /*
27 * Copyright (c) 2009-2010, Intel Corporation.
28 * All rights reserved.
29 */
30 /*
31 * ACPI CA OSL for Solaris x86
32 */
33
34 #include <sys/types.h>
35 #include <sys/kmem.h>
36 #include <sys/psm.h>
37 #include <sys/pci_cfgspace.h>
38 #include <sys/apic.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 #include <sys/sunndi.h>
42 #include <sys/pci.h>
43 #include <sys/kobj.h>
44 #include <sys/taskq.h>
45 #include <sys/strlog.h>
46 #include <sys/x86_archext.h>
47 #include <sys/note.h>
48 #include <sys/promif.h>
49
50 #include <sys/acpi/accommon.h>
51 #include <sys/acpica.h>
52
53 #define MAX_DAT_FILE_SIZE (64*1024)
54
55 /* local functions */
56 static int CompressEisaID(char *np);
57
58 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
59 static int acpica_query_bbn_problem(void);
60 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
61 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
62 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
63 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
64 static void acpica_devinfo_handler(ACPI_HANDLE, void *);
65
66 /*
67 * Event queue vars
68 */
69 int acpica_eventq_init = 0;
70 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
71
72 /*
73 * Priorities relative to minclsyspri that each taskq
74 * should run at; OSL_NOTIFY_HANDLER needs to run at a higher
75 * priority than OSL_GPE_HANDLER. There's an implicit
76 * assumption that no priority here results in exceeding
77 * maxclsyspri.
78 * Note: these initializations need to match the order of
79 * ACPI_EXECUTE_TYPE.
80 */
81 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
82 0, /* OSL_GLOBAL_LOCK_HANDLER */
83 2, /* OSL_NOTIFY_HANDLER */
84 0, /* OSL_GPE_HANDLER */
85 0, /* OSL_DEBUGGER_THREAD */
86 0, /* OSL_EC_POLL_HANDLER */
87 0 /* OSL_EC_BURST_HANDLER */
88 };
89
90 /*
91 * Note, if you change this path, you need to update
92 * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
93 */
94 static char *acpi_table_path = "/boot/acpi/tables/";
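/*
 * Illustrative note (added): with the default acpi_table_path above,
 * AcpiOsTableOverride() below would look for a replacement DSDT from
 * OEM "IntelR" with table id "AWRDACPI" at
 * /boot/acpi/tables/DSDT_IntelR_AWRDACPI.dat; the file name is built
 * at run time as "signature_oemid_oemtableid.dat".
 */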
95
96 /* non-zero while scan_d2a_map() is working */
97 static int scanning_d2a_map = 0;
98 static int d2a_done = 0;
99
100 /* features supported by ACPICA and ACPI device configuration. */
101 uint64_t acpica_core_features = ACPI_FEATURE_OSI_MODULE;
102 static uint64_t acpica_devcfg_features = 0;
103
104 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
105 int acpica_use_safe_delay = 0;
106
107 /* CPU mapping data */
108 struct cpu_map_item {
109 processorid_t cpu_id;
110 UINT32 proc_id;
111 UINT32 apic_id;
112 ACPI_HANDLE obj;
113 };
114
115 kmutex_t cpu_map_lock;
116 static struct cpu_map_item **cpu_map = NULL;
117 static int cpu_map_count_max = 0;
118 static int cpu_map_count = 0;
119 static int cpu_map_built = 0;
120
121 /*
122 * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
123 * This flag is used to check for uppc-only systems by detecting whether
124 * acpica_map_cpu() has been called or not.
125 */
126 static int cpu_map_called = 0;
127
128 static int acpi_has_broken_bbn = -1;
129
130 /* buffer for AcpiOsVprintf() */
131 #define ACPI_OSL_PR_BUFLEN 1024
132 static char *acpi_osl_pr_buffer = NULL;
133 static int acpi_osl_pr_buflen;
134
136
137 /*
138 * Destroy the taskqs used for ACPI event handling.
139 */
140 static void
141 discard_event_queues()
142 {
143 int i;
144
145 /*
146 * destroy event queues
147 */
148 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
149 if (osl_eventq[i])
150 ddi_taskq_destroy(osl_eventq[i]);
151 }
152 }
153
154
155 /*
156 * Create the taskqs used for ACPI event handling.
157 */
158 static ACPI_STATUS
159 init_event_queues()
160 {
161 char namebuf[32];
162 int i, error = 0;
163
164 /*
165 * Initialize event queues
166 */
167
168 /* Always allocate only 1 thread per queue to force FIFO execution */
169 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
170 snprintf(namebuf, 32, "ACPI%d", i);
171 osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
172 osl_eventq_pri_delta[i] + minclsyspri, 0);
173 if (osl_eventq[i] == NULL)
174 error++;
175 }
176
177 if (error != 0) {
178 discard_event_queues();
179 #ifdef DEBUG
180 cmn_err(CE_WARN, "!acpica: could not initialize event queues");
181 #endif
182 return (AE_ERROR);
183 }
184
185 acpica_eventq_init = 1;
186 return (AE_OK);
187 }
188
189 /*
190 * One-time initialization of OSL layer
191 */
192 ACPI_STATUS
193 AcpiOsInitialize(void)
194 {
195 /*
196 * Allocate buffer for AcpiOsVprintf() here to avoid
197 * kmem_alloc()/kmem_free() at high PIL
198 */
199 acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
200 if (acpi_osl_pr_buffer != NULL)
201 acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;
202
203 return (AE_OK);
204 }
205
206 /*
207 * One-time shut-down of OSL layer
208 */
209 ACPI_STATUS
210 AcpiOsTerminate(void)
211 {
212
213 if (acpi_osl_pr_buffer != NULL)
214 kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
215
216 discard_event_queues();
217 return (AE_OK);
218 }
219
220
221 ACPI_PHYSICAL_ADDRESS
222 AcpiOsGetRootPointer()
223 {
224 ACPI_PHYSICAL_ADDRESS Address;
225
226 /*
227 * For EFI firmware, the root pointer is defined in EFI systab.
228 * The boot code processes the table and puts the physical address
229 * in the acpi-root-tab property.
230 */
231 Address = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(),
232 DDI_PROP_DONTPASS, "acpi-root-tab", NULL);
233
234 if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
235 Address = NULL;
236
237 return (Address);
238 }
239
240 /*ARGSUSED*/
241 ACPI_STATUS
242 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
243 ACPI_STRING *NewVal)
244 {
245
246 *NewVal = 0;
247 return (AE_OK);
248 }
249
250 static void
251 acpica_strncpy(char *dest, const char *src, int len)
252 {
253
254 /*LINTED*/
255 while ((*dest++ = *src++) && (--len > 0))
256 /* copy the string */;
257 *dest = '\0';
258 }
259
260 ACPI_STATUS
261 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
262 ACPI_TABLE_HEADER **NewTable)
263 {
264 char signature[5];
265 char oemid[7];
266 char oemtableid[9];
267 struct _buf *file;
268 char *buf1, *buf2;
269 int count;
270 char acpi_table_loc[128];
271
272 acpica_strncpy(signature, ExistingTable->Signature, 4);
273 acpica_strncpy(oemid, ExistingTable->OemId, 6);
274 acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
275
276 #ifdef DEBUG
277 cmn_err(CE_NOTE, "!acpica: table [%s] v%d OEM ID [%s]"
278 " OEM TABLE ID [%s] OEM rev %x",
279 signature, ExistingTable->Revision, oemid, oemtableid,
280 ExistingTable->OemRevision);
281 #endif
282
283 /* File name format is "signature_oemid_oemtableid.dat" */
284 (void) strcpy(acpi_table_loc, acpi_table_path);
285 (void) strcat(acpi_table_loc, signature); /* for example, DSDT */
286 (void) strcat(acpi_table_loc, "_");
287 (void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
288 (void) strcat(acpi_table_loc, "_");
289 (void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
290 (void) strcat(acpi_table_loc, ".dat");
291
292 file = kobj_open_file(acpi_table_loc);
293 if (file == (struct _buf *)-1) {
294 *NewTable = 0;
295 return (AE_OK);
296 } else {
297 buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
298 count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
299 if (count >= MAX_DAT_FILE_SIZE) {
300 cmn_err(CE_WARN, "!acpica: table %s file size too big",
301 acpi_table_loc);
302 *NewTable = 0;
303 } else {
304 buf2 = (char *)kmem_alloc(count, KM_SLEEP);
305 (void) memcpy(buf2, buf1, count);
306 *NewTable = (ACPI_TABLE_HEADER *)buf2;
307 cmn_err(CE_NOTE, "!acpica: replacing table: %s",
308 acpi_table_loc);
309 }
310 }
311 kobj_close_file(file);
312 kmem_free(buf1, MAX_DAT_FILE_SIZE);
313
314 return (AE_OK);
315 }
316
317
318 /*
319 * ACPI semaphore implementation
320 */
321 typedef struct {
322 kmutex_t mutex;
323 kcondvar_t cv;
324 uint32_t available;
325 uint32_t initial;
326 uint32_t maximum;
327 } acpi_sema_t;
328
329 /*
330 * Initialize a counting semaphore.
331 */
332 void
333 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
334 {
335 mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
336 cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
337 /* no need to enter mutex here at creation */
338 sp->available = count;
339 sp->initial = count;
340 sp->maximum = max;
341 }
342
343 /*
344 * Destroy a counting semaphore.
345 */
346 void
347 acpi_sema_destroy(acpi_sema_t *sp)
348 {
349
350 cv_destroy(&sp->cv);
351 mutex_destroy(&sp->mutex);
352 }
353
354 /*
355 * Acquire 'count' units, waiting at most 'wait_time' milliseconds.
356 */
357 ACPI_STATUS
358 acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
359 {
360 ACPI_STATUS rv = AE_OK;
361 clock_t deadline;
362
363 mutex_enter(&sp->mutex);
364
365 if (sp->available >= count) {
366 /*
367 * Enough units available, no blocking
368 */
369 sp->available -= count;
370 mutex_exit(&sp->mutex);
371 return (rv);
372 } else if (wait_time == 0) {
373 /*
374 * Not enough units available and timeout
375 * specifies no blocking
376 */
377 rv = AE_TIME;
378 mutex_exit(&sp->mutex);
379 return (rv);
380 }
381
382 /*
383 * Not enough units available and timeout specifies waiting
384 */
385 if (wait_time != ACPI_WAIT_FOREVER)
386 deadline = ddi_get_lbolt() +
387 (clock_t)drv_usectohz(wait_time * 1000);
388
389 do {
390 if (wait_time == ACPI_WAIT_FOREVER)
391 cv_wait(&sp->cv, &sp->mutex);
392 else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
393 rv = AE_TIME;
394 break;
395 }
396 } while (sp->available < count);
397
398 /* if we dropped out of the wait with AE_OK, we got the units */
399 if (rv == AE_OK)
400 sp->available -= count;
401
402 mutex_exit(&sp->mutex);
403 return (rv);
404 }
405
406 /*
407 * Release 'count' units and wake any waiters.
408 */
409 void
410 acpi_sema_v(acpi_sema_t *sp, unsigned count)
411 {
412 mutex_enter(&sp->mutex);
413 sp->available += count;
414 cv_broadcast(&sp->cv);
415 mutex_exit(&sp->mutex);
416 }
417
418
419 ACPI_STATUS
420 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
421 ACPI_HANDLE *OutHandle)
422 {
423 acpi_sema_t *sp;
424
425 if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
426 return (AE_BAD_PARAMETER);
427
428 sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
429 acpi_sema_init(sp, MaxUnits, InitialUnits);
430 *OutHandle = (ACPI_HANDLE)sp;
431 return (AE_OK);
432 }
433
434
435 ACPI_STATUS
436 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
437 {
438
439 if (Handle == NULL)
440 return (AE_BAD_PARAMETER);
441
442 acpi_sema_destroy((acpi_sema_t *)Handle);
443 kmem_free((void *)Handle, sizeof (acpi_sema_t));
444 return (AE_OK);
445 }
446
447 ACPI_STATUS
448 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
449 {
450
451 if ((Handle == NULL) || (Units < 1))
452 return (AE_BAD_PARAMETER);
453
454 return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
455 }
456
457 ACPI_STATUS
458 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
459 {
460
461 if ((Handle == NULL) || (Units < 1))
462 return (AE_BAD_PARAMETER);
463
464 acpi_sema_v((acpi_sema_t *)Handle, Units);
465 return (AE_OK);
466 }
467
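/*
 * Usage sketch for the semaphore entry points above (illustrative only,
 * not called from this file); the handle returned by
 * AcpiOsCreateSemaphore() is simply a pointer to an acpi_sema_t, and the
 * 100 below is a hypothetical millisecond timeout:
 *
 *	ACPI_HANDLE sem;
 *
 *	if (AcpiOsCreateSemaphore(1, 1, &sem) == AE_OK) {
 *		if (AcpiOsWaitSemaphore(sem, 1, 100) == AE_OK)
 *			(void) AcpiOsSignalSemaphore(sem, 1);
 *		(void) AcpiOsDeleteSemaphore(sem);
 *	}
 */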
468 ACPI_STATUS
469 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
470 {
471 kmutex_t *mp;
472
473 if (OutHandle == NULL)
474 return (AE_BAD_PARAMETER);
475
476 mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
477 mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
478 *OutHandle = (ACPI_HANDLE)mp;
479 return (AE_OK);
480 }
481
482 void
483 AcpiOsDeleteLock(ACPI_HANDLE Handle)
484 {
485
486 if (Handle == NULL)
487 return;
488
489 mutex_destroy((kmutex_t *)Handle);
490 kmem_free((void *)Handle, sizeof (kmutex_t));
491 }
492
493 ACPI_CPU_FLAGS
494 AcpiOsAcquireLock(ACPI_HANDLE Handle)
495 {
496
497
498 if (Handle == NULL)
499 return (AE_BAD_PARAMETER);
500
501 if (curthread == CPU->cpu_idle_thread) {
502 while (!mutex_tryenter((kmutex_t *)Handle))
503 /* spin */;
504 } else
505 mutex_enter((kmutex_t *)Handle);
506 return (AE_OK);
507 }
508
509 void
510 AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
511 {
512 _NOTE(ARGUNUSED(Flags))
513
514 mutex_exit((kmutex_t *)Handle);
515 }
516
517
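/*
 * Added commentary on the allocator below: ACPI CA frees memory without
 * passing a size, but kmem_free() needs one, so AcpiOsAllocate() prepends
 * a hidden ACPI_SIZE header recording the total length and returns the
 * address just past it; AcpiOsFree() steps back over the header to
 * recover the size.
 */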
518 void *
519 AcpiOsAllocate(ACPI_SIZE Size)
520 {
521 ACPI_SIZE *tmp_ptr;
522
523 Size += sizeof (Size);
524 tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
525 *tmp_ptr++ = Size;
526 return (tmp_ptr);
527 }
528
529 void
530 AcpiOsFree(void *Memory)
531 {
532 ACPI_SIZE size, *tmp_ptr;
533
534 tmp_ptr = (ACPI_SIZE *)Memory;
535 tmp_ptr -= 1;
536 size = *tmp_ptr;
537 kmem_free(tmp_ptr, size);
538 }
539
540 static int napics_found; /* number of ioapic addresses in array */
541 static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];
542 static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;
543 static void *dummy_ioapicadr;
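/*
 * Added commentary: acpica_find_ioapics() records the physical page of
 * each ioapic listed in the MADT, and AcpiOsMapMemory() redirects any
 * mapping request for one of those pages to dummy_ioapicadr (a zeroed
 * page), so AML accesses cannot reach the real ioapic registers that the
 * OS interrupt code manages.
 */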
544
545 void
546 acpica_find_ioapics(void)
547 {
548 int madt_seen, madt_size;
549 ACPI_SUBTABLE_HEADER *ap;
550 ACPI_MADT_IO_APIC *mia;
551
552 if (acpi_mapic_dtp != NULL)
553 return; /* already parsed table */
554 if (AcpiGetTable(ACPI_SIG_MADT, 1,
555 (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
556 return;
557
558 napics_found = 0;
559
560 /*
561 * Search the MADT for ioapics
562 */
563 ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
564 madt_size = acpi_mapic_dtp->Header.Length;
565 madt_seen = sizeof (*acpi_mapic_dtp);
566
567 while (madt_seen < madt_size) {
568
569 switch (ap->Type) {
570 case ACPI_MADT_TYPE_IO_APIC:
571 mia = (ACPI_MADT_IO_APIC *) ap;
572 if (napics_found < MAX_IO_APIC) {
573 ioapic_paddr[napics_found++] =
574 (ACPI_PHYSICAL_ADDRESS)
575 (mia->Address & PAGEMASK);
576 }
577 break;
578
579 default:
580 break;
581 }
582
583 /* advance to next entry */
584 madt_seen += ap->Length;
585 ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
586 }
587 if (dummy_ioapicadr == NULL)
588 dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
589 }
590
591
592 void *
593 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
594 {
595 int i;
596
597 /*
598 * If the ioapic address table is populated, check whether the caller
599 * is trying to map an ioapic; if so, return a pointer to a dummy ioapic.
600 */
601 for (i = 0; i < napics_found; i++) {
602 if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
603 return (dummy_ioapicadr);
604 }
605 /* FUTUREWORK: test PhysicalAddress for > 32 bits */
606 return (psm_map_new((paddr_t)PhysicalAddress,
607 (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
608 }
609
610 void
611 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
612 {
613 /*
614 * Check if trying to unmap dummy ioapic address.
615 */
616 if (LogicalAddress == dummy_ioapicadr)
617 return;
618
619 psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
620 }
621
622 /*ARGSUSED*/
623 ACPI_STATUS
624 AcpiOsGetPhysicalAddress(void *LogicalAddress,
625 ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
626 {
627
628 /* UNIMPLEMENTED: not invoked by ACPI CA code */
629 return (AE_NOT_IMPLEMENTED);
630 }
631
632
633 ACPI_OSD_HANDLER acpi_isr;
634 void *acpi_isr_context;
635
636 uint_t
637 acpi_wrapper_isr(char *arg)
638 {
639 _NOTE(ARGUNUSED(arg))
640
641 int status;
642
643 status = (*acpi_isr)(acpi_isr_context);
644
645 if (status == ACPI_INTERRUPT_HANDLED) {
646 return (DDI_INTR_CLAIMED);
647 } else {
648 return (DDI_INTR_UNCLAIMED);
649 }
650 }
651
652 static int acpi_intr_hooked = 0;
653
654 ACPI_STATUS
655 AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
656 ACPI_OSD_HANDLER ServiceRoutine,
657 void *Context)
658 {
659 _NOTE(ARGUNUSED(InterruptNumber))
660
661 int retval;
662 int sci_vect;
663 iflag_t sci_flags;
664
665 acpi_isr = ServiceRoutine;
666 acpi_isr_context = Context;
667
668 /*
669 * Get SCI (adjusted for PIC/APIC mode if necessary)
670 */
671 if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
672 return (AE_ERROR);
673 }
674
675 #ifdef DEBUG
676 cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
677 #endif
678
679 retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
680 "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
681 if (retval) {
682 acpi_intr_hooked = 1;
683 return (AE_OK);
684 } else
685 return (AE_BAD_PARAMETER);
686 }
687
688 ACPI_STATUS
689 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
690 ACPI_OSD_HANDLER ServiceRoutine)
691 {
692 _NOTE(ARGUNUSED(ServiceRoutine))
693
694 #ifdef DEBUG
695 cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
696 #endif
697 if (acpi_intr_hooked) {
698 rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
699 InterruptNumber);
700 acpi_intr_hooked = 0;
701 }
702 return (AE_OK);
703 }
704
705
706 ACPI_THREAD_ID
707 AcpiOsGetThreadId(void)
708 {
709 /*
710 * ACPI CA doesn't care what actual value is returned as long
711 * as it is non-zero and unique to each existing thread.
712 * ACPI CA assumes that thread ID is castable to a pointer,
713 * so we use the current thread pointer.
714 */
715 return (curthread);
716 }
717
718 /*
719 * Dispatch a callback to the taskq associated with its execution type.
720 */
721 ACPI_STATUS
722 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
723 void *Context)
724 {
725
726 if (!acpica_eventq_init) {
727 /*
728 * Create taskqs for event handling
729 */
730 if (init_event_queues() != AE_OK)
731 return (AE_ERROR);
732 }
733
734 if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
735 DDI_NOSLEEP) == DDI_FAILURE) {
736 #ifdef DEBUG
737 cmn_err(CE_WARN, "!acpica: unable to dispatch event");
738 #endif
739 return (AE_ERROR);
740 }
741 return (AE_OK);
742
743 }
744
745 void
746 AcpiOsSleep(ACPI_INTEGER Milliseconds)
747 {
748 /*
749 * During kernel startup, before the first tick interrupt
750 * has taken place, we can't call delay; very late in
751 * kernel shutdown or suspend/resume, clock interrupts
752 * are blocked, so delay doesn't work then either.
753 * So we busy wait if lbolt == 0 (kernel startup)
754 * or if acpica_use_safe_delay has been set to a
755 * non-zero value.
756 */
757 if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
758 drv_usecwait(Milliseconds * 1000);
759 else
760 delay(drv_usectohz(Milliseconds * 1000));
761 }
762
763 void
764 AcpiOsStall(UINT32 Microseconds)
765 {
766 drv_usecwait(Microseconds);
767 }
768
769
770 /*
771 * Implementation of "Windows 2001" compatible I/O permission map
772 *
773 */
774 #define OSL_IO_NONE (0)
775 #define OSL_IO_READ (1<<0)
776 #define OSL_IO_WRITE (1<<1)
777 #define OSL_IO_RW (OSL_IO_READ | OSL_IO_WRITE)
778 #define OSL_IO_TERM (1<<2)
779 #define OSL_IO_DEFAULT OSL_IO_RW
780
781 static struct io_perm {
782 ACPI_IO_ADDRESS low;
783 ACPI_IO_ADDRESS high;
784 uint8_t perm;
785 } osl_io_perm[] = {
786 { 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW}
787 };
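/*
 * Sketch (hypothetical, not part of the original table): extra ranges can
 * be added ahead of the terminating entry, e.g. to make ports 0x70-0x71
 * read-only for AML; the final entry must keep OSL_IO_TERM set, since
 * osl_io_find_perm() relies on it to stop scanning:
 *
 *	static struct io_perm osl_io_perm[] = {
 *		{ 0x70, 0x71, OSL_IO_READ },
 *		{ 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW }
 *	};
 */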
788
789
790 /*
791 * Find the permission map entry covering an I/O port address, if any.
792 */
793 static struct io_perm *
794 osl_io_find_perm(ACPI_IO_ADDRESS addr)
795 {
796 struct io_perm *p;
797
798 p = osl_io_perm;
799 while (p != NULL) {
800 if ((p->low <= addr) && (addr <= p->high))
801 break;
802 p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
803 }
804
805 return (p);
806 }
807
808 /*
809 * Read an I/O port, subject to the permission map above.
810 */
811 ACPI_STATUS
812 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
813 {
814 struct io_perm *p;
815
816 /* verify permission */
817 p = osl_io_find_perm(Address);
818 if (p && (p->perm & OSL_IO_READ) == 0) {
819 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
820 (long)Address, Width);
821 *Value = 0xffffffff;
822 return (AE_ERROR);
823 }
824
825 switch (Width) {
826 case 8:
827 *Value = inb(Address);
828 break;
829 case 16:
830 *Value = inw(Address);
831 break;
832 case 32:
833 *Value = inl(Address);
834 break;
835 default:
836 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
837 (long)Address, Width);
838 return (AE_BAD_PARAMETER);
839 }
840 return (AE_OK);
841 }
842
843 ACPI_STATUS
844 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
845 {
846 struct io_perm *p;
847
848 /* verify permission */
849 p = osl_io_find_perm(Address);
850 if (p && (p->perm & OSL_IO_WRITE) == 0) {
851 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
852 (long)Address, Width);
853 return (AE_ERROR);
854 }
855
856 switch (Width) {
857 case 8:
858 outb(Address, Value);
859 break;
860 case 16:
861 outw(Address, Value);
862 break;
863 case 32:
864 outl(Address, Value);
865 break;
866 default:
867 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
868 (long)Address, Width);
869 return (AE_BAD_PARAMETER);
870 }
871 return (AE_OK);
872 }
873
874
875 /*
876 * Helpers for memory-mapped register reads and writes.
877 */
878
879 #define OSL_RW(ptr, val, type, rw) \
880 { if (rw) *((type *)(ptr)) = *((type *) val); \
881 else *((type *) val) = *((type *)(ptr)); }
882
883
884 static void
885 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT32 *Value,
886 UINT32 Width, int write)
887 {
888 size_t maplen = Width / 8;
889 caddr_t ptr;
890
891 ptr = psm_map_new((paddr_t)Address, maplen,
892 PSM_PROT_WRITE | PSM_PROT_READ);
893
894 switch (maplen) {
895 case 1:
896 OSL_RW(ptr, Value, uint8_t, write);
897 break;
898 case 2:
899 OSL_RW(ptr, Value, uint16_t, write);
900 break;
901 case 4:
902 OSL_RW(ptr, Value, uint32_t, write);
903 break;
904 default:
905 cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
906 Width);
907 break;
908 }
909
910 psm_unmap(ptr, maplen);
911 }
912
913 ACPI_STATUS
914 AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
915 UINT32 *Value, UINT32 Width)
916 {
917 osl_rw_memory(Address, Value, Width, 0);
918 return (AE_OK);
919 }
920
921 ACPI_STATUS
922 AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
923 UINT32 Value, UINT32 Width)
924 {
925 osl_rw_memory(Address, &Value, Width, 1);
926 return (AE_OK);
927 }
928
929
930 ACPI_STATUS
931 AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register,
932 void *Value, UINT32 Width)
933 {
934
935 switch (Width) {
936 case 8:
937 *((UINT64 *)Value) = (UINT64)(*pci_getb_func)
938 (PciId->Bus, PciId->Device, PciId->Function, Register);
939 break;
940 case 16:
941 *((UINT64 *)Value) = (UINT64)(*pci_getw_func)
942 (PciId->Bus, PciId->Device, PciId->Function, Register);
943 break;
944 case 32:
945 *((UINT64 *)Value) = (UINT64)(*pci_getl_func)
946 (PciId->Bus, PciId->Device, PciId->Function, Register);
947 break;
948 case 64:
949 default:
950 cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
951 Register, Width);
952 return (AE_BAD_PARAMETER);
953 }
954 return (AE_OK);
955 }
956
957 /*
958 * When zero, PCI config space writes requested by AML are logged and dropped.
959 */
960 int acpica_write_pci_config_ok = 1;
961
962 ACPI_STATUS
963 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register,
964 ACPI_INTEGER Value, UINT32 Width)
965 {
966
967 if (!acpica_write_pci_config_ok) {
968 cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
969 " %lx %d not permitted", PciId->Bus, PciId->Device,
970 PciId->Function, Register, (long)Value, Width);
971 return (AE_OK);
972 }
973
974 switch (Width) {
975 case 8:
976 (*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
977 Register, (uint8_t)Value);
978 break;
979 case 16:
980 (*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
981 Register, (uint16_t)Value);
982 break;
983 case 32:
984 (*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
985 Register, (uint32_t)Value);
986 break;
987 case 64:
988 default:
989 cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
990 Register, Width);
991 return (AE_BAD_PARAMETER);
992 }
993 return (AE_OK);
994 }
995
996 /*
997 * Called with ACPI_HANDLEs for both a PCI Config Space
998 * OpRegion and (what ACPI CA thinks is) the PCI device
999 * to which this ConfigSpace OpRegion belongs.
1000 *
1001 * ACPI CA uses _BBN and _ADR objects to determine the default
1002 * values for bus, segment, device and function; anything ACPI CA
1003 * can't figure out from the ACPI tables will be 0. One very
1004 * old 32-bit x86 system is known to have broken _BBN; this is
1005 * not addressed here.
1006 *
1007 * Some BIOSes implement _BBN() by reading PCI config space
1008 * on bus #0 - which means that we'll recurse when we attempt
1009 * to create the devinfo-to-ACPI map. If Derive is called during
1010 * scan_d2a_map, we don't translate the bus # and return.
1011 *
1012 * We get the parent of the OpRegion, which must be a PCI
1013 * node, fetch the associated devinfo node and snag the
1014 * b/d/f from it.
1015 */
1016 void
1017 AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
1018 ACPI_PCI_ID **PciId)
1019 {
1020 ACPI_HANDLE handle;
1021 dev_info_t *dip;
1022 int bus, device, func, devfn;
1023
1024 /*
1025 * See above - avoid recursing during scanning_d2a_map.
1026 */
1027 if (scanning_d2a_map)
1028 return;
1029
1030 /*
1031 * Get the OpRegion's parent
1032 */
1033 if (AcpiGetParent(chandle, &handle) != AE_OK)
1034 return;
1035
1036 /*
1037 * If we've mapped the ACPI node to the devinfo
1038 * tree, use the devinfo reg property
1039 */
1040 if (ACPI_SUCCESS(acpica_get_devinfo(handle, &dip)) &&
1041 (acpica_get_bdf(dip, &bus, &device, &func) >= 0)) {
1042 (*PciId)->Bus = bus;
1043 (*PciId)->Device = device;
1044 (*PciId)->Function = func;
1045 }
1046 }
1047
1048
1049 /*ARGSUSED*/
1050 BOOLEAN
1051 AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
1052 {
1053
1054 /* Always says yes; all mapped memory assumed readable */
1055 return (1);
1056 }
1057
1058 /*ARGSUSED*/
1059 BOOLEAN
1060 AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
1061 {
1062
1063 /* Always says yes; all mapped memory assumed writable */
1064 return (1);
1065 }
1066
1067 UINT64
1068 AcpiOsGetTimer(void)
1069 {
1070 /* gethrtime() returns 1ns resolution; convert to 100ns granules */
1071 return ((gethrtime() + 50) / 100);
1072 }
1073
1074 static struct AcpiOSIFeature_s {
1075 uint64_t control_flag;
1076 const char *feature_name;
1077 } AcpiOSIFeatures[] = {
1078 { ACPI_FEATURE_OSI_MODULE, "Module Device" },
1079 { 0, "Processor Device" }
1080 };
1081
1082 /*ARGSUSED*/
1083 ACPI_STATUS
1084 AcpiOsValidateInterface(char *feature)
1085 {
1086 int i;
1087
1088 ASSERT(feature != NULL);
1089 for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1090 i++) {
1091 if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1092 continue;
1093 }
1094 /* Check whether required core features are available. */
1095 if (AcpiOSIFeatures[i].control_flag != 0 &&
1096 acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1097 AcpiOSIFeatures[i].control_flag) {
1098 break;
1099 }
1100 /* Feature supported. */
1101 return (AE_OK);
1102 }
1103
1104 return (AE_SUPPORT);
1105 }
1106
1107 /*ARGSUSED*/
1108 ACPI_STATUS
1109 AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
1110 ACPI_SIZE length)
1111 {
1112 return (AE_OK);
1113 }
1114
1115 ACPI_STATUS
1116 AcpiOsSignal(UINT32 Function, void *Info)
1117 {
1118 _NOTE(ARGUNUSED(Function, Info))
1119
1120 /* FUTUREWORK: debugger support */
1121
1122 cmn_err(CE_NOTE, "!OsSignal unimplemented");
1123 return (AE_OK);
1124 }
1125
1126 void ACPI_INTERNAL_VAR_XFACE
1127 AcpiOsPrintf(const char *Format, ...)
1128 {
1129 va_list ap;
1130
1131 va_start(ap, Format);
1132 AcpiOsVprintf(Format, ap);
1133 va_end(ap);
1134 }
1135
1136 /*
1137 * When != 0, sends output to console
1138 * Patchable with kmdb or /etc/system.
1139 */
1140 int acpica_console_out = 0;
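/*
 * Example (assumes this file is built into the "acpica" kernel module):
 * ACPI CA output could be forced to the console by patching the variable
 * with kmdb, or by adding a line like the following to /etc/system:
 *
 *	set acpica:acpica_console_out = 1
 */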
1141
1142 #define ACPICA_OUTBUF_LEN 160
1143 char acpica_outbuf[ACPICA_OUTBUF_LEN];
1144 int acpica_outbuf_offset;
1145
1146 /*
1147 * Buffer output and flush it a line at a time to the selected sink.
1148 */
1149 static void
1150 acpica_pr_buf(char *buf)
1151 {
1152 char c, *bufp, *outp;
1153 int out_remaining;
1154
1155 /*
1156 * copy the supplied buffer into the output buffer
1157 * when we hit a '\n' or overflow the output buffer,
1158 * output and reset the output buffer
1159 */
1160 bufp = buf;
1161 outp = acpica_outbuf + acpica_outbuf_offset;
1162 out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1163 while (c = *bufp++) {
1164 *outp++ = c;
1165 if (c == '\n' || --out_remaining == 0) {
1166 *outp = '\0';
1167 switch (acpica_console_out) {
1168 case 1:
1169 printf(acpica_outbuf);
1170 break;
1171 case 2:
1172 prom_printf(acpica_outbuf);
1173 break;
1174 case 0:
1175 default:
1176 (void) strlog(0, 0, 0,
1177 SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1178 acpica_outbuf);
1179 break;
1180 }
1181 acpica_outbuf_offset = 0;
1182 outp = acpica_outbuf;
1183 out_remaining = ACPICA_OUTBUF_LEN - 1;
1184 }
1185 }
1186
1187 acpica_outbuf_offset = outp - acpica_outbuf;
1188 }
1189
1190 void
1191 AcpiOsVprintf(const char *Format, va_list Args)
1192 {
1193
1194 /*
1195 * If AcpiOsInitialize() failed to allocate a string buffer,
1196 * resort to vprintf().
1197 */
1198 if (acpi_osl_pr_buffer == NULL) {
1199 vprintf(Format, Args);
1200 return;
1201 }
1202
1203 /*
1204 * It is possible that a very long debug output statement will
1205 * be truncated; this is silently ignored.
1206 */
1207 (void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
1208 acpica_pr_buf(acpi_osl_pr_buffer);
1209 }
1210
1211 void
1212 AcpiOsRedirectOutput(void *Destination)
1213 {
1214 _NOTE(ARGUNUSED(Destination))
1215
1216 /* FUTUREWORK: debugger support */
1217
1218 #ifdef DEBUG
1219 cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
1220 #endif
1221 }
1222
1223
1224 UINT32
1225 AcpiOsGetLine(char *Buffer)
1226 {
1227 _NOTE(ARGUNUSED(Buffer))
1228
1229 /* FUTUREWORK: debugger support */
1230
1231 return (0);
1232 }
1233
1234 /*
1235 * Device tree binding
1236 */
1237 static ACPI_STATUS
1238 acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1239 {
1240 _NOTE(ARGUNUSED(lvl));
1241
1242 int sta, hid, bbn;
1243 int busno = (intptr_t)ctxp;
1244 ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;
1245
1246 /* Check whether device exists. */
1247 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1248 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1249 /*
1250 * Skip object if device doesn't exist.
1251 * According to ACPI Spec,
1252 * 1) setting either bit 0 or bit 3 means that device exists.
1253 * 2) Absence of _STA method means all status bits set.
1254 */
1255 return (AE_CTRL_DEPTH);
1256 }
1257
1258 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1259 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1260 /* Non PCI/PCIe host bridge. */
1261 return (AE_OK);
1262 }
1263
1264 if (acpi_has_broken_bbn) {
1265 ACPI_BUFFER rb;
1266 rb.Pointer = NULL;
1267 rb.Length = ACPI_ALLOCATE_BUFFER;
1268
1269 /* Decree _BBN == n from PCI<n> */
1270 if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
1271 return (AE_CTRL_TERMINATE);
1272 }
1273 bbn = ((char *)rb.Pointer)[3] - '0';
1274 AcpiOsFree(rb.Pointer);
1275 if (bbn == busno || busno == 0) {
1276 *hdlp = hdl;
1277 return (AE_CTRL_TERMINATE);
1278 }
1279 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
1280 if (bbn == busno) {
1281 *hdlp = hdl;
1282 return (AE_CTRL_TERMINATE);
1283 }
1284 } else if (busno == 0) {
1285 *hdlp = hdl;
1286 return (AE_CTRL_TERMINATE);
1287 }
1288
1289 return (AE_CTRL_DEPTH);
1290 }
1291
1292 static int
1293 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1294 {
1295 ACPI_HANDLE sbobj, busobj;
1296
1297 /* initialize static flag by querying ACPI namespace for bug */
1298 if (acpi_has_broken_bbn == -1)
1299 acpi_has_broken_bbn = acpica_query_bbn_problem();
1300
1301 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1302 busobj = NULL;
1303 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1304 acpica_find_pcibus_walker, NULL, (void *)(intptr_t)busno,
1305 (void **)&busobj);
1306 if (busobj != NULL) {
1307 *rh = busobj;
1308 return (AE_OK);
1309 }
1310 }
1311
1312 return (AE_ERROR);
1313 }
1314
1315 static ACPI_STATUS
1316 acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1317 {
1318 _NOTE(ARGUNUSED(lvl));
1319 _NOTE(ARGUNUSED(rvpp));
1320
1321 int sta, hid, bbn;
1322 int *cntp = (int *)ctxp;
1323
1324 /* Check whether device exists. */
1325 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1326 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1327 /*
1328 * Skip object if device doesn't exist.
1329 * According to ACPI Spec,
1330 * 1) setting either bit 0 or bit 3 means that device exists.
1331 * 2) Absence of _STA method means all status bits set.
1332 */
1333 return (AE_CTRL_DEPTH);
1334 }
1335
1336 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1337 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1338 /* Non PCI/PCIe host bridge. */
1339 return (AE_OK);
1340 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
1341 bbn == 0 && ++(*cntp) > 1) {
1342 /*
1343 * If we find more than one bus with a 0 _BBN,
1344 * we have the problem that BigBear's BIOS exhibits.
1345 */
1346 return (AE_CTRL_TERMINATE);
1347 } else {
1348 /*
1349 * Skip children of PCI/PCIe host bridge.
1350 */
1351 return (AE_CTRL_DEPTH);
1352 }
1353 }
1354
1355 /*
1356 * Look for the ACPI problem where _BBN is zero for multiple PCI buses.
1357 * This is a clear ACPI bug, but we have a workaround in
1358 * acpica_find_pcibus() if it exists.
1359 */
1360 static int
1361 acpica_query_bbn_problem(void)
1362 {
1363 ACPI_HANDLE sbobj;
1364 int zerobbncnt;
1365 void *rv;
1366
1367 zerobbncnt = 0;
1368 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1369 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1370 acpica_query_bbn_walker, NULL, &zerobbncnt, &rv);
1371 }
1372
1373 return (zerobbncnt > 1 ? 1 : 0);
1374 }
1375
1376 static const char hextab[] = "0123456789ABCDEF";
1377
1378 static int
1379 hexdig(int c)
1380 {
1381 /*
1382 * Get hex digit:
1383 *
1384 * Returns the 4-bit hex digit named by the input character. Returns
1385 * zero if the input character is not valid hex!
1386 */
1387
1388 int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
1389 int j = sizeof (hextab);
1390
1391 while (--j && (x != hextab[j])) {
1392 }
1393 return (j);
1394 }
1395
1396 static int
1397 CompressEisaID(char *np)
1398 {
1399 /*
1400 * Compress an EISA device name:
1401 *
1402 * This routine converts a 7-byte ASCII device name into the 4-byte
1403 * compressed form used by EISA (50 bytes of ROM to save 1 byte of
1404 * NV-RAM!)
1405 */
1406
1407 union { char octets[4]; int retval; } myu;
1408
1409 myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
1410 myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
1411 myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
1412 myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);
1413
1414 return (myu.retval);
1415 }
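/*
 * Worked example (added for illustration): compressing the EISA ID
 * "PNP0A03" with the routine above yields the octets 0x41, 0xD0, 0x0A,
 * 0x03 in order; read through the union as a little-endian int on x86,
 * that is the compressed value 0x030AD041.
 */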
1416
1417 ACPI_STATUS
1418 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1419 {
1420 ACPI_STATUS status;
1421 ACPI_BUFFER rb;
1422 ACPI_OBJECT ro;
1423
1424 rb.Pointer = &ro;
1425 rb.Length = sizeof (ro);
1426 if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1427 ACPI_TYPE_INTEGER)) == AE_OK)
1428 *rint = ro.Integer.Value;
1429
1430 return (status);
1431 }
1432
1433 static int
1434 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
1435 {
1436 ACPI_BUFFER rb;
1437 ACPI_OBJECT *rv;
1438
1439 rb.Pointer = NULL;
1440 rb.Length = ACPI_ALLOCATE_BUFFER;
1441 if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
1442 rb.Length != 0) {
1443 rv = rb.Pointer;
1444 if (rv->Type == ACPI_TYPE_INTEGER) {
1445 *rint = rv->Integer.Value;
1446 AcpiOsFree(rv);
1447 return (AE_OK);
1448 } else if (rv->Type == ACPI_TYPE_STRING) {
1449 char *stringData;
1450
1451 /* Convert the string into an EISA ID */
1452 if (rv->String.Pointer == NULL) {
1453 AcpiOsFree(rv);
1454 return (AE_ERROR);
1455 }
1456
1457 stringData = rv->String.Pointer;
1458
1459 /*
1460 * If the string is an EisaID, it must be 7
1461 * characters; if it's an ACPI ID, it will be 8
1462 * (and we don't care about ACPI ids here).
1463 */
1464 if (strlen(stringData) != 7) {
1465 AcpiOsFree(rv);
1466 return (AE_ERROR);
1467 }
1468
1469 *rint = CompressEisaID(stringData);
1470 AcpiOsFree(rv);
1471 return (AE_OK);
1472 } else
1473 AcpiOsFree(rv);
1474 }
1475 return (AE_ERROR);
1476 }
1477
1478 /*
1479 * Create linkage between devinfo nodes and ACPI nodes
1480 */
1481 ACPI_STATUS
1482 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1483 {
1484 ACPI_STATUS status;
1485 ACPI_BUFFER rb;
1486
1487 /*
1488 * Tag the devinfo node with the ACPI name
1489 */
1490 rb.Pointer = NULL;
1491 rb.Length = ACPI_ALLOCATE_BUFFER;
1492 status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1493 if (ACPI_FAILURE(status)) {
1494 cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1495 } else {
1496 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1497 "acpi-namespace", (char *)rb.Pointer);
1498 AcpiOsFree(rb.Pointer);
1499
1500 /*
1501 * Tag the ACPI node with the dip
1502 */
1503 status = acpica_set_devinfo(acpiobj, dip);
1504 ASSERT(ACPI_SUCCESS(status));
1505 }
1506
1507 return (status);
1508 }
1509
1510 /*
1511 * Destroy linkage between devinfo nodes and ACPI nodes
1512 */
1513 ACPI_STATUS
1514 acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1515 {
1516 (void) acpica_unset_devinfo(acpiobj);
1517 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");
1518
1519 return (AE_OK);
1520 }
1521
1522 /*
1523 * Return the ACPI device node matching the CPU dev_info node.
1524 */
1525 ACPI_STATUS
1526 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1527 {
1528 int i;
1529
1530 /*
1531 * If cpu_map itself is NULL, we're a uppc system and
1532 * acpica_build_processor_map() hasn't been called yet,
1533 * so call it here.
1534 */
1535 if (cpu_map == NULL) {
1536 (void) acpica_build_processor_map();
1537 if (cpu_map == NULL)
1538 return (AE_ERROR);
1539 }
1540
1541 if (cpu_id < 0) {
1542 return (AE_ERROR);
1543 }
1544
1545 /*
1546 * search object with cpuid in cpu_map
1547 */
1548 mutex_enter(&cpu_map_lock);
1549 for (i = 0; i < cpu_map_count; i++) {
1550 if (cpu_map[i]->cpu_id == cpu_id) {
1551 break;
1552 }
1553 }
1554 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1555 *rh = cpu_map[i]->obj;
1556 mutex_exit(&cpu_map_lock);
1557 return (AE_OK);
1558 }
1559
1560 /* Handle special case for uppc-only systems. */
1561 if (cpu_map_called == 0) {
1562 uint32_t apicid = cpuid_get_apicid(CPU);
1563 if (apicid != UINT32_MAX) {
1564 for (i = 0; i < cpu_map_count; i++) {
1565 if (cpu_map[i]->apic_id == apicid) {
1566 break;
1567 }
1568 }
1569 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1570 *rh = cpu_map[i]->obj;
1571 mutex_exit(&cpu_map_lock);
1572 return (AE_OK);
1573 }
1574 }
1575 }
1576 mutex_exit(&cpu_map_lock);
1577
1578 return (AE_ERROR);
1579 }
1580
1581 /*
1582 * Determine if this object is a processor
1583 */
1584 static ACPI_STATUS
1585 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1586 {
1587 ACPI_STATUS status;
1588 ACPI_OBJECT_TYPE objtype;
1589 unsigned long acpi_id;
1590 ACPI_BUFFER rb;
1591 ACPI_DEVICE_INFO *di;
1592
1593 if (AcpiGetType(obj, &objtype) != AE_OK)
1594 return (AE_OK);
1595
1596 if (objtype == ACPI_TYPE_PROCESSOR) {
1597 /* process a Processor */
1598 rb.Pointer = NULL;
1599 rb.Length = ACPI_ALLOCATE_BUFFER;
1600 status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1601 ACPI_TYPE_PROCESSOR);
1602 if (status != AE_OK) {
1603 cmn_err(CE_WARN, "!acpica: error probing Processor");
1604 return (status);
1605 }
1606 acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1607 AcpiOsFree(rb.Pointer);
1608 } else if (objtype == ACPI_TYPE_DEVICE) {
1609 /* process a processor Device */
1610 status = AcpiGetObjectInfo(obj, &di);
1611 if (status != AE_OK) {
1612 cmn_err(CE_WARN,
1613 "!acpica: error probing Processor Device\n");
1614 return (status);
1615 }
1616
1617 if (!(di->Valid & ACPI_VALID_UID) ||
1618 ddi_strtoul(di->UniqueId.String, NULL, 10, &acpi_id) != 0) {
1619 ACPI_FREE(di);
1620 cmn_err(CE_WARN,
1621 "!acpica: error probing Processor Device _UID\n");
1622 return (AE_ERROR);
1623 }
1624 ACPI_FREE(di);
1625 }
1626 (void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1627
1628 return (AE_OK);
1629 }
1630
1631 void
1632 scan_d2a_map(void)
1633 {
1634 dev_info_t *dip, *cdip;
1635 ACPI_HANDLE acpiobj;
1636 char *device_type_prop;
1637 int bus;
1638 static int map_error = 0;
1639
1640 if (map_error || (d2a_done != 0))
1641 return;
1642
1643 scanning_d2a_map = 1;
1644
1645 /*
1646 * Find all child-of-root PCI buses, and find their corresponding
1647 * ACPI child-of-root PCI nodes. For each one, add to the
1648 * d2a table.
1649 */
1650
1651 for (dip = ddi_get_child(ddi_root_node());
1652 dip != NULL;
1653 dip = ddi_get_next_sibling(dip)) {
1654
1655 /* prune non-PCI nodes */
1656 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1657 DDI_PROP_DONTPASS,
1658 "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
1659 continue;
1660
1661 if ((strcmp("pci", device_type_prop) != 0) &&
1662 (strcmp("pciex", device_type_prop) != 0)) {
1663 ddi_prop_free(device_type_prop);
1664 continue;
1665 }
1666
1667 ddi_prop_free(device_type_prop);
1668
1669 /*
1670 * To get the bus number of dip, get its first child and read
1671 * that child's bus number. If there is no child, just continue,
1672 * because we don't care about bus nodes with no children anyway.
1673 */
1674 if ((cdip = ddi_get_child(dip)) == NULL)
1675 continue;
1676
1677 if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
1678 #ifdef D2ADEBUG
1679 cmn_err(CE_WARN, "Can't get bus number of PCI child?");
1680 #endif
1681 map_error = 1;
1682 scanning_d2a_map = 0;
1683 d2a_done = 1;
1684 return;
1685 }
1686
1687 if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
1688 #ifdef D2ADEBUG
1689 cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
1690 #endif
1691 map_error = 1;
1692 continue;
1693 }
1694
1695 acpica_tag_devinfo(dip, acpiobj);
1696
1697 /* call recursively to enumerate subtrees */
1698 scan_d2a_subtree(dip, acpiobj, bus);
1699 }
1700
1701 scanning_d2a_map = 0;
1702 d2a_done = 1;
1703 }
1704
1705 /*
1706 * For all ACPI child devices of acpiobj, find the matching dip
1707 * under the "dip" argument (matching means "matches dev/fn").
1708 * The bus is assumed to already match in the caller and is used
1709 * here only to record in the d2a entry. Recurse if necessary.
1710 */
1711 static void
1712 scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
1713 {
1714 int acpi_devfn, hid;
1715 ACPI_HANDLE acld;
1716 dev_info_t *dcld;
1717 int dcld_b, dcld_d, dcld_f;
1718 int dev, func;
1719 char *device_type_prop;
1720
1721 acld = NULL;
1722 while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
1723 == AE_OK) {
1724 /* get the dev/func we're looking for in the devinfo tree */
1725 if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
1726 continue;
1727 dev = (acpi_devfn >> 16) & 0xFFFF;
1728 func = acpi_devfn & 0xFFFF;
1729
1730 /* look through all the immediate children of dip */
1731 for (dcld = ddi_get_child(dip); dcld != NULL;
1732 dcld = ddi_get_next_sibling(dcld)) {
1733 if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
1734 continue;
1735
1736 /* dev must match; function must match or wildcard */
1737 if (dcld_d != dev ||
1738 (func != 0xFFFF && func != dcld_f))
1739 continue;
1740 bus = dcld_b;
1741
1742 /* found a match, record it */
1743 acpica_tag_devinfo(dcld, acld);
1744
1745 /* if we find a bridge, recurse from here */
1746 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
1747 DDI_PROP_DONTPASS, "device_type",
1748 &device_type_prop) == DDI_PROP_SUCCESS) {
1749 if ((strcmp("pci", device_type_prop) == 0) ||
1750 (strcmp("pciex", device_type_prop) == 0))
1751 scan_d2a_subtree(dcld, acld, bus);
1752 ddi_prop_free(device_type_prop);
1753 }
1754
1755 /* done finding a match, so break now */
1756 break;
1757 }
1758 }
1759 }
1760
1761 /*
1762 * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1763 */
1764 int
1765 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1766 {
1767 pci_regspec_t *pci_rp;
1768 int len;
1769
1770 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1771 "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1772 return (-1);
1773
1774 if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1775 ddi_prop_free(pci_rp);
1776 return (-1);
1777 }
1778 if (bus != NULL)
1779 *bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1780 if (device != NULL)
1781 *device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1782 if (func != NULL)
1783 *func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1784 ddi_prop_free(pci_rp);
1785 return (0);
1786 }
1787
1788 /*
1789 * Return the ACPI device node matching this dev_info node, if it
1790 * exists in the ACPI tree.
1791 */
1792 ACPI_STATUS
1793 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1794 {
1795 ACPI_STATUS status;
1796 char *acpiname;
1797
1798 #ifdef DEBUG
1799 if (d2a_done == 0)
1800 cmn_err(CE_WARN, "!acpica_get_handle:"
1801 " no ACPI mapping for %s", ddi_node_name(dip));
1802 #endif
1803
1804 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1805 "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1806 return (AE_ERROR);
1807 }
1808
1809 status = AcpiGetHandle(NULL, acpiname, rh);
1810 ddi_prop_free((void *)acpiname);
1811 return (status);
1812 }
1813
1814
1815
1816 /*
1817 * Manage OS data attachment to ACPI nodes
1818 */
1819
1820 /*
1821 * Return the (dev_info_t *) associated with the ACPI node.
1822 */
1823 ACPI_STATUS
1824 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1825 {
1826 ACPI_STATUS status;
1827 void *ptr;
1828
1829 status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1830 if (status == AE_OK)
1831 *dipp = (dev_info_t *)ptr;
1832
1833 return (status);
1834 }
1835
1836 /*
1837 * Set the dev_info_t associated with the ACPI node.
1838 */
1839 static ACPI_STATUS
1840 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1841 {
1842 ACPI_STATUS status;
1843
1844 status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1845 return (status);
1846 }
1847
1848 /*
1849 * Unset the dev_info_t associated with the ACPI node.
1850 */
1851 static ACPI_STATUS
1852 acpica_unset_devinfo(ACPI_HANDLE obj)
1853 {
1854 return (AcpiDetachData(obj, acpica_devinfo_handler));
1855 }
1856
1857 /*
1858 * Handler for AcpiAttachData(); nothing to do when data is detached.
1859 */
1860 void
1861 acpica_devinfo_handler(ACPI_HANDLE obj, void *data)
1862 {
1863 /* no-op */
1864 }
1865
1866 ACPI_STATUS
1867 acpica_build_processor_map(void)
1868 {
1869 ACPI_STATUS status;
1870 void *rv;
1871
1872 /*
1873 * shouldn't be called more than once anyway
1874 */
1875 if (cpu_map_built)
1876 return (AE_OK);
1877
1878 /*
1879 * The ACPI device configuration driver has already built the mapping
1880 * between processor id and object handle; no need to probe again.
1881 */
1882 if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1883 cpu_map_built = 1;
1884 return (AE_OK);
1885 }
1886
1887 /*
1888 * Look for Processor objects
1889 */
1890 status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
1891 ACPI_ROOT_OBJECT,
1892 4,
1893 acpica_probe_processor,
1894 NULL,
1895 NULL,
1896 &rv);
1897 ASSERT(status == AE_OK);
1898
1899 /*
1900 * Look for processor Device objects
1901 */
1902 status = AcpiGetDevices("ACPI0007",
1903 acpica_probe_processor,
1904 NULL,
1905 &rv);
1906 ASSERT(status == AE_OK);
1907 cpu_map_built = 1;
1908
1909 return (status);
1910 }
1911
1912 /*
1913 * Grow cpu map table on demand.
1914 */
1915 static void
1916 acpica_grow_cpu_map(void)
1917 {
1918 if (cpu_map_count == cpu_map_count_max) {
1919 size_t sz;
1920 struct cpu_map_item **new_map;
1921
1922 ASSERT(cpu_map_count_max < INT_MAX / 2);
1923 cpu_map_count_max += max_ncpus;
1924 new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1925 KM_SLEEP);
1926 if (cpu_map_count != 0) {
1927 ASSERT(cpu_map != NULL);
1928 sz = sizeof (cpu_map[0]) * cpu_map_count;
1929 kcopy(cpu_map, new_map, sz);
1930 kmem_free(cpu_map, sz);
1931 }
1932 cpu_map = new_map;
1933 }
1934 }
1935
1936 /*
1937 * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1938 * ACPI handle). The mapping table is set up in two steps:
1939 * 1) acpica_add_processor_to_map() builds the mapping among APIC id, ACPI
1940 * processor id and ACPI object handle.
1941 * 2) acpica_map_cpu() builds the mapping between cpu id and ACPI processor id.
1942 * On systems which have ACPI device configuration for CPUs enabled,
1943 * acpica_map_cpu() will be called after acpica_add_processor_to_map();
1944 * otherwise acpica_map_cpu() will be called before
1945 * acpica_add_processor_to_map().
1946 */
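/*
 * Illustrative sketch (added): for a hypothetical CPU with ACPI processor
 * id 2, APIC id 4 and cpu id 1, the cpu_map_item is filled in from two
 * directions:
 *
 *	acpica_add_processor_to_map(2, obj, 4)	sets proc_id, apic_id, obj
 *						(cpu_id stays -1)
 *	acpica_map_cpu(1, 2)			sets cpu_id to 1
 *
 * Whichever call runs first creates the item; the other finds it by
 * proc_id and fills in the missing fields.
 */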
1947 ACPI_STATUS
1948 acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
1949 {
1950 int i;
1951 ACPI_STATUS rc = AE_OK;
1952 struct cpu_map_item *item = NULL;
1953
1954 ASSERT(obj != NULL);
1955 if (obj == NULL) {
1956 return (AE_ERROR);
1957 }
1958
1959 mutex_enter(&cpu_map_lock);
1960
1961 /*
1962 * Special case for uppc:
1963 * If we're a uppc system and ACPI device configuration for CPUs has
1964 * been disabled, there won't be a CPU map yet because the uppc psm
1965 * doesn't call acpica_map_cpu(). So create one and use the passed-in
1966 * processor as CPU 0.
1967 * Assumption: the first CPU returned by
1968 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
1969 * Unfortunately there appears to be no good way to ASSERT this.
1970 */
1971 if (cpu_map == NULL &&
1972 !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1973 acpica_grow_cpu_map();
1974 ASSERT(cpu_map != NULL);
1975 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
1976 item->cpu_id = 0;
1977 item->proc_id = acpi_id;
1978 item->apic_id = apic_id;
1979 item->obj = obj;
1980 cpu_map[0] = item;
1981 cpu_map_count = 1;
1982 mutex_exit(&cpu_map_lock);
1983 return (AE_OK);
1984 }
1985
1986 for (i = 0; i < cpu_map_count; i++) {
1987 if (cpu_map[i]->obj == obj) {
1988 rc = AE_ALREADY_EXISTS;
1989 break;
1990 } else if (cpu_map[i]->proc_id == acpi_id) {
1991 ASSERT(item == NULL);
1992 item = cpu_map[i];
1993 }
1994 }
1995
1996 if (rc == AE_OK) {
1997 if (item != NULL) {
1998 /*
1999 * ACPI alias objects may cause more than one object
2000 * with the same ACPI processor id; only remember the
2001 * first object encountered.
2002 */
2003 if (item->obj == NULL) {
2004 item->obj = obj;
2005 item->apic_id = apic_id;
2006 } else {
2007 rc = AE_ALREADY_EXISTS;
2008 }
2009 } else if (cpu_map_count >= INT_MAX / 2) {
2010 rc = AE_NO_MEMORY;
2011 } else {
2012 acpica_grow_cpu_map();
2013 ASSERT(cpu_map != NULL);
2014 ASSERT(cpu_map_count < cpu_map_count_max);
2015 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2016 item->cpu_id = -1;
2017 item->proc_id = acpi_id;
2018 item->apic_id = apic_id;
2019 item->obj = obj;
2020 cpu_map[cpu_map_count] = item;
2021 cpu_map_count++;
2022 }
2023 }
2024
2025 mutex_exit(&cpu_map_lock);
2026
2027 return (rc);
2028 }
2029
2030 ACPI_STATUS
2031 acpica_remove_processor_from_map(UINT32 acpi_id)
2032 {
2033 int i;
2034 ACPI_STATUS rc = AE_NOT_EXIST;
2035
2036 mutex_enter(&cpu_map_lock);
2037 for (i = 0; i < cpu_map_count; i++) {
2038 if (cpu_map[i]->proc_id != acpi_id) {
2039 continue;
2040 }
2041 cpu_map[i]->obj = NULL;
2042 /* Free item if no more reference to it. */
2043 if (cpu_map[i]->cpu_id == -1) {
2044 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2045 cpu_map[i] = NULL;
2046 cpu_map_count--;
2047 if (i != cpu_map_count) {
2048 cpu_map[i] = cpu_map[cpu_map_count];
2049 cpu_map[cpu_map_count] = NULL;
2050 }
2051 }
2052 rc = AE_OK;
2053 break;
2054 }
2055 mutex_exit(&cpu_map_lock);
2056
2057 return (rc);
2058 }
2059
2060 ACPI_STATUS
2061 acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
2062 {
2063 int i;
2064 ACPI_STATUS rc = AE_OK;
2065 struct cpu_map_item *item = NULL;
2066
2067 ASSERT(cpuid != -1);
2068 if (cpuid == -1) {
2069 return (AE_ERROR);
2070 }
2071
2072 mutex_enter(&cpu_map_lock);
2073 cpu_map_called = 1;
2074 for (i = 0; i < cpu_map_count; i++) {
2075 if (cpu_map[i]->cpu_id == cpuid) {
2076 rc = AE_ALREADY_EXISTS;
2077 break;
2078 } else if (cpu_map[i]->proc_id == acpi_id) {
2079 ASSERT(item == NULL);
2080 item = cpu_map[i];
2081 }
2082 }
2083 if (rc == AE_OK) {
2084 if (item != NULL) {
2085 if (item->cpu_id == -1) {
2086 item->cpu_id = cpuid;
2087 } else {
2088 rc = AE_ALREADY_EXISTS;
2089 }
2090 } else if (cpu_map_count >= INT_MAX / 2) {
2091 rc = AE_NO_MEMORY;
2092 } else {
2093 acpica_grow_cpu_map();
2094 ASSERT(cpu_map != NULL);
2095 ASSERT(cpu_map_count < cpu_map_count_max);
2096 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2097 item->cpu_id = cpuid;
2098 item->proc_id = acpi_id;
2099 item->apic_id = UINT32_MAX;
2100 item->obj = NULL;
2101 cpu_map[cpu_map_count] = item;
2102 cpu_map_count++;
2103 }
2104 }
2105 mutex_exit(&cpu_map_lock);
2106
2107 return (rc);
2108 }
2109
2110 ACPI_STATUS
2111 acpica_unmap_cpu(processorid_t cpuid)
2112 {
2113 int i;
2114 ACPI_STATUS rc = AE_NOT_EXIST;
2115
2116 ASSERT(cpuid != -1);
2117 if (cpuid == -1) {
2118 return (rc);
2119 }
2120
2121 mutex_enter(&cpu_map_lock);
2122 for (i = 0; i < cpu_map_count; i++) {
2123 if (cpu_map[i]->cpu_id != cpuid) {
2124 continue;
2125 }
2126 cpu_map[i]->cpu_id = -1;
2127 /* Free item if no more reference. */
2128 if (cpu_map[i]->obj == NULL) {
2129 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2130 cpu_map[i] = NULL;
2131 cpu_map_count--;
2132 if (i != cpu_map_count) {
2133 cpu_map[i] = cpu_map[cpu_map_count];
2134 cpu_map[cpu_map_count] = NULL;
2135 }
2136 }
2137 rc = AE_OK;
2138 break;
2139 }
2140 mutex_exit(&cpu_map_lock);
2141
2142 return (rc);
2143 }
2144
2145 ACPI_STATUS
2146 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2147 {
2148 int i;
2149 ACPI_STATUS rc = AE_NOT_EXIST;
2150
2151 ASSERT(cpuid != -1);
2152 if (cpuid == -1) {
2153 return (rc);
2154 }
2155
2156 mutex_enter(&cpu_map_lock);
2157 for (i = 0; i < cpu_map_count; i++) {
2158 if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2159 *hdlp = cpu_map[i]->obj;
2160 rc = AE_OK;
2161 break;
2162 }
2163 }
2164 mutex_exit(&cpu_map_lock);
2165
2166 return (rc);
2167 }
2168
2169 ACPI_STATUS
2170 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2171 {
2172 int i;
2173 ACPI_STATUS rc = AE_NOT_EXIST;
2174
2175 mutex_enter(&cpu_map_lock);
2176 for (i = 0; i < cpu_map_count; i++) {
2177 if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2178 *hdlp = cpu_map[i]->obj;
2179 rc = AE_OK;
2180 break;
2181 }
2182 }
2183 mutex_exit(&cpu_map_lock);
2184
2185 return (rc);
2186 }
2187
2188 ACPI_STATUS
2189 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2190 {
2191 int i;
2192 ACPI_STATUS rc = AE_NOT_EXIST;
2193
2194 ASSERT(apicid != UINT32_MAX);
2195 if (apicid == UINT32_MAX) {
2196 return (rc);
2197 }
2198
2199 mutex_enter(&cpu_map_lock);
2200 for (i = 0; i < cpu_map_count; i++) {
2201 if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2202 *hdlp = cpu_map[i]->obj;
2203 rc = AE_OK;
2204 break;
2205 }
2206 }
2207 mutex_exit(&cpu_map_lock);
2208
2209 return (rc);
2210 }
2211
2212 ACPI_STATUS
2213 acpica_get_cpu_id_by_object(ACPI_HANDLE hdl, processorid_t *cpuidp)
2214 {
2215 int i;
2216 ACPI_STATUS rc = AE_NOT_EXIST;
2217
2218 ASSERT(cpuidp != NULL);
2219 if (hdl == NULL || cpuidp == NULL) {
2220 return (rc);
2221 }
2222
2223 *cpuidp = -1;
2224 mutex_enter(&cpu_map_lock);
2225 for (i = 0; i < cpu_map_count; i++) {
2226 if (cpu_map[i]->obj == hdl && cpu_map[i]->cpu_id != -1) {
2227 *cpuidp = cpu_map[i]->cpu_id;
2228 rc = AE_OK;
2229 break;
2230 }
2231 }
2232 mutex_exit(&cpu_map_lock);
2233
2234 return (rc);
2235 }
2236
2237 ACPI_STATUS
2238 acpica_get_apicid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2239 {
2240 int i;
2241 ACPI_STATUS rc = AE_NOT_EXIST;
2242
2243 ASSERT(rp != NULL);
2244 if (hdl == NULL || rp == NULL) {
2245 return (rc);
2246 }
2247
2248 *rp = UINT32_MAX;
2249 mutex_enter(&cpu_map_lock);
2250 for (i = 0; i < cpu_map_count; i++) {
2251 if (cpu_map[i]->obj == hdl &&
2252 cpu_map[i]->apic_id != UINT32_MAX) {
2253 *rp = cpu_map[i]->apic_id;
2254 rc = AE_OK;
2255 break;
2256 }
2257 }
2258 mutex_exit(&cpu_map_lock);
2259
2260 return (rc);
2261 }
2262
2263 ACPI_STATUS
2264 acpica_get_procid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2265 {
2266 int i;
2267 ACPI_STATUS rc = AE_NOT_EXIST;
2268
2269 ASSERT(rp != NULL);
2270 if (hdl == NULL || rp == NULL) {
2271 return (rc);
2272 }
2273
2274 *rp = UINT32_MAX;
2275 mutex_enter(&cpu_map_lock);
2276 for (i = 0; i < cpu_map_count; i++) {
2277 if (cpu_map[i]->obj == hdl) {
2278 *rp = cpu_map[i]->proc_id;
2279 rc = AE_OK;
2280 break;
2281 }
2282 }
2283 mutex_exit(&cpu_map_lock);
2284
2285 return (rc);
2286 }
2287
2288 void
2289 acpica_set_core_feature(uint64_t features)
2290 {
2291 atomic_or_64(&acpica_core_features, features);
2292 }
2293
2294 void
2295 acpica_clear_core_feature(uint64_t features)
2296 {
2297 atomic_and_64(&acpica_core_features, ~features);
2298 }
2299
2300 uint64_t
2301 acpica_get_core_feature(uint64_t features)
2302 {
2303 return (acpica_core_features & features);
2304 }
2305
2306 void
2307 acpica_set_devcfg_feature(uint64_t features)
2308 {
2309 atomic_or_64(&acpica_devcfg_features, features);
2310 }
2311
2312 void
2313 acpica_clear_devcfg_feature(uint64_t features)
2314 {
2315 atomic_and_64(&acpica_devcfg_features, ~features);
2316 }
2317
2318 uint64_t
2319 acpica_get_devcfg_feature(uint64_t features)
2320 {
2321 return (acpica_devcfg_features & features);
2322 }
2323
2324 void
2325 acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
2326 {
2327 *gbl_FADT = &AcpiGbl_FADT;
2328 }
2329
2330 void
2331 acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
2332 {
2333 if (pstates && AcpiGbl_FADT.PstateControl != 0)
2334 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2335 AcpiGbl_FADT.PstateControl);
2336
2337 if (cstates && AcpiGbl_FADT.CstControl != 0)
2338 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2339 AcpiGbl_FADT.CstControl);
2340 }
2341