/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
 * PSMI 1.5 extensions are supported in Solaris Nevada.
 */
#define	PSMI_1_5

#include <sys/processor.h>
#include <sys/time.h>
#include <sys/psm.h>
#include <sys/smp_impldefs.h>
#include <sys/cram.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/psm_common.h>
#include "apic.h"
#include <sys/pit.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pci.h>
#include <sys/promif.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/uadmin.h>
#include <sys/panic.h>
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/trap.h>
#include <sys/machsystm.h>
#include <sys/cpuvar.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/cyclic.h>
#include <sys/note.h>
#include <sys/pci_intr_lib.h>

/*
 *	Local Function Prototypes
 */
static void apic_init_intr();
static void apic_ret();
static int apic_handle_defconf();
static int apic_parse_mpct(caddr_t mpct, int bypass);
static struct apic_mpfps_hdr *apic_find_fps_sig(caddr_t fptr, int size);
static int apic_checksum(caddr_t bptr, int len);
static int get_apic_cmd1();
static int get_apic_pri();
static int apic_find_bus_type(char *bus);
static int apic_find_bus(int busid);
static int apic_find_bus_id(int bustype);
static struct apic_io_intr *apic_find_io_intr(int irqno);
int apic_allocate_irq(int irq);
static int apic_find_free_irq(int start, int end);
static uchar_t apic_allocate_vector(int ipl, int irq, int pri);
static void apic_modify_vector(uchar_t vector, int irq);
static void apic_mark_vector(uchar_t oldvector, uchar_t newvector);
static uchar_t apic_xlate_vector(uchar_t oldvector);
static void apic_xlate_vector_free_timeout_handler(void *arg);
static void apic_free_vector(uchar_t vector);
static void apic_reprogram_timeout_handler(void *arg);
static int apic_check_stuck_interrupt(apic_irq_t *irq_ptr, int old_bind_cpu,
    int new_bind_cpu, volatile int32_t *ioapic, int intin_no, int which_irq);
static int apic_setup_io_intr(apic_irq_t *irqptr, int irq);
static int apic_setup_io_intr_deferred(apic_irq_t *irqptr, int irq);
static void apic_record_rdt_entry(apic_irq_t *irqptr, int irq);
static struct apic_io_intr *apic_find_io_intr_w_busid(int irqno, int busid);
static int apic_find_intin(uchar_t ioapic, uchar_t intin);
static int apic_handle_pci_pci_bridge(dev_info_t *idip, int child_devno,
    int child_ipin, struct apic_io_intr **intrp);
static int apic_setup_irq_table(dev_info_t *dip, int irqno,
    struct apic_io_intr *intrp, struct intrspec *ispec, iflag_t *intr_flagp,
    int type);
static int apic_setup_sci_irq_table(int irqno, uchar_t ipl,
    iflag_t *intr_flagp);
static void apic_nmi_intr(caddr_t arg);
uchar_t apic_bind_intr(dev_info_t *dip, int irq, uchar_t ioapicid,
    uchar_t intin);
static int apic_rebind(apic_irq_t *irq_ptr, int bind_cpu, int acquire_lock,
    int when);
static int apic_rebind_all(apic_irq_t *irq_ptr, int bind_cpu, int safe);
static void apic_intr_redistribute();
static void apic_cleanup_busy();
static void apic_set_pwroff_method_from_mpcnfhdr(struct apic_mp_cnf_hdr *hdrp);
int apic_introp_xlate(dev_info_t *dip, struct intrspec *ispec, int type);

/* ACPI support routines */
static int acpi_probe(void);
static int apic_acpi_irq_configure(acpi_psm_lnk_t *acpipsmlnkp, dev_info_t *dip,
    int *pci_irqp, iflag_t *intr_flagp);

static int apic_acpi_translate_pci_irq(dev_info_t *dip, int busid, int devid,
    int ipin, int *pci_irqp, iflag_t *intr_flagp);
static uchar_t acpi_find_ioapic(int irq);
static int acpi_intr_compatible(iflag_t iflag1, iflag_t iflag2);

/*
 *	standard MP entries
 */
static int apic_probe();
static int apic_clkinit();
static int apic_getclkirq(int ipl);
static uint_t apic_calibrate(volatile uint32_t *addr,
    uint16_t *pit_ticks_adj);
static hrtime_t apic_gettime();
static hrtime_t apic_gethrtime();
static void apic_init();
static void apic_picinit(void);
static void apic_cpu_start(processorid_t cpun, caddr_t rm_code);
static int apic_post_cpu_start(void);
static void apic_send_ipi(int cpun, int ipl);
static void apic_set_softintr(int softintr);
static void apic_set_idlecpu(processorid_t cpun);
static void apic_unset_idlecpu(processorid_t cpun);
static int apic_softlvl_to_irq(int ipl);
static int apic_intr_enter(int ipl, int *vect);
static void apic_intr_exit(int ipl, int vect);
static void apic_setspl(int ipl);
static int apic_addspl(int ipl, int vector, int min_ipl, int max_ipl);
static int apic_delspl(int ipl, int vector, int min_ipl, int max_ipl);
static void apic_shutdown(int cmd, int fcn);
static void apic_preshutdown(int cmd, int fcn);
static int apic_disable_intr(processorid_t cpun);
static void apic_enable_intr(processorid_t cpun);
static processorid_t apic_get_next_processorid(processorid_t cpun);
static int apic_get_ipivect(int ipl, int type);
static void apic_timer_reprogram(hrtime_t time);
static void apic_timer_enable(void);
static void apic_timer_disable(void);
static void apic_post_cyclic_setup(void *arg);
extern int apic_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);

static int apic_oneshot = 0;
int apic_oneshot_enable = 1;	/* to allow disabling one-shot capability */

/*
 * These variables are frequently accessed in apic_intr_enter(),
 * apic_intr_exit and apic_setspl, so group them together
 */
volatile uint32_t *apicadr = NULL;	/* virtual addr of local APIC	*/
int apic_setspl_delay = 1;		/* apic_setspl - delay enable	*/
int apic_clkvect;

/* ACPI SCI interrupt configuration; -1 if SCI not used */
int apic_sci_vect = -1;
iflag_t apic_sci_flags;

/* vector at which error interrupts come in */
int apic_errvect;
int apic_enable_error_intr = 1;
int apic_error_display_delay = 100;

/* vector at which performance counter overflow interrupts come in */
int apic_cpcovf_vect;
int apic_enable_cpcovf_intr = 1;

/* Max wait time (in microsecs) for flags to clear in an RDT entry. */
static int apic_max_usecs_clear_pending = 1000;

/* Amt of usecs to wait before checking if RDT flags have reset. */
#define	APIC_USECS_PER_WAIT_INTERVAL 100

/* Maximum number of times to retry reprogramming via the timeout */
#define	APIC_REPROGRAM_MAX_TIMEOUTS 10

/* timeout delay for IOAPIC delayed reprogramming */
#define	APIC_REPROGRAM_TIMEOUT_DELAY 5	/* microseconds */
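/*
 * Editor's note (illustrative arithmetic, not part of the original source):
 * with the defaults above, a busy RDT entry is re-checked every
 * APIC_USECS_PER_WAIT_INTERVAL microseconds, so it is polled at most
 *
 *	apic_max_usecs_clear_pending / APIC_USECS_PER_WAIT_INTERVAL
 *	    = 1000 / 100 = 10
 *
 * times before the reprogramming is presumably handed off to the delayed
 * (timeout-driven) path, which itself retries at most
 * APIC_REPROGRAM_MAX_TIMEOUTS (10) times, APIC_REPROGRAM_TIMEOUT_DELAY (5)
 * microseconds apart.
 */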
/* Parameter to apic_rebind(): Should reprogramming be done now or later? */
#define	DEFERRED 1
#define	IMMEDIATE 0

/*
 * number of bits per byte, from <sys/param.h>
 */
#define	UCHAR_MAX	((1 << NBBY) - 1)

uchar_t	apic_reserved_irqlist[MAX_ISA_IRQ];

/*
 * The following vector assignments influence the value of ipltopri and
 * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program
 * idle to 0 and IPL 0 to 0x10 to differentiate idle in case
 * we care to do so in future. Note some IPLs which are rarely used
 * will share the vector ranges and heavily used IPLs (5 and 6) have
 * a wide range.
 *	IPL	Vector range.		as passed to intr_enter
 *	0	none.
 *	1,2,3	0x20-0x2f		0x0-0xf
 *	4	0x30-0x3f		0x10-0x1f
 *	5	0x40-0x5f		0x20-0x3f
 *	6	0x60-0x7f		0x40-0x5f
 *	7,8,9	0x80-0x8f		0x60-0x6f
 *	10	0x90-0x9f		0x70-0x7f
 *	11	0xa0-0xaf		0x80-0x8f
 *	...	...
 *	16	0xf0-0xff		0xd0-0xdf
 */
uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
	3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 16
};
/*
 * The ipl of an ISR at vector X is apic_vectortoipl[X >> 4].
 * NOTE that this is vector as passed into intr_enter which is
 * programmed vector - 0x20 (APIC_BASE_VECT)
 */

uchar_t	apic_ipltopri[MAXIPL + 1];	/* unix ipl to apic pri	*/
	/* The taskpri to be programmed into apic to mask given ipl */

#if defined(__amd64)
uchar_t	apic_cr8pri[MAXIPL + 1];	/* unix ipl to cr8 pri	*/
#endif
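/*
 * Editor's illustration (not part of the original source), using the table
 * and note above: a source programmed at hardware vector 0x62 reaches
 * intr_enter as 0x62 - 0x20 (APIC_BASE_VECT) = 0x42, so
 *
 *	ipl = apic_vectortoipl[0x42 >> 4] = apic_vectortoipl[4] = 6;
 *
 * and apic_ipltopri[6] is the task priority written to the local APIC when
 * interrupts at that ipl must be masked.
 */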
/*
 * Patchable global variables.
 */
int	apic_forceload = 0;

#define	INTR_ROUND_ROBIN_WITH_AFFINITY	0
#define	INTR_ROUND_ROBIN	1
#define	INTR_LOWEST_PRIORITY	2

int	apic_intr_policy = INTR_ROUND_ROBIN_WITH_AFFINITY;

static int apic_next_bind_cpu = 2;	/* For round robin assignment */
					/* start with cpu 1 */

int	apic_coarse_hrtime = 1;		/* 0 - use accurate slow gethrtime() */
					/* 1 - use gettime() for performance */
int	apic_flat_model = 0;		/* 0 - clustered. 1 - flat */
int	apic_enable_hwsoftint = 0;	/* 0 - disable, 1 - enable */
int	apic_enable_bind_log = 1;	/* 1 - display interrupt binding log */
int	apic_panic_on_nmi = 0;
int	apic_panic_on_apic_error = 0;

int	apic_verbose = 0;

/* Flag definitions for apic_verbose */
#define	APIC_VERBOSE_IOAPIC_FLAG		0x00000001
#define	APIC_VERBOSE_IRQ_FLAG			0x00000002
#define	APIC_VERBOSE_POWEROFF_FLAG		0x00000004
#define	APIC_VERBOSE_POWEROFF_PAUSE_FLAG	0x00000008


#define	APIC_VERBOSE_IOAPIC(fmt) \
	if (apic_verbose & APIC_VERBOSE_IOAPIC_FLAG) \
		cmn_err fmt;

#define	APIC_VERBOSE_IRQ(fmt) \
	if (apic_verbose & APIC_VERBOSE_IRQ_FLAG) \
		cmn_err fmt;

#define	APIC_VERBOSE_POWEROFF(fmt) \
	if (apic_verbose & APIC_VERBOSE_POWEROFF_FLAG) \
		prom_printf fmt;


/* Now the ones for Dynamic Interrupt distribution */
int	apic_enable_dynamic_migration = 1;

/*
 * If enabled, the distribution works as follows:
 * On every interrupt entry, the current ipl for the CPU is set in cpu_info
 * and the irq corresponding to the ipl is also set in the aci_current array.
 * interrupt exit and setspl (due to soft interrupts) will cause the current
 * ipl to be changed. This is cache friendly as these frequently used
 * paths write into a per cpu structure.
 *
 * Sampling is done by checking the structures for all CPUs and incrementing
 * the busy field of the irq (if any) executing on each CPU and the busy field
 * of the corresponding CPU.
 * In periodic mode this is done on every clock interrupt.
 * In one-shot mode, this is done thru a cyclic with an interval of
 * apic_redistribute_sample_interval (default 10 milli sec).
 *
 * Every apic_sample_factor_redistribution times we sample, we do computations
 * to decide which interrupt needs to be migrated (see comments
 * before apic_intr_redistribute()).
 */
/*
 * Following 3 variables start as % and can be patched or set using an
 * API to be defined in future. They will be scaled to
 * sample_factor_redistribution which is in turn set to hertz+1 (in periodic
 * mode), or 101 in one-shot mode to stagger it away from one sec processing
 */

int	apic_int_busy_mark = 60;
int	apic_int_free_mark = 20;
int	apic_diff_for_redistribution = 10;

/* sampling interval for interrupt redistribution for dynamic migration */
int	apic_redistribute_sample_interval = NANOSEC / 100; /* 10 millisec */

/*
 * number of times we sample before deciding to redistribute interrupts
 * for dynamic migration
 */
int	apic_sample_factor_redistribution = 101;
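/*
 * Editor's note (derived from the defaults above, not part of the original
 * source): in one-shot mode a sample is taken every
 * apic_redistribute_sample_interval = NANOSEC / 100 = 10 ms, so a
 * redistribution decision is made about every
 *
 *	apic_sample_factor_redistribution * 10 ms = 101 * 10 ms ~= 1.01 s,
 *
 * staggered away from the once-per-second processing as noted above; in
 * periodic mode the factor is hertz + 1 clock ticks, again just over one
 * second. The three percentage marks appear to mean: an interrupt using
 * >= 60% of a CPU is considered busy, a CPU under 20% is considered free,
 * and a migration is only worthwhile if it changes the balance by at
 * least 10%.
 */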
/* timeout for xlate_vector, mark_vector */
int	apic_revector_timeout = 16 * 10000;	/* 160 millisec */

int	apic_redist_cpu_skip = 0;
int	apic_num_imbalance = 0;
int	apic_num_rebind = 0;

int	apic_nproc = 0;
int	apic_defconf = 0;
int	apic_irq_translate = 0;
int	apic_spec_rev = 0;
int	apic_imcrp = 0;

int	apic_use_acpi = 1;	/* 1 = use ACPI, 0 = don't use ACPI */
int	apic_use_acpi_madt_only = 0;	/* 1=ONLY use MADT from ACPI */

/*
 * For interrupt link devices, if apic_unconditional_srs is set, an irq resource
 * will be assigned (via _SRS). If it is not set, use the current
 * irq setting (via _CRS), but only if that irq is in the set of possible
 * irqs (returned by _PRS) for the device.
 */
int	apic_unconditional_srs = 1;

/*
 * For interrupt link devices, if apic_prefer_crs is set when we are
 * assigning an IRQ resource to a device, prefer the current IRQ setting
 * over other possible irq settings under same conditions.
 */

int	apic_prefer_crs = 1;


/* minimum number of timer ticks to program to */
int	apic_min_timer_ticks = 1;
/*
 *	Local static data
 */
static struct psm_ops apic_ops = {
	apic_probe,

	apic_init,
	apic_picinit,
	apic_intr_enter,
	apic_intr_exit,
	apic_setspl,
	apic_addspl,
	apic_delspl,
	apic_disable_intr,
	apic_enable_intr,
	apic_softlvl_to_irq,
	apic_set_softintr,

	apic_set_idlecpu,
	apic_unset_idlecpu,

	apic_clkinit,
	apic_getclkirq,
	(void (*)(void))NULL,			/* psm_hrtimeinit */
	apic_gethrtime,

	apic_get_next_processorid,
	apic_cpu_start,
	apic_post_cpu_start,
	apic_shutdown,
	apic_get_ipivect,
	apic_send_ipi,

	(int (*)(dev_info_t *, int))NULL,	/* psm_translate_irq */
	(int (*)(todinfo_t *))NULL,		/* psm_tod_get */
	(int (*)(todinfo_t *))NULL,		/* psm_tod_set */
	(void (*)(int, char *))NULL,		/* psm_notify_error */
	(void (*)(int))NULL,			/* psm_notify_func */
	apic_timer_reprogram,
	apic_timer_enable,
	apic_timer_disable,
	apic_post_cyclic_setup,
	apic_preshutdown,
	apic_intr_ops			/* Advanced DDI Interrupt framework */
};


static struct psm_info apic_psm_info = {
	PSM_INFO_VER01_5,			/* version */
	PSM_OWN_EXCLUSIVE,			/* ownership */
	(struct psm_ops *)&apic_ops,		/* operation */
	"pcplusmp",				/* machine name */
	"pcplusmp v1.4 compatible %I%",
};

static void *apic_hdlp;

#ifdef DEBUG
#define	DENT		0x0001
int	apic_debug = 0;
/*
 * set apic_restrict_vector to the # of vectors we want to allow per range
 * useful in testing shared interrupt logic by setting it to 2 or 3
 */
int	apic_restrict_vector = 0;

#define	APIC_DEBUG_MSGBUFSIZE	2048
int	apic_debug_msgbuf[APIC_DEBUG_MSGBUFSIZE];
int	apic_debug_msgbufindex = 0;

/*
 * Put "int" info into debug buffer. No MP consistency, but light weight.
 * Good enough for most debugging.
 */
#define	APIC_DEBUG_BUF_PUT(x) \
	apic_debug_msgbuf[apic_debug_msgbufindex++] = x; \
	if (apic_debug_msgbufindex >= (APIC_DEBUG_MSGBUFSIZE - NCPU)) \
		apic_debug_msgbufindex = 0;

#endif /* DEBUG */

apic_cpus_info_t	*apic_cpus;

static uint_t	apic_cpumask = 0;
static uint_t	apic_flag;

/* Flag to indicate that we need to shut down all processors */
static uint_t	apic_shutdown_processors;

uint_t	apic_nsec_per_intr = 0;

/*
 * apic_let_idle_redistribute can have the following values:
 * 0 - If clock decremented it from 1 to 0, clock has to call redistribute.
 * apic_redistribute_lock prevents multiple idle cpus from redistributing
 */
int	apic_num_idle_redistributions = 0;
static	int apic_let_idle_redistribute = 0;
static	uint_t apic_nticks = 0;
static	uint_t apic_skipped_redistribute = 0;

/* to gather intr data and redistribute */
static void apic_redistribute_compute(void);

static	uint_t last_count_read = 0;
static	lock_t	apic_gethrtime_lock;
volatile int	apic_hrtime_stamp = 0;
volatile hrtime_t apic_nsec_since_boot = 0;
static uint_t apic_hertz_count, apic_nsec_per_tick;
static hrtime_t apic_nsec_max;

static	hrtime_t	apic_last_hrtime = 0;
int		apic_hrtime_error = 0;
int		apic_remote_hrterr = 0;
int		apic_num_nmis = 0;
int		apic_apic_error = 0;
int		apic_num_apic_errors = 0;
int		apic_num_cksum_errors = 0;

static	uchar_t	apic_io_id[MAX_IO_APIC];
static	uchar_t	apic_io_ver[MAX_IO_APIC];
static	uchar_t	apic_io_vectbase[MAX_IO_APIC];
static	uchar_t	apic_io_vectend[MAX_IO_APIC];
volatile int32_t *apicioadr[MAX_IO_APIC];
/*
 * apic_ioapic_lock protects the ioapics (reg select), the status, temp_bound
 * and bound elements of cpus_info and the temp_cpu element of irq_struct
 */
lock_t	apic_ioapic_lock;

/*
 * apic_ioapic_reprogram_lock prevents a CPU from exiting
 * apic_intr_exit before IOAPIC reprogramming information
 * is collected.
 */
static	lock_t	apic_ioapic_reprogram_lock;
static	int	apic_io_max = 0;	/* no. of i/o apics enabled */
static	struct apic_io_intr *apic_io_intrp = 0;
static	struct apic_bus	*apic_busp;

uchar_t	apic_vector_to_irq[APIC_MAX_VECTOR+1];
static	uchar_t	apic_resv_vector[MAXIPL+1];

static	char	apic_level_intr[APIC_MAX_VECTOR+1];
static	int	apic_error = 0;
/* values which apic_error can take. Not catastrophic, but may help debug */
#define	APIC_ERR_BOOT_EOI		0x1
#define	APIC_ERR_GET_IPIVECT_FAIL	0x2
#define	APIC_ERR_INVALID_INDEX		0x4
#define	APIC_ERR_MARK_VECTOR_FAIL	0x8
#define	APIC_ERR_APIC_ERROR		0x40000000
#define	APIC_ERR_NMI			0x80000000

static	int	apic_cmos_ssb_set = 0;

static	uint32_t	eisa_level_intr_mask = 0;
	/* At least MSB will be set if EISA bus */

static	int	apic_pci_bus_total = 0;
static	uchar_t	apic_single_pci_busid = 0;


/*
 * airq_mutex protects additions to the apic_irq_table - the first
 * pointer and any airq_nexts off of that one. It also protects
 * apic_max_device_irq & apic_min_device_irq. It also guarantees
 * that share_id is unique as new ids are generated only when new
 * irq_t structs are linked in. Once linked in the structs are never
 * deleted. temp_cpu & mps_intr_index field indicate if it is programmed
 * or allocated. Note that there is a slight gap between allocating in
 * apic_introp_xlate and programming in addspl.
 */
kmutex_t	airq_mutex;
apic_irq_t	*apic_irq_table[APIC_MAX_VECTOR+1];
int		apic_max_device_irq = 0;
int		apic_min_device_irq = APIC_MAX_VECTOR;

/* use to make sure only one cpu handles the nmi */
static	lock_t	apic_nmi_lock;
/* use to make sure only one cpu handles the error interrupt */
static	lock_t	apic_error_lock;

/*
 * Following declarations are for revectoring; used when ISRs at different
 * IPLs share an irq.
 */
static	lock_t	apic_revector_lock;
static	int	apic_revector_pending = 0;
static	uchar_t	*apic_oldvec_to_newvec;
static	uchar_t	*apic_newvec_to_oldvec;

/* Ensures that the IOAPIC-reprogramming timeout is not reentrant */
static	kmutex_t	apic_reprogram_timeout_mutex;

static	struct ioapic_reprogram_data {
	int		valid;	 /* This entry is valid */
	int		bindcpu; /* The CPU to which the int will be bound */
	unsigned	timeouts; /* # times the reprogram timeout was called */
} apic_reprogram_info[APIC_MAX_VECTOR+1];
/*
 * APIC_MAX_VECTOR + 1 is the maximum # of IRQs as well. apic_reprogram_info
 * is indexed by IRQ number, NOT by vector number.
 */


/*
 * The following added to identify a software poweroff method if available.
 */

static struct {
	int	poweroff_method;
	char	oem_id[APIC_MPS_OEM_ID_LEN + 1];	/* MAX + 1 for NULL */
	char	prod_id[APIC_MPS_PROD_ID_LEN + 1];	/* MAX + 1 for NULL */
} apic_mps_ids[] = {
	{ APIC_POWEROFF_VIA_RTC,	"INTEL",	"ALDER" },   /* 4300 */
	{ APIC_POWEROFF_VIA_RTC,	"NCR",		"AMC" },     /* 4300 */
	{ APIC_POWEROFF_VIA_ASPEN_BMC,	"INTEL",	"A450NX" },  /* 4400? */
	{ APIC_POWEROFF_VIA_ASPEN_BMC,	"INTEL",	"AD450NX" }, /* 4400 */
	{ APIC_POWEROFF_VIA_ASPEN_BMC,	"INTEL",	"AC450NX" }, /* 4400R */
	{ APIC_POWEROFF_VIA_SITKA_BMC,	"INTEL",	"S450NX" },  /* S50 */
	{ APIC_POWEROFF_VIA_SITKA_BMC,	"INTEL",	"SC450NX" }  /* S50? */
};

int	apic_poweroff_method = APIC_POWEROFF_NONE;

static struct {
	uchar_t	cntl;
	uchar_t	data;
} aspen_bmc[] = {
	{ CC_SMS_WR_START,	0x18 },		/* NetFn/LUN */
	{ CC_SMS_WR_NEXT,	0x24 },		/* Cmd SET_WATCHDOG_TIMER */
	{ CC_SMS_WR_NEXT,	0x84 },		/* DataByte 1: SMS/OS no log */
	{ CC_SMS_WR_NEXT,	0x2 },		/* DataByte 2: Power Down */
	{ CC_SMS_WR_NEXT,	0x0 },		/* DataByte 3: no pre-timeout */
	{ CC_SMS_WR_NEXT,	0x0 },		/* DataByte 4: timer expir. */
	{ CC_SMS_WR_NEXT,	0xa },		/* DataByte 5: init countdown */
	{ CC_SMS_WR_END,	0x0 },		/* DataByte 6: init countdown */

	{ CC_SMS_WR_START,	0x18 },		/* NetFn/LUN */
	{ CC_SMS_WR_END,	0x22 }		/* Cmd RESET_WATCHDOG_TIMER */
};

static struct {
	int	port;
	uchar_t	data;
} sitka_bmc[] = {
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_START },
	{ SMS_DATA_REGISTER,	0x18 },		/* NetFn/LUN */
	{ SMS_DATA_REGISTER,	0x24 },		/* Cmd SET_WATCHDOG_TIMER */
	{ SMS_DATA_REGISTER,	0x84 },		/* DataByte 1: SMS/OS no log */
	{ SMS_DATA_REGISTER,	0x2 },		/* DataByte 2: Power Down */
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 3: no pre-timeout */
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 4: timer expir. */
	{ SMS_DATA_REGISTER,	0xa },		/* DataByte 5: init countdown */
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_END },
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 6: init countdown */

	{ SMS_COMMAND_REGISTER,	SMS_WRITE_START },
	{ SMS_DATA_REGISTER,	0x18 },		/* NetFn/LUN */
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_END },
	{ SMS_DATA_REGISTER,	0x22 }		/* Cmd RESET_WATCHDOG_TIMER */
};


/* Patchable global variables. */
int	apic_kmdb_on_nmi = 0;		/* 0 - no, 1 - yes enter kmdb */
int	apic_debug_mps_id = 0;		/* 1 - print MPS ID strings */

/*
 * ACPI definitions
 */
/* _PIC method arguments */
#define	ACPI_PIC_MODE	0
#define	ACPI_APIC_MODE	1

/* APIC error flags we care about */
#define	APIC_SEND_CS_ERROR	0x01
#define	APIC_RECV_CS_ERROR	0x02
#define	APIC_CS_ERRORS		(APIC_SEND_CS_ERROR|APIC_RECV_CS_ERROR)

/*
 * ACPI variables
 */
/* 1 = acpi is enabled & working, 0 = acpi is not enabled or not there */
static	int apic_enable_acpi = 0;

/* ACPI Multiple APIC Description Table ptr */
static	MULTIPLE_APIC_TABLE *acpi_mapic_dtp = NULL;

/* ACPI Interrupt Source Override Structure ptr */
static	MADT_INTERRUPT_OVERRIDE *acpi_isop = NULL;
static	int acpi_iso_cnt = 0;

/* ACPI Non-maskable Interrupt Sources ptr */
static	MADT_NMI_SOURCE *acpi_nmi_sp = NULL;
static	int acpi_nmi_scnt = 0;
static	MADT_LOCAL_APIC_NMI *acpi_nmi_cp = NULL;
static	int acpi_nmi_ccnt = 0;

/*
 *	extern declarations
 */
extern	int	intr_clear(void);
extern	void	intr_restore(uint_t);
#if defined(__amd64)
extern	int	intpri_use_cr8;
#endif	/* __amd64 */

extern int apic_pci_msi_enable_vector(dev_info_t *, int, int,
    int, int, int);
extern apic_irq_t *apic_find_irq(dev_info_t *, struct intrspec *, int);

/*
 *	This is the loadable module wrapper
 */

int
_init(void)
{
	if (apic_coarse_hrtime)
		apic_ops.psm_gethrtime = &apic_gettime;
	return (psm_mod_init(&apic_hdlp, &apic_psm_info));
}

int
_fini(void)
{
	return (psm_mod_fini(&apic_hdlp, &apic_psm_info));
}

int
_info(struct modinfo *modinfop)
{
	return (psm_mod_info(&apic_hdlp, &apic_psm_info, modinfop));
}

/*
 * Auto-configuration routines
 */

/*
 * Look at MPSpec 1.4 (Intel Order # 242016-005) for details of what we do here
 * May work with 1.1 - but not guaranteed.
 * According to the MP Spec, the MP floating pointer structure
 * will be searched in the order described below:
 * 1. In the first kilobyte of Extended BIOS Data Area (EBDA)
 * 2. Within the last kilobyte of system base memory
 * 3. In the BIOS ROM address space between 0F0000h and 0FFFFFh
 * Once we find the right signature with proper checksum, we call
 * either handle_defconf or parse_mpct to get all info necessary for
 * subsequent operations.
 */
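/*
 * Editor's note (illustrative, not part of the original source): the BIOS
 * data area words read below are real-mode quantities. The EBDA value at
 * 40:0Eh is a segment, hence the "<< 4" to form a physical address (for
 * example, a segment of 0x9fc0 maps to 0x9fc00), and the value at 40:13h
 * is the base memory size in kilobytes, which is why the last-kilobyte
 * probe starts at 639 * 1024 on 640K systems and at 511 * 1024 otherwise.
 */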
static int
apic_probe()
{
	uint32_t mpct_addr, ebda_start = 0, base_mem_end;
	caddr_t	biosdatap;
	caddr_t	mpct;
	caddr_t	fptr;
	int	i, mpct_size, mapsize, retval = PSM_FAILURE;
	ushort_t	ebda_seg, base_mem_size;
	struct	apic_mpfps_hdr	*fpsp;
	struct	apic_mp_cnf_hdr	*hdrp;
	int bypass_cpu_and_ioapics_in_mptables;
	int acpi_user_options;

	if (apic_forceload < 0)
		return (retval);

	/* Allow override for MADT-only mode */
	acpi_user_options = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(), 0,
	    "acpi-user-options", 0);
	apic_use_acpi_madt_only = ((acpi_user_options & ACPI_OUSER_MADT) != 0);

	/* Allow apic_use_acpi to override MADT-only mode */
	if (!apic_use_acpi)
		apic_use_acpi_madt_only = 0;

	retval = acpi_probe();

	/*
	 * mapin the bios data area 40:0
	 * 40:13h - two-byte location reports the base memory size
	 * 40:0Eh - two-byte location for the exact starting address of
	 *	    the EBDA segment for EISA
	 */
	biosdatap = psm_map_phys(0x400, 0x20, PROT_READ);
	if (!biosdatap)
		return (retval);
	fpsp = (struct apic_mpfps_hdr *)NULL;
	mapsize = MPFPS_RAM_WIN_LEN;
	/*LINTED: pointer cast may result in improper alignment */
	ebda_seg = *((ushort_t *)(biosdatap+0xe));
	/* check the 1k of EBDA */
	if (ebda_seg) {
		ebda_start = ((uint32_t)ebda_seg) << 4;
		fptr = psm_map_phys(ebda_start, MPFPS_RAM_WIN_LEN, PROT_READ);
		if (fptr) {
			if (!(fpsp =
			    apic_find_fps_sig(fptr, MPFPS_RAM_WIN_LEN)))
				psm_unmap_phys(fptr, MPFPS_RAM_WIN_LEN);
		}
	}
	/* If not in EBDA, check the last k of system base memory */
	if (!fpsp) {
		/*LINTED: pointer cast may result in improper alignment */
		base_mem_size = *((ushort_t *)(biosdatap + 0x13));

		if (base_mem_size > 512)
			base_mem_end = 639 * 1024;
		else
			base_mem_end = 511 * 1024;
		/* if ebda == last k of base mem, skip to check BIOS ROM */
		if (base_mem_end != ebda_start) {

			fptr = psm_map_phys(base_mem_end, MPFPS_RAM_WIN_LEN,
			    PROT_READ);

			if (fptr) {
				if (!(fpsp = apic_find_fps_sig(fptr,
				    MPFPS_RAM_WIN_LEN)))
					psm_unmap_phys(fptr, MPFPS_RAM_WIN_LEN);
			}
		}
	}
	psm_unmap_phys(biosdatap, 0x20);

	/* If still cannot find it, check the BIOS ROM space */
	if (!fpsp) {
		mapsize = MPFPS_ROM_WIN_LEN;
		fptr = psm_map_phys(MPFPS_ROM_WIN_START,
		    MPFPS_ROM_WIN_LEN, PROT_READ);
		if (fptr) {
			if (!(fpsp =
			    apic_find_fps_sig(fptr, MPFPS_ROM_WIN_LEN))) {
				psm_unmap_phys(fptr, MPFPS_ROM_WIN_LEN);
				return (retval);
			}
		}
	}

	if (apic_checksum((caddr_t)fpsp, fpsp->mpfps_length * 16) != 0) {
		psm_unmap_phys(fptr, MPFPS_ROM_WIN_LEN);
		return (retval);
	}

	apic_spec_rev = fpsp->mpfps_spec_rev;
	if ((apic_spec_rev != 04) && (apic_spec_rev != 01)) {
		psm_unmap_phys(fptr, MPFPS_ROM_WIN_LEN);
		return (retval);
	}

	/* check IMCR is present or not */
	apic_imcrp = fpsp->mpfps_featinfo2 & MPFPS_FEATINFO2_IMCRP;

	/* check default configuration (dual CPUs) */
	if ((apic_defconf = fpsp->mpfps_featinfo1) != 0) {
		psm_unmap_phys(fptr, mapsize);
		return (apic_handle_defconf());
	}

	/* MP Configuration Table */
	mpct_addr = (uint32_t)(fpsp->mpfps_mpct_paddr);

	psm_unmap_phys(fptr, mapsize);	/* unmap floating ptr struct */

	/*
	 * Map in enough memory for the MP Configuration Table Header.
	 * Use this table to read the total length of the BIOS data and
	 * map in all the info
	 */
	/*LINTED: pointer cast may result in improper alignment */
	hdrp = (struct apic_mp_cnf_hdr *)psm_map_phys(mpct_addr,
	    sizeof (struct apic_mp_cnf_hdr), PROT_READ);
	if (!hdrp)
		return (retval);

	/* check mp configuration table signature PCMP */
	if (hdrp->mpcnf_sig != 0x504d4350) {
		psm_unmap_phys((caddr_t)hdrp, sizeof (struct apic_mp_cnf_hdr));
		return (retval);
	}
	mpct_size = (int)hdrp->mpcnf_tbl_length;

	apic_set_pwroff_method_from_mpcnfhdr(hdrp);

	psm_unmap_phys((caddr_t)hdrp, sizeof (struct apic_mp_cnf_hdr));

	if ((retval == PSM_SUCCESS) && !apic_use_acpi_madt_only) {
		/* This is an ACPI machine. No need for further checks. */
		return (retval);
	}

	/*
	 * Map in the entries for this machine, ie. Processor
	 * Entry Tables, Bus Entry Tables, etc.
	 * They are in fixed order following one another
	 */
	mpct = psm_map_phys(mpct_addr, mpct_size, PROT_READ);
	if (!mpct)
		return (retval);

	if (apic_checksum(mpct, mpct_size) != 0)
		goto apic_fail1;


	/*LINTED: pointer cast may result in improper alignment */
	hdrp = (struct apic_mp_cnf_hdr *)mpct;
	/*LINTED: pointer cast may result in improper alignment */
	apicadr = (uint32_t *)psm_map_phys((uint32_t)hdrp->mpcnf_local_apic,
	    APIC_LOCAL_MEMLEN, PROT_READ | PROT_WRITE);
	if (!apicadr)
		goto apic_fail1;

	/* Parse all information in the tables */
	bypass_cpu_and_ioapics_in_mptables = (retval == PSM_SUCCESS);
	if (apic_parse_mpct(mpct, bypass_cpu_and_ioapics_in_mptables) ==
	    PSM_SUCCESS)
		return (PSM_SUCCESS);

	for (i = 0; i < apic_io_max; i++)
		psm_unmap_phys((caddr_t)apicioadr[i], APIC_IO_MEMLEN);
	if (apic_cpus)
		kmem_free(apic_cpus, sizeof (*apic_cpus) * apic_nproc);
	if (apicadr)
		psm_unmap_phys((caddr_t)apicadr, APIC_LOCAL_MEMLEN);
apic_fail1:
	psm_unmap_phys(mpct, mpct_size);
	return (retval);
}

static void
apic_set_pwroff_method_from_mpcnfhdr(struct apic_mp_cnf_hdr *hdrp)
{
	int	i;

	for (i = 0; i < (sizeof (apic_mps_ids) / sizeof (apic_mps_ids[0]));
	    i++) {
		if ((strncmp(hdrp->mpcnf_oem_str, apic_mps_ids[i].oem_id,
		    strlen(apic_mps_ids[i].oem_id)) == 0) &&
		    (strncmp(hdrp->mpcnf_prod_str, apic_mps_ids[i].prod_id,
		    strlen(apic_mps_ids[i].prod_id)) == 0)) {

			apic_poweroff_method = apic_mps_ids[i].poweroff_method;
			break;
		}
	}

	if (apic_debug_mps_id != 0) {
		cmn_err(CE_CONT, "pcplusmp: MPS OEM ID = '%c%c%c%c%c%c%c%c'"
		    "Product ID = '%c%c%c%c%c%c%c%c%c%c%c%c'\n",
		    hdrp->mpcnf_oem_str[0],
		    hdrp->mpcnf_oem_str[1],
		    hdrp->mpcnf_oem_str[2],
		    hdrp->mpcnf_oem_str[3],
		    hdrp->mpcnf_oem_str[4],
		    hdrp->mpcnf_oem_str[5],
		    hdrp->mpcnf_oem_str[6],
		    hdrp->mpcnf_oem_str[7],
		    hdrp->mpcnf_prod_str[0],
		    hdrp->mpcnf_prod_str[1],
		    hdrp->mpcnf_prod_str[2],
		    hdrp->mpcnf_prod_str[3],
		    hdrp->mpcnf_prod_str[4],
		    hdrp->mpcnf_prod_str[5],
		    hdrp->mpcnf_prod_str[6],
		    hdrp->mpcnf_prod_str[7],
		    hdrp->mpcnf_prod_str[8],
		    hdrp->mpcnf_prod_str[9],
		    hdrp->mpcnf_prod_str[10],
		    hdrp->mpcnf_prod_str[11]);
	}
}

static int
acpi_probe(void)
{
	int i, id, intmax, ver, index, rv;
	int acpi_verboseflags = 0;
	int madt_seen, madt_size;
	APIC_HEADER		*ap;
	MADT_PROCESSOR_APIC	*mpa;
	MADT_IO_APIC		*mia;
	MADT_IO_SAPIC		*misa;
	MADT_INTERRUPT_OVERRIDE	*mio;
	MADT_NMI_SOURCE		*mns;
	MADT_INTERRUPT_SOURCE	*mis;
	MADT_LOCAL_APIC_NMI	*mlan;
	MADT_ADDRESS_OVERRIDE	*mao;
	ACPI_OBJECT_LIST	arglist;
	ACPI_OBJECT		arg;
	int			sci;
	iflag_t			sci_flags;
	volatile int32_t	*ioapic;
	char			local_ids[NCPU];
	char			proc_ids[NCPU];
	uchar_t			hid;

	if (!apic_use_acpi)
		return (PSM_FAILURE);

	if (AcpiGetFirmwareTable(APIC_SIG, 1, ACPI_LOGICAL_ADDRESSING,
	    (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
		return (PSM_FAILURE);

	apicadr = (uint32_t *)psm_map_phys(
	    (uint32_t)acpi_mapic_dtp->LocalApicAddress,
	    APIC_LOCAL_MEMLEN, PROT_READ | PROT_WRITE);
	if (!apicadr)
		return (PSM_FAILURE);

	id = apicadr[APIC_LID_REG];
	local_ids[0] = (uchar_t)(((uint_t)id) >> 24);
	apic_nproc = index = 1;
	apic_io_max = 0;

	ap = (APIC_HEADER *) (acpi_mapic_dtp + 1);
	madt_size = acpi_mapic_dtp->Length;
	madt_seen = sizeof (*acpi_mapic_dtp);

	while (madt_seen < madt_size) {
		switch (ap->Type) {
		case APIC_PROCESSOR:
			mpa = (MADT_PROCESSOR_APIC *) ap;
			if (mpa->ProcessorEnabled) {
				if (mpa->LocalApicId == local_ids[0])
					proc_ids[0] = mpa->ProcessorId;
				else if (apic_nproc < NCPU) {
					local_ids[index] = mpa->LocalApicId;
					proc_ids[index] = mpa->ProcessorId;
					index++;
					apic_nproc++;
				} else
					cmn_err(CE_WARN, "pcplusmp: exceeded "
					    "maximum no. of CPUs (= %d)", NCPU);
			}
			break;

		case APIC_IO:
			mia = (MADT_IO_APIC *) ap;
			if (apic_io_max < MAX_IO_APIC) {
				apic_io_id[apic_io_max] = mia->IoApicId;
				apic_io_vectbase[apic_io_max] =
				    mia->Interrupt;
				ioapic = apicioadr[apic_io_max] =
				    (int32_t *)psm_map_phys(
				    (uint32_t)mia->Address,
				    APIC_IO_MEMLEN, PROT_READ | PROT_WRITE);
				if (!ioapic)
					goto cleanup;
				apic_io_max++;
			}
			break;

		case APIC_XRUPT_OVERRIDE:
			mio = (MADT_INTERRUPT_OVERRIDE *) ap;
			if (acpi_isop == NULL)
				acpi_isop = mio;
			acpi_iso_cnt++;
			break;

		case APIC_NMI:
			/* UNIMPLEMENTED */
			mns = (MADT_NMI_SOURCE *) ap;
			if (acpi_nmi_sp == NULL)
				acpi_nmi_sp = mns;
			acpi_nmi_scnt++;

			cmn_err(CE_NOTE, "!apic: nmi source: %d %d %d\n",
			    mns->Interrupt, mns->Polarity,
			    mns->TriggerMode);
			break;

		case APIC_LOCAL_NMI:
			/* UNIMPLEMENTED */
			mlan = (MADT_LOCAL_APIC_NMI *) ap;
			if (acpi_nmi_cp == NULL)
				acpi_nmi_cp = mlan;
			acpi_nmi_ccnt++;

			cmn_err(CE_NOTE, "!apic: local nmi: %d %d %d %d\n",
			    mlan->ProcessorId, mlan->Polarity,
			    mlan->TriggerMode, mlan->Lint);
			break;

		case APIC_ADDRESS_OVERRIDE:
			/* UNIMPLEMENTED */
			mao = (MADT_ADDRESS_OVERRIDE *) ap;
			cmn_err(CE_NOTE, "!apic: address override: %lx\n",
			    (long)mao->Address);
			break;

		case APIC_IO_SAPIC:
			/* UNIMPLEMENTED */
			misa = (MADT_IO_SAPIC *) ap;

			cmn_err(CE_NOTE, "!apic: io sapic: %d %d %lx\n",
			    misa->IoSapicId, misa->InterruptBase,
			    (long)misa->Address);
			break;

		case APIC_XRUPT_SOURCE:
			/* UNIMPLEMENTED */
			mis = (MADT_INTERRUPT_SOURCE *) ap;

			cmn_err(CE_NOTE,
			    "!apic: irq source: %d %d %d %d %d %d %d\n",
			    mis->ProcessorId, mis->ProcessorEid,
			    mis->Interrupt, mis->Polarity,
			    mis->TriggerMode, mis->InterruptType,
			    mis->IoSapicVector);
			break;
		case APIC_RESERVED:
		default:
			goto cleanup;
		}

		/* advance to next entry */
		madt_seen += ap->Length;
		ap = (APIC_HEADER *)(((char *)ap) + ap->Length);
	}

	if ((apic_cpus = kmem_zalloc(sizeof (*apic_cpus) * apic_nproc,
	    KM_NOSLEEP)) == NULL)
		goto cleanup;

	apic_cpumask = (1 << apic_nproc) - 1;

	/*
	 * ACPI doesn't provide the local apic ver, get it directly from the
	 * local apic
	 */
	ver = apicadr[APIC_VERS_REG];
	for (i = 0; i < apic_nproc; i++) {
		apic_cpus[i].aci_local_id = local_ids[i];
		apic_cpus[i].aci_local_ver = (uchar_t)(ver & 0xFF);
	}
	for (i = 0; i < apic_io_max; i++) {
		ioapic = apicioadr[i];

		/*
		 * need to check Sitka on the following acpi problem
		 * On the Sitka, the ioapic's apic_id field isn't reporting
		 * the actual io apic id. We have reported this problem
		 * to Intel. Until they fix the problem, we will get the
		 * actual id directly from the ioapic.
1114*0Sstevel@tonic-gate */ 1115*0Sstevel@tonic-gate ioapic[APIC_IO_REG] = APIC_ID_CMD; 1116*0Sstevel@tonic-gate id = ioapic[APIC_IO_DATA]; 1117*0Sstevel@tonic-gate hid = (uchar_t)(((uint_t)id) >> 24); 1118*0Sstevel@tonic-gate 1119*0Sstevel@tonic-gate if (hid != apic_io_id[i]) { 1120*0Sstevel@tonic-gate if (apic_io_id[i] == 0) 1121*0Sstevel@tonic-gate apic_io_id[i] = hid; 1122*0Sstevel@tonic-gate else { /* set ioapic id to whatever reported by ACPI */ 1123*0Sstevel@tonic-gate id = ((int32_t)apic_io_id[i]) << 24; 1124*0Sstevel@tonic-gate ioapic[APIC_IO_REG] = APIC_ID_CMD; 1125*0Sstevel@tonic-gate ioapic[APIC_IO_DATA] = id; 1126*0Sstevel@tonic-gate } 1127*0Sstevel@tonic-gate } 1128*0Sstevel@tonic-gate ioapic[APIC_IO_REG] = APIC_VERS_CMD; 1129*0Sstevel@tonic-gate ver = ioapic[APIC_IO_DATA]; 1130*0Sstevel@tonic-gate apic_io_ver[i] = (uchar_t)(ver & 0xff); 1131*0Sstevel@tonic-gate intmax = (ver >> 16) & 0xff; 1132*0Sstevel@tonic-gate apic_io_vectend[i] = apic_io_vectbase[i] + intmax; 1133*0Sstevel@tonic-gate } 1134*0Sstevel@tonic-gate 1135*0Sstevel@tonic-gate 1136*0Sstevel@tonic-gate /* 1137*0Sstevel@tonic-gate * Process SCI configuration here 1138*0Sstevel@tonic-gate * An error may be returned here if 1139*0Sstevel@tonic-gate * acpi-user-options specifies legacy mode 1140*0Sstevel@tonic-gate * (no SCI, no ACPI mode) 1141*0Sstevel@tonic-gate */ 1142*0Sstevel@tonic-gate if (acpica_get_sci(&sci, &sci_flags) != AE_OK) 1143*0Sstevel@tonic-gate sci = -1; 1144*0Sstevel@tonic-gate 1145*0Sstevel@tonic-gate /* 1146*0Sstevel@tonic-gate * Now call acpi_init() to generate namespaces 1147*0Sstevel@tonic-gate * If this fails, we don't attempt to use ACPI 1148*0Sstevel@tonic-gate * even if we were able to get a MADT above 1149*0Sstevel@tonic-gate */ 1150*0Sstevel@tonic-gate if (acpica_init() != AE_OK) 1151*0Sstevel@tonic-gate goto cleanup; 1152*0Sstevel@tonic-gate 1153*0Sstevel@tonic-gate /* 1154*0Sstevel@tonic-gate * Squirrel away the SCI and flags for later on 1155*0Sstevel@tonic-gate * in apic_picinit() when we're ready 1156*0Sstevel@tonic-gate */ 1157*0Sstevel@tonic-gate apic_sci_vect = sci; 1158*0Sstevel@tonic-gate apic_sci_flags = sci_flags; 1159*0Sstevel@tonic-gate 1160*0Sstevel@tonic-gate if (apic_verbose & APIC_VERBOSE_IRQ_FLAG) 1161*0Sstevel@tonic-gate acpi_verboseflags |= PSM_VERBOSE_IRQ_FLAG; 1162*0Sstevel@tonic-gate 1163*0Sstevel@tonic-gate if (apic_verbose & APIC_VERBOSE_POWEROFF_FLAG) 1164*0Sstevel@tonic-gate acpi_verboseflags |= PSM_VERBOSE_POWEROFF_FLAG; 1165*0Sstevel@tonic-gate 1166*0Sstevel@tonic-gate if (apic_verbose & APIC_VERBOSE_POWEROFF_PAUSE_FLAG) 1167*0Sstevel@tonic-gate acpi_verboseflags |= PSM_VERBOSE_POWEROFF_PAUSE_FLAG; 1168*0Sstevel@tonic-gate 1169*0Sstevel@tonic-gate if (acpi_psm_init(apic_psm_info.p_mach_idstring, acpi_verboseflags) == 1170*0Sstevel@tonic-gate ACPI_PSM_FAILURE) 1171*0Sstevel@tonic-gate goto cleanup; 1172*0Sstevel@tonic-gate 1173*0Sstevel@tonic-gate /* Enable ACPI APIC interrupt routing */ 1174*0Sstevel@tonic-gate arglist.Count = 1; 1175*0Sstevel@tonic-gate arglist.Pointer = &arg; 1176*0Sstevel@tonic-gate arg.Type = ACPI_TYPE_INTEGER; 1177*0Sstevel@tonic-gate arg.Integer.Value = ACPI_APIC_MODE; /* 1 */ 1178*0Sstevel@tonic-gate rv = AcpiEvaluateObject(NULL, "\\_PIC", &arglist, NULL); 1179*0Sstevel@tonic-gate if (rv == AE_OK) { 1180*0Sstevel@tonic-gate build_reserved_irqlist((uchar_t *)apic_reserved_irqlist); 1181*0Sstevel@tonic-gate apic_enable_acpi = 1; 1182*0Sstevel@tonic-gate if (apic_use_acpi_madt_only) { 1183*0Sstevel@tonic-gate cmn_err(CE_CONT, 
1184*0Sstevel@tonic-gate "?Using ACPI for CPU/IOAPIC information ONLY\n"); 1185*0Sstevel@tonic-gate } 1186*0Sstevel@tonic-gate return (PSM_SUCCESS); 1187*0Sstevel@tonic-gate } 1188*0Sstevel@tonic-gate /* if setting APIC mode failed above, we fall through to cleanup */ 1189*0Sstevel@tonic-gate 1190*0Sstevel@tonic-gate cleanup: 1191*0Sstevel@tonic-gate if (apicadr != NULL) { 1192*0Sstevel@tonic-gate psm_unmap_phys((caddr_t)apicadr, APIC_LOCAL_MEMLEN); 1193*0Sstevel@tonic-gate apicadr = NULL; 1194*0Sstevel@tonic-gate } 1195*0Sstevel@tonic-gate apic_nproc = 0; 1196*0Sstevel@tonic-gate for (i = 0; i < apic_io_max; i++) { 1197*0Sstevel@tonic-gate psm_unmap_phys((caddr_t)apicioadr[i], APIC_IO_MEMLEN); 1198*0Sstevel@tonic-gate apicioadr[i] = NULL; 1199*0Sstevel@tonic-gate } 1200*0Sstevel@tonic-gate apic_io_max = 0; 1201*0Sstevel@tonic-gate acpi_isop = NULL; 1202*0Sstevel@tonic-gate acpi_iso_cnt = 0; 1203*0Sstevel@tonic-gate acpi_nmi_sp = NULL; 1204*0Sstevel@tonic-gate acpi_nmi_scnt = 0; 1205*0Sstevel@tonic-gate acpi_nmi_cp = NULL; 1206*0Sstevel@tonic-gate acpi_nmi_ccnt = 0; 1207*0Sstevel@tonic-gate return (PSM_FAILURE); 1208*0Sstevel@tonic-gate } 1209*0Sstevel@tonic-gate 1210*0Sstevel@tonic-gate /* 1211*0Sstevel@tonic-gate * Handle default configuration. Fill in reqd global variables & tables 1212*0Sstevel@tonic-gate * Fill all details as MP table does not give any more info 1213*0Sstevel@tonic-gate */ 1214*0Sstevel@tonic-gate static int 1215*0Sstevel@tonic-gate apic_handle_defconf() 1216*0Sstevel@tonic-gate { 1217*0Sstevel@tonic-gate uint_t lid; 1218*0Sstevel@tonic-gate 1219*0Sstevel@tonic-gate /*LINTED: pointer cast may result in improper alignment */ 1220*0Sstevel@tonic-gate apicioadr[0] = (int32_t *)psm_map_phys(APIC_IO_ADDR, 1221*0Sstevel@tonic-gate APIC_IO_MEMLEN, PROT_READ | PROT_WRITE); 1222*0Sstevel@tonic-gate /*LINTED: pointer cast may result in improper alignment */ 1223*0Sstevel@tonic-gate apicadr = (uint32_t *)psm_map_phys(APIC_LOCAL_ADDR, 1224*0Sstevel@tonic-gate APIC_LOCAL_MEMLEN, PROT_READ | PROT_WRITE); 1225*0Sstevel@tonic-gate apic_cpus = (apic_cpus_info_t *) 1226*0Sstevel@tonic-gate kmem_zalloc(sizeof (*apic_cpus) * 2, KM_NOSLEEP); 1227*0Sstevel@tonic-gate if ((!apicadr) || (!apicioadr[0]) || (!apic_cpus)) 1228*0Sstevel@tonic-gate goto apic_handle_defconf_fail; 1229*0Sstevel@tonic-gate apic_cpumask = 3; 1230*0Sstevel@tonic-gate apic_nproc = 2; 1231*0Sstevel@tonic-gate lid = apicadr[APIC_LID_REG]; 1232*0Sstevel@tonic-gate apic_cpus[0].aci_local_id = (uchar_t)(lid >> APIC_ID_BIT_OFFSET); 1233*0Sstevel@tonic-gate /* 1234*0Sstevel@tonic-gate * According to the PC+MP spec 1.1, the local ids 1235*0Sstevel@tonic-gate * for the default configuration has to be 0 or 1 1236*0Sstevel@tonic-gate */ 1237*0Sstevel@tonic-gate if (apic_cpus[0].aci_local_id == 1) 1238*0Sstevel@tonic-gate apic_cpus[1].aci_local_id = 0; 1239*0Sstevel@tonic-gate else if (apic_cpus[0].aci_local_id == 0) 1240*0Sstevel@tonic-gate apic_cpus[1].aci_local_id = 1; 1241*0Sstevel@tonic-gate else 1242*0Sstevel@tonic-gate goto apic_handle_defconf_fail; 1243*0Sstevel@tonic-gate 1244*0Sstevel@tonic-gate apic_io_id[0] = 2; 1245*0Sstevel@tonic-gate apic_io_max = 1; 1246*0Sstevel@tonic-gate if (apic_defconf >= 5) { 1247*0Sstevel@tonic-gate apic_cpus[0].aci_local_ver = APIC_INTEGRATED_VERS; 1248*0Sstevel@tonic-gate apic_cpus[1].aci_local_ver = APIC_INTEGRATED_VERS; 1249*0Sstevel@tonic-gate apic_io_ver[0] = APIC_INTEGRATED_VERS; 1250*0Sstevel@tonic-gate } else { 1251*0Sstevel@tonic-gate apic_cpus[0].aci_local_ver = 0; /* 82489 DX */ 
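		/*
		 * Default configurations below 5 are built from discrete
		 * 82489DX parts, so the second cpu and the I/O apic get
		 * version 0 as well; apic_cpu_start() later checks this
		 * version to decide whether STARTUP IPIs are understood.
		 */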
1252*0Sstevel@tonic-gate apic_cpus[1].aci_local_ver = 0; 1253*0Sstevel@tonic-gate apic_io_ver[0] = 0; 1254*0Sstevel@tonic-gate } 1255*0Sstevel@tonic-gate if (apic_defconf == 2 || apic_defconf == 3 || apic_defconf == 6) 1256*0Sstevel@tonic-gate eisa_level_intr_mask = (inb(EISA_LEVEL_CNTL + 1) << 8) | 1257*0Sstevel@tonic-gate inb(EISA_LEVEL_CNTL) | ((uint_t)INT32_MAX + 1); 1258*0Sstevel@tonic-gate return (PSM_SUCCESS); 1259*0Sstevel@tonic-gate 1260*0Sstevel@tonic-gate apic_handle_defconf_fail: 1261*0Sstevel@tonic-gate if (apic_cpus) 1262*0Sstevel@tonic-gate kmem_free(apic_cpus, sizeof (*apic_cpus) * 2); 1263*0Sstevel@tonic-gate if (apicadr) 1264*0Sstevel@tonic-gate psm_unmap_phys((caddr_t)apicadr, APIC_LOCAL_MEMLEN); 1265*0Sstevel@tonic-gate if (apicioadr[0]) 1266*0Sstevel@tonic-gate psm_unmap_phys((caddr_t)apicioadr[0], APIC_IO_MEMLEN); 1267*0Sstevel@tonic-gate return (PSM_FAILURE); 1268*0Sstevel@tonic-gate } 1269*0Sstevel@tonic-gate 1270*0Sstevel@tonic-gate /* Parse the entries in MP configuration table and collect info that we need */ 1271*0Sstevel@tonic-gate static int 1272*0Sstevel@tonic-gate apic_parse_mpct(caddr_t mpct, int bypass_cpus_and_ioapics) 1273*0Sstevel@tonic-gate { 1274*0Sstevel@tonic-gate struct apic_procent *procp; 1275*0Sstevel@tonic-gate struct apic_bus *busp; 1276*0Sstevel@tonic-gate struct apic_io_entry *ioapicp; 1277*0Sstevel@tonic-gate struct apic_io_intr *intrp; 1278*0Sstevel@tonic-gate volatile int32_t *ioapic; 1279*0Sstevel@tonic-gate uint_t lid; 1280*0Sstevel@tonic-gate int id; 1281*0Sstevel@tonic-gate uchar_t hid; 1282*0Sstevel@tonic-gate 1283*0Sstevel@tonic-gate /*LINTED: pointer cast may result in improper alignment */ 1284*0Sstevel@tonic-gate procp = (struct apic_procent *)(mpct + sizeof (struct apic_mp_cnf_hdr)); 1285*0Sstevel@tonic-gate 1286*0Sstevel@tonic-gate /* No need to count cpu entries if we won't use them */ 1287*0Sstevel@tonic-gate if (!bypass_cpus_and_ioapics) { 1288*0Sstevel@tonic-gate 1289*0Sstevel@tonic-gate /* Find max # of CPUS and allocate structure accordingly */ 1290*0Sstevel@tonic-gate apic_nproc = 0; 1291*0Sstevel@tonic-gate while (procp->proc_entry == APIC_CPU_ENTRY) { 1292*0Sstevel@tonic-gate if (procp->proc_cpuflags & CPUFLAGS_EN) { 1293*0Sstevel@tonic-gate apic_nproc++; 1294*0Sstevel@tonic-gate } 1295*0Sstevel@tonic-gate procp++; 1296*0Sstevel@tonic-gate } 1297*0Sstevel@tonic-gate if (apic_nproc > NCPU) 1298*0Sstevel@tonic-gate cmn_err(CE_WARN, "pcplusmp: exceeded " 1299*0Sstevel@tonic-gate "maximum no. of CPUs (= %d)", NCPU); 1300*0Sstevel@tonic-gate if (!apic_nproc || !(apic_cpus = (apic_cpus_info_t *) 1301*0Sstevel@tonic-gate kmem_zalloc(sizeof (*apic_cpus)*apic_nproc, KM_NOSLEEP))) 1302*0Sstevel@tonic-gate return (PSM_FAILURE); 1303*0Sstevel@tonic-gate } 1304*0Sstevel@tonic-gate 1305*0Sstevel@tonic-gate /*LINTED: pointer cast may result in improper alignment */ 1306*0Sstevel@tonic-gate procp = (struct apic_procent *)(mpct + sizeof (struct apic_mp_cnf_hdr)); 1307*0Sstevel@tonic-gate 1308*0Sstevel@tonic-gate /* 1309*0Sstevel@tonic-gate * start with index 1 as 0 needs to be filled in with Boot CPU, but 1310*0Sstevel@tonic-gate * if we're bypassing this information, it has already been filled 1311*0Sstevel@tonic-gate * in by acpi_probe(), so don't overwrite it. 
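 * In the bypass case the MP table is still walked, but only the bus and
 * interrupt entries are recorded; the cpu and I/O apic information
 * gathered from the MADT is left untouched.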
1312*0Sstevel@tonic-gate */ 1313*0Sstevel@tonic-gate if (!bypass_cpus_and_ioapics) 1314*0Sstevel@tonic-gate apic_nproc = 1; 1315*0Sstevel@tonic-gate 1316*0Sstevel@tonic-gate while (procp->proc_entry == APIC_CPU_ENTRY) { 1317*0Sstevel@tonic-gate /* check whether the cpu exists or not */ 1318*0Sstevel@tonic-gate if (!bypass_cpus_and_ioapics && 1319*0Sstevel@tonic-gate procp->proc_cpuflags & CPUFLAGS_EN) { 1320*0Sstevel@tonic-gate if (procp->proc_cpuflags & CPUFLAGS_BP) { /* Boot CPU */ 1321*0Sstevel@tonic-gate lid = apicadr[APIC_LID_REG]; 1322*0Sstevel@tonic-gate apic_cpus[0].aci_local_id = procp->proc_apicid; 1323*0Sstevel@tonic-gate if (apic_cpus[0].aci_local_id != 1324*0Sstevel@tonic-gate (uchar_t)(lid >> APIC_ID_BIT_OFFSET)) { 1325*0Sstevel@tonic-gate return (PSM_FAILURE); 1326*0Sstevel@tonic-gate } 1327*0Sstevel@tonic-gate apic_cpus[0].aci_local_ver = 1328*0Sstevel@tonic-gate procp->proc_version; 1329*0Sstevel@tonic-gate } else { 1330*0Sstevel@tonic-gate 1331*0Sstevel@tonic-gate apic_cpus[apic_nproc].aci_local_id = 1332*0Sstevel@tonic-gate procp->proc_apicid; 1333*0Sstevel@tonic-gate apic_cpus[apic_nproc].aci_local_ver = 1334*0Sstevel@tonic-gate procp->proc_version; 1335*0Sstevel@tonic-gate apic_nproc++; 1336*0Sstevel@tonic-gate 1337*0Sstevel@tonic-gate } 1338*0Sstevel@tonic-gate } 1339*0Sstevel@tonic-gate procp++; 1340*0Sstevel@tonic-gate } 1341*0Sstevel@tonic-gate 1342*0Sstevel@tonic-gate if (!bypass_cpus_and_ioapics) { 1343*0Sstevel@tonic-gate /* convert the number of processors into a cpumask */ 1344*0Sstevel@tonic-gate apic_cpumask = (1 << apic_nproc) - 1; 1345*0Sstevel@tonic-gate } 1346*0Sstevel@tonic-gate 1347*0Sstevel@tonic-gate /* 1348*0Sstevel@tonic-gate * Save start of bus entries for later use. 1349*0Sstevel@tonic-gate * Get EISA level cntrl if EISA bus is present. 
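 * (the edge/level control ports at EISA_LEVEL_CNTL are read below to
 * build eisa_level_intr_mask).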
1350*0Sstevel@tonic-gate * Also get the CPI bus id for single CPI bus case 1351*0Sstevel@tonic-gate */ 1352*0Sstevel@tonic-gate apic_busp = busp = (struct apic_bus *)procp; 1353*0Sstevel@tonic-gate while (busp->bus_entry == APIC_BUS_ENTRY) { 1354*0Sstevel@tonic-gate lid = apic_find_bus_type((char *)&busp->bus_str1); 1355*0Sstevel@tonic-gate if (lid == BUS_EISA) { 1356*0Sstevel@tonic-gate eisa_level_intr_mask = (inb(EISA_LEVEL_CNTL + 1) << 8) | 1357*0Sstevel@tonic-gate inb(EISA_LEVEL_CNTL) | ((uint_t)INT32_MAX + 1); 1358*0Sstevel@tonic-gate } else if (lid == BUS_PCI) { 1359*0Sstevel@tonic-gate /* 1360*0Sstevel@tonic-gate * apic_single_pci_busid will be used only if 1361*0Sstevel@tonic-gate * apic_pic_bus_total is equal to 1 1362*0Sstevel@tonic-gate */ 1363*0Sstevel@tonic-gate apic_pci_bus_total++; 1364*0Sstevel@tonic-gate apic_single_pci_busid = busp->bus_id; 1365*0Sstevel@tonic-gate } 1366*0Sstevel@tonic-gate busp++; 1367*0Sstevel@tonic-gate } 1368*0Sstevel@tonic-gate 1369*0Sstevel@tonic-gate ioapicp = (struct apic_io_entry *)busp; 1370*0Sstevel@tonic-gate 1371*0Sstevel@tonic-gate if (!bypass_cpus_and_ioapics) 1372*0Sstevel@tonic-gate apic_io_max = 0; 1373*0Sstevel@tonic-gate do { 1374*0Sstevel@tonic-gate if (!bypass_cpus_and_ioapics && apic_io_max < MAX_IO_APIC) { 1375*0Sstevel@tonic-gate if (ioapicp->io_flags & IOAPIC_FLAGS_EN) { 1376*0Sstevel@tonic-gate apic_io_id[apic_io_max] = ioapicp->io_apicid; 1377*0Sstevel@tonic-gate apic_io_ver[apic_io_max] = ioapicp->io_version; 1378*0Sstevel@tonic-gate /*LINTED: pointer cast may result in improper alignment */ 1379*0Sstevel@tonic-gate apicioadr[apic_io_max] = 1380*0Sstevel@tonic-gate (int32_t *)psm_map_phys( 1381*0Sstevel@tonic-gate (uint32_t)ioapicp->io_apic_addr, 1382*0Sstevel@tonic-gate APIC_IO_MEMLEN, PROT_READ | PROT_WRITE); 1383*0Sstevel@tonic-gate 1384*0Sstevel@tonic-gate if (!apicioadr[apic_io_max]) 1385*0Sstevel@tonic-gate return (PSM_FAILURE); 1386*0Sstevel@tonic-gate 1387*0Sstevel@tonic-gate ioapic = apicioadr[apic_io_max]; 1388*0Sstevel@tonic-gate ioapic[APIC_IO_REG] = APIC_ID_CMD; 1389*0Sstevel@tonic-gate id = ioapic[APIC_IO_DATA]; 1390*0Sstevel@tonic-gate hid = (uchar_t)(((uint_t)id) >> 24); 1391*0Sstevel@tonic-gate 1392*0Sstevel@tonic-gate if (hid != apic_io_id[apic_io_max]) { 1393*0Sstevel@tonic-gate if (apic_io_id[apic_io_max] == 0) 1394*0Sstevel@tonic-gate apic_io_id[apic_io_max] = hid; 1395*0Sstevel@tonic-gate else { 1396*0Sstevel@tonic-gate /* 1397*0Sstevel@tonic-gate * set ioapic id to whatever 1398*0Sstevel@tonic-gate * reported by MPS 1399*0Sstevel@tonic-gate * 1400*0Sstevel@tonic-gate * may not need to set index 1401*0Sstevel@tonic-gate * again ??? 
1402*0Sstevel@tonic-gate * take it out and try 1403*0Sstevel@tonic-gate */ 1404*0Sstevel@tonic-gate 1405*0Sstevel@tonic-gate id = ((int32_t) 1406*0Sstevel@tonic-gate apic_io_id[apic_io_max]) << 1407*0Sstevel@tonic-gate 24; 1408*0Sstevel@tonic-gate 1409*0Sstevel@tonic-gate ioapic[APIC_IO_REG] = 1410*0Sstevel@tonic-gate APIC_ID_CMD; 1411*0Sstevel@tonic-gate 1412*0Sstevel@tonic-gate ioapic[APIC_IO_DATA] = id; 1413*0Sstevel@tonic-gate 1414*0Sstevel@tonic-gate } 1415*0Sstevel@tonic-gate } 1416*0Sstevel@tonic-gate apic_io_max++; 1417*0Sstevel@tonic-gate } 1418*0Sstevel@tonic-gate } 1419*0Sstevel@tonic-gate ioapicp++; 1420*0Sstevel@tonic-gate } while (ioapicp->io_entry == APIC_IO_ENTRY); 1421*0Sstevel@tonic-gate 1422*0Sstevel@tonic-gate apic_io_intrp = (struct apic_io_intr *)ioapicp; 1423*0Sstevel@tonic-gate 1424*0Sstevel@tonic-gate intrp = apic_io_intrp; 1425*0Sstevel@tonic-gate while (intrp->intr_entry == APIC_IO_INTR_ENTRY) { 1426*0Sstevel@tonic-gate if ((intrp->intr_irq > APIC_MAX_ISA_IRQ) || 1427*0Sstevel@tonic-gate (apic_find_bus(intrp->intr_busid) == BUS_PCI)) { 1428*0Sstevel@tonic-gate apic_irq_translate = 1; 1429*0Sstevel@tonic-gate break; 1430*0Sstevel@tonic-gate } 1431*0Sstevel@tonic-gate intrp++; 1432*0Sstevel@tonic-gate } 1433*0Sstevel@tonic-gate 1434*0Sstevel@tonic-gate return (PSM_SUCCESS); 1435*0Sstevel@tonic-gate } 1436*0Sstevel@tonic-gate 1437*0Sstevel@tonic-gate static struct apic_mpfps_hdr * 1438*0Sstevel@tonic-gate apic_find_fps_sig(caddr_t cptr, int len) 1439*0Sstevel@tonic-gate { 1440*0Sstevel@tonic-gate int i; 1441*0Sstevel@tonic-gate 1442*0Sstevel@tonic-gate /* Look for the pattern "_MP_" */ 1443*0Sstevel@tonic-gate for (i = 0; i < len; i += 16) { 1444*0Sstevel@tonic-gate if ((*(cptr+i) == '_') && 1445*0Sstevel@tonic-gate (*(cptr+i+1) == 'M') && 1446*0Sstevel@tonic-gate (*(cptr+i+2) == 'P') && 1447*0Sstevel@tonic-gate (*(cptr+i+3) == '_')) 1448*0Sstevel@tonic-gate /*LINTED: pointer cast may result in improper alignment */ 1449*0Sstevel@tonic-gate return ((struct apic_mpfps_hdr *)(cptr + i)); 1450*0Sstevel@tonic-gate } 1451*0Sstevel@tonic-gate return (NULL); 1452*0Sstevel@tonic-gate } 1453*0Sstevel@tonic-gate 1454*0Sstevel@tonic-gate static int 1455*0Sstevel@tonic-gate apic_checksum(caddr_t bptr, int len) 1456*0Sstevel@tonic-gate { 1457*0Sstevel@tonic-gate int i; 1458*0Sstevel@tonic-gate uchar_t cksum; 1459*0Sstevel@tonic-gate 1460*0Sstevel@tonic-gate cksum = 0; 1461*0Sstevel@tonic-gate for (i = 0; i < len; i++) 1462*0Sstevel@tonic-gate cksum += *bptr++; 1463*0Sstevel@tonic-gate return ((int)cksum); 1464*0Sstevel@tonic-gate } 1465*0Sstevel@tonic-gate 1466*0Sstevel@tonic-gate 1467*0Sstevel@tonic-gate /* 1468*0Sstevel@tonic-gate * Initialise vector->ipl and ipl->pri arrays. level_intr and irqtable 1469*0Sstevel@tonic-gate * are also set to NULL. vector->irq is set to a value which cannot map 1470*0Sstevel@tonic-gate * to a real irq to show that it is free. 
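 * apic_ipltopri[] ends up mapping each ipl to the task priority value
 * that masks the highest vector class assigned to that ipl (and every
 * class below it).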
1471*0Sstevel@tonic-gate */ 1472*0Sstevel@tonic-gate void 1473*0Sstevel@tonic-gate apic_init() 1474*0Sstevel@tonic-gate { 1475*0Sstevel@tonic-gate int i; 1476*0Sstevel@tonic-gate int *iptr; 1477*0Sstevel@tonic-gate 1478*0Sstevel@tonic-gate int j = 1; 1479*0Sstevel@tonic-gate apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */ 1480*0Sstevel@tonic-gate for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) { 1481*0Sstevel@tonic-gate if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) && 1482*0Sstevel@tonic-gate (apic_vectortoipl[i + 1] == apic_vectortoipl[i])) 1483*0Sstevel@tonic-gate /* get to highest vector at the same ipl */ 1484*0Sstevel@tonic-gate continue; 1485*0Sstevel@tonic-gate for (; j <= apic_vectortoipl[i]; j++) { 1486*0Sstevel@tonic-gate apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + 1487*0Sstevel@tonic-gate APIC_BASE_VECT; 1488*0Sstevel@tonic-gate } 1489*0Sstevel@tonic-gate } 1490*0Sstevel@tonic-gate for (; j < MAXIPL + 1; j++) 1491*0Sstevel@tonic-gate /* fill up any empty ipltopri slots */ 1492*0Sstevel@tonic-gate apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT; 1493*0Sstevel@tonic-gate 1494*0Sstevel@tonic-gate /* cpu 0 is always up */ 1495*0Sstevel@tonic-gate apic_cpus[0].aci_status = APIC_CPU_ONLINE | APIC_CPU_INTR_ENABLE; 1496*0Sstevel@tonic-gate 1497*0Sstevel@tonic-gate iptr = (int *)&apic_irq_table[0]; 1498*0Sstevel@tonic-gate for (i = 0; i <= APIC_MAX_VECTOR; i++) { 1499*0Sstevel@tonic-gate apic_level_intr[i] = 0; 1500*0Sstevel@tonic-gate *iptr++ = NULL; 1501*0Sstevel@tonic-gate apic_vector_to_irq[i] = APIC_RESV_IRQ; 1502*0Sstevel@tonic-gate apic_reprogram_info[i].valid = 0; 1503*0Sstevel@tonic-gate apic_reprogram_info[i].bindcpu = 0; 1504*0Sstevel@tonic-gate apic_reprogram_info[i].timeouts = 0; 1505*0Sstevel@tonic-gate } 1506*0Sstevel@tonic-gate 1507*0Sstevel@tonic-gate /* 1508*0Sstevel@tonic-gate * Allocate a dummy irq table entry for the reserved entry. 1509*0Sstevel@tonic-gate * This takes care of the race between removing an irq and 1510*0Sstevel@tonic-gate * clock detecting a CPU in that irq during interrupt load 1511*0Sstevel@tonic-gate * sampling. 1512*0Sstevel@tonic-gate */ 1513*0Sstevel@tonic-gate apic_irq_table[APIC_RESV_IRQ] = 1514*0Sstevel@tonic-gate kmem_zalloc(sizeof (apic_irq_t), KM_NOSLEEP); 1515*0Sstevel@tonic-gate 1516*0Sstevel@tonic-gate mutex_init(&airq_mutex, NULL, MUTEX_DEFAULT, NULL); 1517*0Sstevel@tonic-gate mutex_init(&apic_reprogram_timeout_mutex, NULL, MUTEX_DEFAULT, NULL); 1518*0Sstevel@tonic-gate #if defined(__amd64) 1519*0Sstevel@tonic-gate /* 1520*0Sstevel@tonic-gate * Make cpu-specific interrupt info point to cr8pri vector 1521*0Sstevel@tonic-gate */ 1522*0Sstevel@tonic-gate for (i = 0; i <= MAXIPL; i++) 1523*0Sstevel@tonic-gate apic_cr8pri[i] = apic_ipltopri[i] >> APIC_IPL_SHIFT; 1524*0Sstevel@tonic-gate CPU->cpu_pri_data = apic_cr8pri; 1525*0Sstevel@tonic-gate intpri_use_cr8 = 1; 1526*0Sstevel@tonic-gate #endif /* __amd64 */ 1527*0Sstevel@tonic-gate } 1528*0Sstevel@tonic-gate 1529*0Sstevel@tonic-gate /* 1530*0Sstevel@tonic-gate * handler for APIC Error interrupt. Just print a warning and continue 1531*0Sstevel@tonic-gate */ 1532*0Sstevel@tonic-gate static int 1533*0Sstevel@tonic-gate apic_error_intr() 1534*0Sstevel@tonic-gate { 1535*0Sstevel@tonic-gate uint_t error0, error1, error; 1536*0Sstevel@tonic-gate uint_t i; 1537*0Sstevel@tonic-gate 1538*0Sstevel@tonic-gate /* 1539*0Sstevel@tonic-gate * We need to write before read as per 7.4.17 of system prog manual. 
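 * (writing the error status register latches the errors detected since
 * the previous write, so the value read back afterwards reflects the
 * most recent ones)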
1540*0Sstevel@tonic-gate * We do both and or the results to be safe 1541*0Sstevel@tonic-gate */ 1542*0Sstevel@tonic-gate error0 = apicadr[APIC_ERROR_STATUS]; 1543*0Sstevel@tonic-gate apicadr[APIC_ERROR_STATUS] = 0; 1544*0Sstevel@tonic-gate error1 = apicadr[APIC_ERROR_STATUS]; 1545*0Sstevel@tonic-gate error = error0 | error1; 1546*0Sstevel@tonic-gate 1547*0Sstevel@tonic-gate /* 1548*0Sstevel@tonic-gate * Prevent more than 1 CPU from handling error interrupt causing 1549*0Sstevel@tonic-gate * double printing (interleave of characters from multiple 1550*0Sstevel@tonic-gate * CPU's when using prom_printf) 1551*0Sstevel@tonic-gate */ 1552*0Sstevel@tonic-gate if (lock_try(&apic_error_lock) == 0) 1553*0Sstevel@tonic-gate return (error ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 1554*0Sstevel@tonic-gate if (error) { 1555*0Sstevel@tonic-gate #if DEBUG 1556*0Sstevel@tonic-gate if (apic_debug) 1557*0Sstevel@tonic-gate debug_enter("pcplusmp: APIC Error interrupt received"); 1558*0Sstevel@tonic-gate #endif /* DEBUG */ 1559*0Sstevel@tonic-gate if (apic_panic_on_apic_error) 1560*0Sstevel@tonic-gate cmn_err(CE_PANIC, 1561*0Sstevel@tonic-gate "APIC Error interrupt on CPU %d. Status = %x\n", 1562*0Sstevel@tonic-gate psm_get_cpu_id(), error); 1563*0Sstevel@tonic-gate else { 1564*0Sstevel@tonic-gate if ((error & ~APIC_CS_ERRORS) == 0) { 1565*0Sstevel@tonic-gate /* cksum error only */ 1566*0Sstevel@tonic-gate apic_error |= APIC_ERR_APIC_ERROR; 1567*0Sstevel@tonic-gate apic_apic_error |= error; 1568*0Sstevel@tonic-gate apic_num_apic_errors++; 1569*0Sstevel@tonic-gate apic_num_cksum_errors++; 1570*0Sstevel@tonic-gate } else { 1571*0Sstevel@tonic-gate /* 1572*0Sstevel@tonic-gate * prom_printf is the best shot we have of 1573*0Sstevel@tonic-gate * something which is problem free from 1574*0Sstevel@tonic-gate * high level/NMI type of interrupts 1575*0Sstevel@tonic-gate */ 1576*0Sstevel@tonic-gate prom_printf("APIC Error interrupt on CPU %d. " 1577*0Sstevel@tonic-gate "Status 0 = %x, Status 1 = %x\n", 1578*0Sstevel@tonic-gate psm_get_cpu_id(), error0, error1); 1579*0Sstevel@tonic-gate apic_error |= APIC_ERR_APIC_ERROR; 1580*0Sstevel@tonic-gate apic_apic_error |= error; 1581*0Sstevel@tonic-gate apic_num_apic_errors++; 1582*0Sstevel@tonic-gate for (i = 0; i < apic_error_display_delay; i++) { 1583*0Sstevel@tonic-gate tenmicrosec(); 1584*0Sstevel@tonic-gate } 1585*0Sstevel@tonic-gate /* 1586*0Sstevel@tonic-gate * provide more delay next time limited to 1587*0Sstevel@tonic-gate * roughly 1 clock tick time 1588*0Sstevel@tonic-gate */ 1589*0Sstevel@tonic-gate if (apic_error_display_delay < 500) 1590*0Sstevel@tonic-gate apic_error_display_delay *= 2; 1591*0Sstevel@tonic-gate } 1592*0Sstevel@tonic-gate } 1593*0Sstevel@tonic-gate lock_clear(&apic_error_lock); 1594*0Sstevel@tonic-gate return (DDI_INTR_CLAIMED); 1595*0Sstevel@tonic-gate } else { 1596*0Sstevel@tonic-gate lock_clear(&apic_error_lock); 1597*0Sstevel@tonic-gate return (DDI_INTR_UNCLAIMED); 1598*0Sstevel@tonic-gate } 1599*0Sstevel@tonic-gate /* NOTREACHED */ 1600*0Sstevel@tonic-gate } 1601*0Sstevel@tonic-gate 1602*0Sstevel@tonic-gate /* 1603*0Sstevel@tonic-gate * Turn off the mask bit in the performance counter Local Vector Table entry. 
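 * Clearing APIC_LVT_MASK re-enables delivery of the overflow interrupt
 * at the vector programmed into APIC_PCINT_VECT by apic_init_intr();
 * the cpc framework invokes this through kcpc_hw_enable_cpc_intr.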
1604*0Sstevel@tonic-gate */ 1605*0Sstevel@tonic-gate static void 1606*0Sstevel@tonic-gate apic_cpcovf_mask_clear(void) 1607*0Sstevel@tonic-gate { 1608*0Sstevel@tonic-gate apicadr[APIC_PCINT_VECT] &= ~APIC_LVT_MASK; 1609*0Sstevel@tonic-gate } 1610*0Sstevel@tonic-gate 1611*0Sstevel@tonic-gate static void 1612*0Sstevel@tonic-gate apic_init_intr() 1613*0Sstevel@tonic-gate { 1614*0Sstevel@tonic-gate processorid_t cpun = psm_get_cpu_id(); 1615*0Sstevel@tonic-gate 1616*0Sstevel@tonic-gate #if defined(__amd64) 1617*0Sstevel@tonic-gate setcr8((ulong_t)(APIC_MASK_ALL >> APIC_IPL_SHIFT)); 1618*0Sstevel@tonic-gate #else 1619*0Sstevel@tonic-gate apicadr[APIC_TASK_REG] = APIC_MASK_ALL; 1620*0Sstevel@tonic-gate #endif 1621*0Sstevel@tonic-gate 1622*0Sstevel@tonic-gate if (apic_flat_model) 1623*0Sstevel@tonic-gate apicadr[APIC_FORMAT_REG] = APIC_FLAT_MODEL; 1624*0Sstevel@tonic-gate else 1625*0Sstevel@tonic-gate apicadr[APIC_FORMAT_REG] = APIC_CLUSTER_MODEL; 1626*0Sstevel@tonic-gate apicadr[APIC_DEST_REG] = AV_HIGH_ORDER >> cpun; 1627*0Sstevel@tonic-gate 1628*0Sstevel@tonic-gate /* need to enable APIC before unmasking NMI */ 1629*0Sstevel@tonic-gate apicadr[APIC_SPUR_INT_REG] = AV_UNIT_ENABLE | APIC_SPUR_INTR; 1630*0Sstevel@tonic-gate 1631*0Sstevel@tonic-gate apicadr[APIC_LOCAL_TIMER] = AV_MASK; 1632*0Sstevel@tonic-gate apicadr[APIC_INT_VECT0] = AV_MASK; /* local intr reg 0 */ 1633*0Sstevel@tonic-gate apicadr[APIC_INT_VECT1] = AV_NMI; /* enable NMI */ 1634*0Sstevel@tonic-gate 1635*0Sstevel@tonic-gate if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) 1636*0Sstevel@tonic-gate return; 1637*0Sstevel@tonic-gate 1638*0Sstevel@tonic-gate /* Enable performance counter overflow interrupt */ 1639*0Sstevel@tonic-gate 1640*0Sstevel@tonic-gate if ((x86_feature & X86_MSR) != X86_MSR) 1641*0Sstevel@tonic-gate apic_enable_cpcovf_intr = 0; 1642*0Sstevel@tonic-gate if (apic_enable_cpcovf_intr) { 1643*0Sstevel@tonic-gate if (apic_cpcovf_vect == 0) { 1644*0Sstevel@tonic-gate int ipl = APIC_PCINT_IPL; 1645*0Sstevel@tonic-gate int irq = apic_get_ipivect(ipl, -1); 1646*0Sstevel@tonic-gate 1647*0Sstevel@tonic-gate ASSERT(irq != -1); 1648*0Sstevel@tonic-gate apic_cpcovf_vect = apic_irq_table[irq]->airq_vector; 1649*0Sstevel@tonic-gate ASSERT(apic_cpcovf_vect); 1650*0Sstevel@tonic-gate (void) add_avintr(NULL, ipl, 1651*0Sstevel@tonic-gate (avfunc)kcpc_hw_overflow_intr, 1652*0Sstevel@tonic-gate "apic pcint", irq, NULL, NULL, NULL); 1653*0Sstevel@tonic-gate kcpc_hw_overflow_intr_installed = 1; 1654*0Sstevel@tonic-gate kcpc_hw_enable_cpc_intr = apic_cpcovf_mask_clear; 1655*0Sstevel@tonic-gate } 1656*0Sstevel@tonic-gate apicadr[APIC_PCINT_VECT] = apic_cpcovf_vect; 1657*0Sstevel@tonic-gate } 1658*0Sstevel@tonic-gate 1659*0Sstevel@tonic-gate /* Enable error interrupt */ 1660*0Sstevel@tonic-gate 1661*0Sstevel@tonic-gate if (apic_enable_error_intr) { 1662*0Sstevel@tonic-gate if (apic_errvect == 0) { 1663*0Sstevel@tonic-gate int ipl = 0xf; /* get highest priority intr */ 1664*0Sstevel@tonic-gate int irq = apic_get_ipivect(ipl, -1); 1665*0Sstevel@tonic-gate 1666*0Sstevel@tonic-gate ASSERT(irq != -1); 1667*0Sstevel@tonic-gate apic_errvect = apic_irq_table[irq]->airq_vector; 1668*0Sstevel@tonic-gate ASSERT(apic_errvect); 1669*0Sstevel@tonic-gate /* 1670*0Sstevel@tonic-gate * Not PSMI compliant, but we are going to merge 1671*0Sstevel@tonic-gate * with ON anyway 1672*0Sstevel@tonic-gate */ 1673*0Sstevel@tonic-gate (void) add_avintr((void *)NULL, ipl, 1674*0Sstevel@tonic-gate (avfunc)apic_error_intr, "apic error intr", 
1675*0Sstevel@tonic-gate irq, NULL, NULL, NULL); 1676*0Sstevel@tonic-gate } 1677*0Sstevel@tonic-gate apicadr[APIC_ERR_VECT] = apic_errvect; 1678*0Sstevel@tonic-gate apicadr[APIC_ERROR_STATUS] = 0; 1679*0Sstevel@tonic-gate apicadr[APIC_ERROR_STATUS] = 0; 1680*0Sstevel@tonic-gate } 1681*0Sstevel@tonic-gate } 1682*0Sstevel@tonic-gate 1683*0Sstevel@tonic-gate static void 1684*0Sstevel@tonic-gate apic_disable_local_apic() 1685*0Sstevel@tonic-gate { 1686*0Sstevel@tonic-gate apicadr[APIC_TASK_REG] = APIC_MASK_ALL; 1687*0Sstevel@tonic-gate apicadr[APIC_LOCAL_TIMER] = AV_MASK; 1688*0Sstevel@tonic-gate apicadr[APIC_INT_VECT0] = AV_MASK; /* local intr reg 0 */ 1689*0Sstevel@tonic-gate apicadr[APIC_INT_VECT1] = AV_MASK; /* disable NMI */ 1690*0Sstevel@tonic-gate apicadr[APIC_ERR_VECT] = AV_MASK; /* and error interrupt */ 1691*0Sstevel@tonic-gate apicadr[APIC_PCINT_VECT] = AV_MASK; /* and perf counter intr */ 1692*0Sstevel@tonic-gate apicadr[APIC_SPUR_INT_REG] = APIC_SPUR_INTR; 1693*0Sstevel@tonic-gate } 1694*0Sstevel@tonic-gate 1695*0Sstevel@tonic-gate static void 1696*0Sstevel@tonic-gate apic_picinit(void) 1697*0Sstevel@tonic-gate { 1698*0Sstevel@tonic-gate int i, j; 1699*0Sstevel@tonic-gate uint_t isr; 1700*0Sstevel@tonic-gate volatile int32_t *ioapic; 1701*0Sstevel@tonic-gate apic_irq_t *irqptr; 1702*0Sstevel@tonic-gate 1703*0Sstevel@tonic-gate /* 1704*0Sstevel@tonic-gate * On UniSys Model 6520, the BIOS leaves vector 0x20 isr 1705*0Sstevel@tonic-gate * bit on without clearing it with EOI. Since softint 1706*0Sstevel@tonic-gate * uses vector 0x20 to interrupt itself, so softint will 1707*0Sstevel@tonic-gate * not work on this machine. In order to fix this problem 1708*0Sstevel@tonic-gate * a check is made to verify all the isr bits are clear. 1709*0Sstevel@tonic-gate * If not, EOIs are issued to clear the bits. 
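 * The in-service state is spread across eight 32-bit registers (256
 * vector bits) and each EOI retires the highest priority bit still set,
 * hence one EOI per set bit in the loop below.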
1710*0Sstevel@tonic-gate */ 1711*0Sstevel@tonic-gate for (i = 7; i >= 1; i--) { 1712*0Sstevel@tonic-gate if ((isr = apicadr[APIC_ISR_REG + (i * 4)]) != 0) 1713*0Sstevel@tonic-gate for (j = 0; ((j < 32) && (isr != 0)); j++) 1714*0Sstevel@tonic-gate if (isr & (1 << j)) { 1715*0Sstevel@tonic-gate apicadr[APIC_EOI_REG] = 0; 1716*0Sstevel@tonic-gate isr &= ~(1 << j); 1717*0Sstevel@tonic-gate apic_error |= APIC_ERR_BOOT_EOI; 1718*0Sstevel@tonic-gate } 1719*0Sstevel@tonic-gate } 1720*0Sstevel@tonic-gate 1721*0Sstevel@tonic-gate /* set a flag so we know we have run apic_picinit() */ 1722*0Sstevel@tonic-gate apic_flag = 1; 1723*0Sstevel@tonic-gate LOCK_INIT_CLEAR(&apic_gethrtime_lock); 1724*0Sstevel@tonic-gate LOCK_INIT_CLEAR(&apic_ioapic_lock); 1725*0Sstevel@tonic-gate LOCK_INIT_CLEAR(&apic_revector_lock); 1726*0Sstevel@tonic-gate LOCK_INIT_CLEAR(&apic_ioapic_reprogram_lock); 1727*0Sstevel@tonic-gate LOCK_INIT_CLEAR(&apic_error_lock); 1728*0Sstevel@tonic-gate 1729*0Sstevel@tonic-gate picsetup(); /* initialise the 8259 */ 1730*0Sstevel@tonic-gate 1731*0Sstevel@tonic-gate /* add nmi handler - least priority nmi handler */ 1732*0Sstevel@tonic-gate LOCK_INIT_CLEAR(&apic_nmi_lock); 1733*0Sstevel@tonic-gate 1734*0Sstevel@tonic-gate if (!psm_add_nmintr(0, (avfunc) apic_nmi_intr, 1735*0Sstevel@tonic-gate "pcplusmp NMI handler", (caddr_t)NULL)) 1736*0Sstevel@tonic-gate cmn_err(CE_WARN, "pcplusmp: Unable to add nmi handler"); 1737*0Sstevel@tonic-gate 1738*0Sstevel@tonic-gate apic_init_intr(); 1739*0Sstevel@tonic-gate 1740*0Sstevel@tonic-gate /* enable apic mode if imcr present */ 1741*0Sstevel@tonic-gate if (apic_imcrp) { 1742*0Sstevel@tonic-gate outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT); 1743*0Sstevel@tonic-gate outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC); 1744*0Sstevel@tonic-gate } 1745*0Sstevel@tonic-gate 1746*0Sstevel@tonic-gate /* mask interrupt vectors */ 1747*0Sstevel@tonic-gate for (j = 0; j < apic_io_max; j++) { 1748*0Sstevel@tonic-gate int intin_max; 1749*0Sstevel@tonic-gate ioapic = apicioadr[j]; 1750*0Sstevel@tonic-gate ioapic[APIC_IO_REG] = APIC_VERS_CMD; 1751*0Sstevel@tonic-gate /* Bits 23-16 define the maximum redirection entries */ 1752*0Sstevel@tonic-gate intin_max = (ioapic[APIC_IO_DATA] >> 16) & 0xff; 1753*0Sstevel@tonic-gate for (i = 0; i < intin_max; i++) { 1754*0Sstevel@tonic-gate ioapic[APIC_IO_REG] = APIC_RDT_CMD + 2 * i; 1755*0Sstevel@tonic-gate ioapic[APIC_IO_DATA] = AV_MASK; 1756*0Sstevel@tonic-gate } 1757*0Sstevel@tonic-gate } 1758*0Sstevel@tonic-gate 1759*0Sstevel@tonic-gate /* 1760*0Sstevel@tonic-gate * Hack alert: deal with ACPI SCI interrupt chicken/egg here 1761*0Sstevel@tonic-gate */ 1762*0Sstevel@tonic-gate if (apic_sci_vect >= 0) { 1763*0Sstevel@tonic-gate /* 1764*0Sstevel@tonic-gate * acpica has already done add_avintr(); we just 1765*0Sstevel@tonic-gate * to finish the job by mimicing translate_irq() 1766*0Sstevel@tonic-gate */ 1767*0Sstevel@tonic-gate if (apic_setup_sci_irq_table(apic_sci_vect, SCI_IPL, 1768*0Sstevel@tonic-gate &apic_sci_flags) < 0) { 1769*0Sstevel@tonic-gate cmn_err(CE_WARN, "!apic: SCI setup failed"); 1770*0Sstevel@tonic-gate return; 1771*0Sstevel@tonic-gate } 1772*0Sstevel@tonic-gate irqptr = apic_irq_table[apic_sci_vect]; 1773*0Sstevel@tonic-gate 1774*0Sstevel@tonic-gate /* Assert we're the sole entry in the list */ 1775*0Sstevel@tonic-gate ASSERT(irqptr != NULL); 1776*0Sstevel@tonic-gate ASSERT(irqptr->airq_next == NULL); 1777*0Sstevel@tonic-gate 1778*0Sstevel@tonic-gate /* Program I/O APIC */ 1779*0Sstevel@tonic-gate (void) 
apic_setup_io_intr(irqptr, apic_sci_vect); 1780*0Sstevel@tonic-gate } 1781*0Sstevel@tonic-gate } 1782*0Sstevel@tonic-gate 1783*0Sstevel@tonic-gate 1784*0Sstevel@tonic-gate static void 1785*0Sstevel@tonic-gate apic_cpu_start(processorid_t cpun, caddr_t rm_code) 1786*0Sstevel@tonic-gate { 1787*0Sstevel@tonic-gate int loop_count; 1788*0Sstevel@tonic-gate uint32_t vector; 1789*0Sstevel@tonic-gate uint_t cpu_id, iflag; 1790*0Sstevel@tonic-gate 1791*0Sstevel@tonic-gate cpu_id = apic_cpus[cpun].aci_local_id; 1792*0Sstevel@tonic-gate 1793*0Sstevel@tonic-gate apic_cmos_ssb_set = 1; 1794*0Sstevel@tonic-gate 1795*0Sstevel@tonic-gate /* 1796*0Sstevel@tonic-gate * Interrupts on BSP cpu will be disabled during these startup 1797*0Sstevel@tonic-gate * steps in order to avoid unwanted side effects from 1798*0Sstevel@tonic-gate * executing interrupt handlers on a problematic BIOS. 1799*0Sstevel@tonic-gate */ 1800*0Sstevel@tonic-gate 1801*0Sstevel@tonic-gate iflag = intr_clear(); 1802*0Sstevel@tonic-gate outb(CMOS_ADDR, SSB); 1803*0Sstevel@tonic-gate outb(CMOS_DATA, BIOS_SHUTDOWN); 1804*0Sstevel@tonic-gate 1805*0Sstevel@tonic-gate while (get_apic_cmd1() & AV_PENDING) 1806*0Sstevel@tonic-gate apic_ret(); 1807*0Sstevel@tonic-gate 1808*0Sstevel@tonic-gate /* for integrated - make sure there is one INIT IPI in buffer */ 1809*0Sstevel@tonic-gate /* for external - it will wake up the cpu */ 1810*0Sstevel@tonic-gate apicadr[APIC_INT_CMD2] = cpu_id << APIC_ICR_ID_BIT_OFFSET; 1811*0Sstevel@tonic-gate apicadr[APIC_INT_CMD1] = AV_ASSERT | AV_RESET; 1812*0Sstevel@tonic-gate 1813*0Sstevel@tonic-gate /* If only 1 CPU is installed, PENDING bit will not go low */ 1814*0Sstevel@tonic-gate for (loop_count = 0x1000; loop_count; loop_count--) 1815*0Sstevel@tonic-gate if (get_apic_cmd1() & AV_PENDING) 1816*0Sstevel@tonic-gate apic_ret(); 1817*0Sstevel@tonic-gate else 1818*0Sstevel@tonic-gate break; 1819*0Sstevel@tonic-gate 1820*0Sstevel@tonic-gate apicadr[APIC_INT_CMD2] = cpu_id << APIC_ICR_ID_BIT_OFFSET; 1821*0Sstevel@tonic-gate apicadr[APIC_INT_CMD1] = AV_DEASSERT | AV_RESET; 1822*0Sstevel@tonic-gate 1823*0Sstevel@tonic-gate drv_usecwait(20000); /* 20 milli sec */ 1824*0Sstevel@tonic-gate 1825*0Sstevel@tonic-gate if (apic_cpus[cpun].aci_local_ver >= APIC_INTEGRATED_VERS) { 1826*0Sstevel@tonic-gate /* integrated apic */ 1827*0Sstevel@tonic-gate 1828*0Sstevel@tonic-gate rm_code = (caddr_t)(uintptr_t)rm_platter_pa; 1829*0Sstevel@tonic-gate vector = (rm_platter_pa >> MMU_PAGESHIFT) & 1830*0Sstevel@tonic-gate (APIC_VECTOR_MASK | APIC_IPL_MASK); 1831*0Sstevel@tonic-gate 1832*0Sstevel@tonic-gate /* to offset the INIT IPI queue up in the buffer */ 1833*0Sstevel@tonic-gate apicadr[APIC_INT_CMD2] = cpu_id << APIC_ICR_ID_BIT_OFFSET; 1834*0Sstevel@tonic-gate apicadr[APIC_INT_CMD1] = vector | AV_STARTUP; 1835*0Sstevel@tonic-gate 1836*0Sstevel@tonic-gate drv_usecwait(200); /* 20 micro sec */ 1837*0Sstevel@tonic-gate 1838*0Sstevel@tonic-gate apicadr[APIC_INT_CMD2] = cpu_id << APIC_ICR_ID_BIT_OFFSET; 1839*0Sstevel@tonic-gate apicadr[APIC_INT_CMD1] = vector | AV_STARTUP; 1840*0Sstevel@tonic-gate 1841*0Sstevel@tonic-gate drv_usecwait(200); /* 20 micro sec */ 1842*0Sstevel@tonic-gate } 1843*0Sstevel@tonic-gate intr_restore(iflag); 1844*0Sstevel@tonic-gate } 1845*0Sstevel@tonic-gate 1846*0Sstevel@tonic-gate 1847*0Sstevel@tonic-gate #ifdef DEBUG 1848*0Sstevel@tonic-gate int apic_break_on_cpu = 9; 1849*0Sstevel@tonic-gate int apic_stretch_interrupts = 0; 1850*0Sstevel@tonic-gate int apic_stretch_ISR = 1 << 3; /* IPL of 3 matches nothing now */ 
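/*
 * These knobs are consulted from apic_intr_enter(): when
 * apic_stretch_interrupts is nonzero, handlers whose ipl is selected by
 * the apic_stretch_ISR mask are delayed by that many microseconds, and
 * apic_break() is called when running on apic_break_on_cpu so that a
 * debugger breakpoint can be planted there.
 */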
1851*0Sstevel@tonic-gate 1852*0Sstevel@tonic-gate void 1853*0Sstevel@tonic-gate apic_break() 1854*0Sstevel@tonic-gate { 1855*0Sstevel@tonic-gate } 1856*0Sstevel@tonic-gate #endif /* DEBUG */ 1857*0Sstevel@tonic-gate 1858*0Sstevel@tonic-gate /* 1859*0Sstevel@tonic-gate * platform_intr_enter 1860*0Sstevel@tonic-gate * 1861*0Sstevel@tonic-gate * Called at the beginning of the interrupt service routine to 1862*0Sstevel@tonic-gate * mask all level equal to and below the interrupt priority 1863*0Sstevel@tonic-gate * of the interrupting vector. An EOI should be given to 1864*0Sstevel@tonic-gate * the interrupt controller to enable other HW interrupts. 1865*0Sstevel@tonic-gate * 1866*0Sstevel@tonic-gate * Return -1 for spurious interrupts 1867*0Sstevel@tonic-gate * 1868*0Sstevel@tonic-gate */ 1869*0Sstevel@tonic-gate /*ARGSUSED*/ 1870*0Sstevel@tonic-gate static int 1871*0Sstevel@tonic-gate apic_intr_enter(int ipl, int *vectorp) 1872*0Sstevel@tonic-gate { 1873*0Sstevel@tonic-gate uchar_t vector; 1874*0Sstevel@tonic-gate int nipl; 1875*0Sstevel@tonic-gate int irq, iflag; 1876*0Sstevel@tonic-gate apic_cpus_info_t *cpu_infop; 1877*0Sstevel@tonic-gate 1878*0Sstevel@tonic-gate /* 1879*0Sstevel@tonic-gate * The real vector programmed in APIC is *vectorp + 0x20 1880*0Sstevel@tonic-gate * But, cmnint code subtracts 0x20 before pushing it. 1881*0Sstevel@tonic-gate * Hence APIC_BASE_VECT is 0x20. 1882*0Sstevel@tonic-gate */ 1883*0Sstevel@tonic-gate 1884*0Sstevel@tonic-gate vector = (uchar_t)*vectorp; 1885*0Sstevel@tonic-gate 1886*0Sstevel@tonic-gate /* if interrupted by the clock, increment apic_nsec_since_boot */ 1887*0Sstevel@tonic-gate if (vector == apic_clkvect) { 1888*0Sstevel@tonic-gate if (!apic_oneshot) { 1889*0Sstevel@tonic-gate /* NOTE: this is not MT aware */ 1890*0Sstevel@tonic-gate apic_hrtime_stamp++; 1891*0Sstevel@tonic-gate apic_nsec_since_boot += apic_nsec_per_intr; 1892*0Sstevel@tonic-gate apic_hrtime_stamp++; 1893*0Sstevel@tonic-gate last_count_read = apic_hertz_count; 1894*0Sstevel@tonic-gate apic_redistribute_compute(); 1895*0Sstevel@tonic-gate } 1896*0Sstevel@tonic-gate 1897*0Sstevel@tonic-gate /* We will avoid all the book keeping overhead for clock */ 1898*0Sstevel@tonic-gate nipl = apic_vectortoipl[vector >> APIC_IPL_SHIFT]; 1899*0Sstevel@tonic-gate #if defined(__amd64) 1900*0Sstevel@tonic-gate setcr8((ulong_t)apic_cr8pri[nipl]); 1901*0Sstevel@tonic-gate #else 1902*0Sstevel@tonic-gate apicadr[APIC_TASK_REG] = apic_ipltopri[nipl]; 1903*0Sstevel@tonic-gate #endif 1904*0Sstevel@tonic-gate *vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT]; 1905*0Sstevel@tonic-gate apicadr[APIC_EOI_REG] = 0; 1906*0Sstevel@tonic-gate return (nipl); 1907*0Sstevel@tonic-gate } 1908*0Sstevel@tonic-gate 1909*0Sstevel@tonic-gate cpu_infop = &apic_cpus[psm_get_cpu_id()]; 1910*0Sstevel@tonic-gate 1911*0Sstevel@tonic-gate if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) { 1912*0Sstevel@tonic-gate cpu_infop->aci_spur_cnt++; 1913*0Sstevel@tonic-gate return (APIC_INT_SPURIOUS); 1914*0Sstevel@tonic-gate } 1915*0Sstevel@tonic-gate 1916*0Sstevel@tonic-gate /* Check if the vector we got is really what we need */ 1917*0Sstevel@tonic-gate if (apic_revector_pending) { 1918*0Sstevel@tonic-gate /* 1919*0Sstevel@tonic-gate * Disable interrupts for the duration of 1920*0Sstevel@tonic-gate * the vector translation to prevent a self-race for 1921*0Sstevel@tonic-gate * the apic_revector_lock. 
This cannot be done 1922*0Sstevel@tonic-gate * in apic_xlate_vector because it is recursive and 1923*0Sstevel@tonic-gate * we want the vector translation to be atomic with 1924*0Sstevel@tonic-gate * respect to other (higher-priority) interrupts. 1925*0Sstevel@tonic-gate */ 1926*0Sstevel@tonic-gate iflag = intr_clear(); 1927*0Sstevel@tonic-gate vector = apic_xlate_vector(vector + APIC_BASE_VECT) - 1928*0Sstevel@tonic-gate APIC_BASE_VECT; 1929*0Sstevel@tonic-gate intr_restore(iflag); 1930*0Sstevel@tonic-gate } 1931*0Sstevel@tonic-gate 1932*0Sstevel@tonic-gate nipl = apic_vectortoipl[vector >> APIC_IPL_SHIFT]; 1933*0Sstevel@tonic-gate *vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT]; 1934*0Sstevel@tonic-gate 1935*0Sstevel@tonic-gate #if defined(__amd64) 1936*0Sstevel@tonic-gate setcr8((ulong_t)apic_cr8pri[nipl]); 1937*0Sstevel@tonic-gate #else 1938*0Sstevel@tonic-gate apicadr[APIC_TASK_REG] = apic_ipltopri[nipl]; 1939*0Sstevel@tonic-gate #endif 1940*0Sstevel@tonic-gate 1941*0Sstevel@tonic-gate cpu_infop->aci_current[nipl] = (uchar_t)irq; 1942*0Sstevel@tonic-gate cpu_infop->aci_curipl = (uchar_t)nipl; 1943*0Sstevel@tonic-gate cpu_infop->aci_ISR_in_progress |= 1 << nipl; 1944*0Sstevel@tonic-gate 1945*0Sstevel@tonic-gate /* 1946*0Sstevel@tonic-gate * apic_level_intr could have been assimilated into the irq struct. 1947*0Sstevel@tonic-gate * but, having it as a character array is more efficient in terms of 1948*0Sstevel@tonic-gate * cache usage. So, we leave it as is. 1949*0Sstevel@tonic-gate */ 1950*0Sstevel@tonic-gate if (!apic_level_intr[irq]) 1951*0Sstevel@tonic-gate apicadr[APIC_EOI_REG] = 0; 1952*0Sstevel@tonic-gate 1953*0Sstevel@tonic-gate #ifdef DEBUG 1954*0Sstevel@tonic-gate APIC_DEBUG_BUF_PUT(vector); 1955*0Sstevel@tonic-gate APIC_DEBUG_BUF_PUT(irq); 1956*0Sstevel@tonic-gate APIC_DEBUG_BUF_PUT(nipl); 1957*0Sstevel@tonic-gate APIC_DEBUG_BUF_PUT(psm_get_cpu_id()); 1958*0Sstevel@tonic-gate if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl))) 1959*0Sstevel@tonic-gate drv_usecwait(apic_stretch_interrupts); 1960*0Sstevel@tonic-gate 1961*0Sstevel@tonic-gate if (apic_break_on_cpu == psm_get_cpu_id()) 1962*0Sstevel@tonic-gate apic_break(); 1963*0Sstevel@tonic-gate #endif /* DEBUG */ 1964*0Sstevel@tonic-gate return (nipl); 1965*0Sstevel@tonic-gate } 1966*0Sstevel@tonic-gate 1967*0Sstevel@tonic-gate static void 1968*0Sstevel@tonic-gate apic_intr_exit(int prev_ipl, int irq) 1969*0Sstevel@tonic-gate { 1970*0Sstevel@tonic-gate apic_cpus_info_t *cpu_infop; 1971*0Sstevel@tonic-gate 1972*0Sstevel@tonic-gate #if defined(__amd64) 1973*0Sstevel@tonic-gate setcr8((ulong_t)apic_cr8pri[prev_ipl]); 1974*0Sstevel@tonic-gate #else 1975*0Sstevel@tonic-gate apicadr[APIC_TASK_REG] = apic_ipltopri[prev_ipl]; 1976*0Sstevel@tonic-gate #endif 1977*0Sstevel@tonic-gate 1978*0Sstevel@tonic-gate cpu_infop = &apic_cpus[psm_get_cpu_id()]; 1979*0Sstevel@tonic-gate if (apic_level_intr[irq]) 1980*0Sstevel@tonic-gate apicadr[APIC_EOI_REG] = 0; 1981*0Sstevel@tonic-gate 1982*0Sstevel@tonic-gate cpu_infop->aci_curipl = (uchar_t)prev_ipl; 1983*0Sstevel@tonic-gate /* ISR above current pri could not be in progress */ 1984*0Sstevel@tonic-gate cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; 1985*0Sstevel@tonic-gate } 1986*0Sstevel@tonic-gate 1987*0Sstevel@tonic-gate /* 1988*0Sstevel@tonic-gate * Mask all interrupts below or equal to the given IPL 1989*0Sstevel@tonic-gate */ 1990*0Sstevel@tonic-gate static void 1991*0Sstevel@tonic-gate apic_setspl(int ipl) 1992*0Sstevel@tonic-gate { 
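	/*
	 * Raising the local apic task priority to apic_ipltopri[ipl] holds
	 * pending every vector whose priority class is at or below the one
	 * assigned to this ipl; on amd64 the class is written through %cr8,
	 * which maps to TPR bits 7:4.
	 */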
1993*0Sstevel@tonic-gate 1994*0Sstevel@tonic-gate #if defined(__amd64) 1995*0Sstevel@tonic-gate setcr8((ulong_t)apic_cr8pri[ipl]); 1996*0Sstevel@tonic-gate #else 1997*0Sstevel@tonic-gate apicadr[APIC_TASK_REG] = apic_ipltopri[ipl]; 1998*0Sstevel@tonic-gate #endif 1999*0Sstevel@tonic-gate 2000*0Sstevel@tonic-gate /* interrupts at ipl above this cannot be in progress */ 2001*0Sstevel@tonic-gate apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1; 2002*0Sstevel@tonic-gate /* 2003*0Sstevel@tonic-gate * this is a patch fix for the ALR QSMP P5 machine, so that interrupts 2004*0Sstevel@tonic-gate * have enough time to come in before the priority is raised again 2005*0Sstevel@tonic-gate * during the idle() loop. 2006*0Sstevel@tonic-gate */ 2007*0Sstevel@tonic-gate if (apic_setspl_delay) 2008*0Sstevel@tonic-gate (void) get_apic_pri(); 2009*0Sstevel@tonic-gate } 2010*0Sstevel@tonic-gate 2011*0Sstevel@tonic-gate /* 2012*0Sstevel@tonic-gate * trigger a software interrupt at the given IPL 2013*0Sstevel@tonic-gate */ 2014*0Sstevel@tonic-gate static void 2015*0Sstevel@tonic-gate apic_set_softintr(int ipl) 2016*0Sstevel@tonic-gate { 2017*0Sstevel@tonic-gate int vector; 2018*0Sstevel@tonic-gate uint_t flag; 2019*0Sstevel@tonic-gate 2020*0Sstevel@tonic-gate vector = apic_resv_vector[ipl]; 2021*0Sstevel@tonic-gate 2022*0Sstevel@tonic-gate flag = intr_clear(); 2023*0Sstevel@tonic-gate 2024*0Sstevel@tonic-gate while (get_apic_cmd1() & AV_PENDING) 2025*0Sstevel@tonic-gate apic_ret(); 2026*0Sstevel@tonic-gate 2027*0Sstevel@tonic-gate /* generate interrupt at vector on itself only */ 2028*0Sstevel@tonic-gate apicadr[APIC_INT_CMD1] = AV_SH_SELF | vector; 2029*0Sstevel@tonic-gate 2030*0Sstevel@tonic-gate intr_restore(flag); 2031*0Sstevel@tonic-gate } 2032*0Sstevel@tonic-gate 2033*0Sstevel@tonic-gate /* 2034*0Sstevel@tonic-gate * generates an interprocessor interrupt to another CPU 2035*0Sstevel@tonic-gate */ 2036*0Sstevel@tonic-gate static void 2037*0Sstevel@tonic-gate apic_send_ipi(int cpun, int ipl) 2038*0Sstevel@tonic-gate { 2039*0Sstevel@tonic-gate int vector; 2040*0Sstevel@tonic-gate uint_t flag; 2041*0Sstevel@tonic-gate 2042*0Sstevel@tonic-gate vector = apic_resv_vector[ipl]; 2043*0Sstevel@tonic-gate 2044*0Sstevel@tonic-gate flag = intr_clear(); 2045*0Sstevel@tonic-gate 2046*0Sstevel@tonic-gate while (get_apic_cmd1() & AV_PENDING) 2047*0Sstevel@tonic-gate apic_ret(); 2048*0Sstevel@tonic-gate 2049*0Sstevel@tonic-gate apicadr[APIC_INT_CMD2] = 2050*0Sstevel@tonic-gate apic_cpus[cpun].aci_local_id << APIC_ICR_ID_BIT_OFFSET; 2051*0Sstevel@tonic-gate apicadr[APIC_INT_CMD1] = vector; 2052*0Sstevel@tonic-gate 2053*0Sstevel@tonic-gate intr_restore(flag); 2054*0Sstevel@tonic-gate } 2055*0Sstevel@tonic-gate 2056*0Sstevel@tonic-gate 2057*0Sstevel@tonic-gate /*ARGSUSED*/ 2058*0Sstevel@tonic-gate static void 2059*0Sstevel@tonic-gate apic_set_idlecpu(processorid_t cpun) 2060*0Sstevel@tonic-gate { 2061*0Sstevel@tonic-gate } 2062*0Sstevel@tonic-gate 2063*0Sstevel@tonic-gate /*ARGSUSED*/ 2064*0Sstevel@tonic-gate static void 2065*0Sstevel@tonic-gate apic_unset_idlecpu(processorid_t cpun) 2066*0Sstevel@tonic-gate { 2067*0Sstevel@tonic-gate } 2068*0Sstevel@tonic-gate 2069*0Sstevel@tonic-gate 2070*0Sstevel@tonic-gate static void 2071*0Sstevel@tonic-gate apic_ret() 2072*0Sstevel@tonic-gate { 2073*0Sstevel@tonic-gate } 2074*0Sstevel@tonic-gate 2075*0Sstevel@tonic-gate static int 2076*0Sstevel@tonic-gate get_apic_cmd1() 2077*0Sstevel@tonic-gate { 2078*0Sstevel@tonic-gate return (apicadr[APIC_INT_CMD1]); 
2079*0Sstevel@tonic-gate } 2080*0Sstevel@tonic-gate 2081*0Sstevel@tonic-gate static int 2082*0Sstevel@tonic-gate get_apic_pri() 2083*0Sstevel@tonic-gate { 2084*0Sstevel@tonic-gate #if defined(__amd64) 2085*0Sstevel@tonic-gate return ((int)getcr8()); 2086*0Sstevel@tonic-gate #else 2087*0Sstevel@tonic-gate return (apicadr[APIC_TASK_REG]); 2088*0Sstevel@tonic-gate #endif 2089*0Sstevel@tonic-gate } 2090*0Sstevel@tonic-gate 2091*0Sstevel@tonic-gate /* 2092*0Sstevel@tonic-gate * If apic_coarse_time == 1, then apic_gettime() is used instead of 2093*0Sstevel@tonic-gate * apic_gethrtime(). This is used for performance instead of accuracy. 2094*0Sstevel@tonic-gate */ 2095*0Sstevel@tonic-gate 2096*0Sstevel@tonic-gate static hrtime_t 2097*0Sstevel@tonic-gate apic_gettime() 2098*0Sstevel@tonic-gate { 2099*0Sstevel@tonic-gate int old_hrtime_stamp; 2100*0Sstevel@tonic-gate hrtime_t temp; 2101*0Sstevel@tonic-gate 2102*0Sstevel@tonic-gate /* 2103*0Sstevel@tonic-gate * In one-shot mode, we do not keep time, so if anyone 2104*0Sstevel@tonic-gate * calls psm_gettime() directly, we vector over to 2105*0Sstevel@tonic-gate * gethrtime(). 2106*0Sstevel@tonic-gate * one-shot mode MUST NOT be enabled if this psm is the source of 2107*0Sstevel@tonic-gate * hrtime. 2108*0Sstevel@tonic-gate */ 2109*0Sstevel@tonic-gate 2110*0Sstevel@tonic-gate if (apic_oneshot) 2111*0Sstevel@tonic-gate return (gethrtime()); 2112*0Sstevel@tonic-gate 2113*0Sstevel@tonic-gate 2114*0Sstevel@tonic-gate gettime_again: 2115*0Sstevel@tonic-gate while ((old_hrtime_stamp = apic_hrtime_stamp) & 1) 2116*0Sstevel@tonic-gate apic_ret(); 2117*0Sstevel@tonic-gate 2118*0Sstevel@tonic-gate temp = apic_nsec_since_boot; 2119*0Sstevel@tonic-gate 2120*0Sstevel@tonic-gate if (apic_hrtime_stamp != old_hrtime_stamp) { /* got an interrupt */ 2121*0Sstevel@tonic-gate goto gettime_again; 2122*0Sstevel@tonic-gate } 2123*0Sstevel@tonic-gate return (temp); 2124*0Sstevel@tonic-gate } 2125*0Sstevel@tonic-gate 2126*0Sstevel@tonic-gate /* 2127*0Sstevel@tonic-gate * Here we return the number of nanoseconds since booting. Note every 2128*0Sstevel@tonic-gate * clock interrupt increments apic_nsec_since_boot by the appropriate 2129*0Sstevel@tonic-gate * amount. 2130*0Sstevel@tonic-gate */ 2131*0Sstevel@tonic-gate static hrtime_t 2132*0Sstevel@tonic-gate apic_gethrtime() 2133*0Sstevel@tonic-gate { 2134*0Sstevel@tonic-gate int curr_timeval, countval, elapsed_ticks, oflags; 2135*0Sstevel@tonic-gate int old_hrtime_stamp, status; 2136*0Sstevel@tonic-gate hrtime_t temp; 2137*0Sstevel@tonic-gate uchar_t cpun; 2138*0Sstevel@tonic-gate 2139*0Sstevel@tonic-gate 2140*0Sstevel@tonic-gate /* 2141*0Sstevel@tonic-gate * In one-shot mode, we do not keep time, so if anyone 2142*0Sstevel@tonic-gate * calls psm_gethrtime() directly, we vector over to 2143*0Sstevel@tonic-gate * gethrtime(). 2144*0Sstevel@tonic-gate * one-shot mode MUST NOT be enabled if this psm is the source of 2145*0Sstevel@tonic-gate * hrtime. 
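 * Otherwise the value returned is apic_nsec_since_boot, which the clock
 * interrupt advances, plus the part of the current tick that has already
 * elapsed: (apic_hertz_count - current count) * apic_nsec_per_tick, with
 * the count taken from cpu 0's local apic (via a remote read when we are
 * running on another cpu).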
2146*0Sstevel@tonic-gate */ 2147*0Sstevel@tonic-gate 2148*0Sstevel@tonic-gate if (apic_oneshot) 2149*0Sstevel@tonic-gate return (gethrtime()); 2150*0Sstevel@tonic-gate 2151*0Sstevel@tonic-gate oflags = intr_clear(); /* prevent migration */ 2152*0Sstevel@tonic-gate 2153*0Sstevel@tonic-gate cpun = (uchar_t)((uint_t)apicadr[APIC_LID_REG] >> APIC_ID_BIT_OFFSET); 2154*0Sstevel@tonic-gate 2155*0Sstevel@tonic-gate lock_set(&apic_gethrtime_lock); 2156*0Sstevel@tonic-gate 2157*0Sstevel@tonic-gate gethrtime_again: 2158*0Sstevel@tonic-gate while ((old_hrtime_stamp = apic_hrtime_stamp) & 1) 2159*0Sstevel@tonic-gate apic_ret(); 2160*0Sstevel@tonic-gate 2161*0Sstevel@tonic-gate /* 2162*0Sstevel@tonic-gate * Check to see which CPU we are on. Note the time is kept on 2163*0Sstevel@tonic-gate * the local APIC of CPU 0. If on CPU 0, simply read the current 2164*0Sstevel@tonic-gate * counter. If on another CPU, issue a remote read command to CPU 0. 2165*0Sstevel@tonic-gate */ 2166*0Sstevel@tonic-gate if (cpun == apic_cpus[0].aci_local_id) { 2167*0Sstevel@tonic-gate countval = apicadr[APIC_CURR_COUNT]; 2168*0Sstevel@tonic-gate } else { 2169*0Sstevel@tonic-gate while (get_apic_cmd1() & AV_PENDING) 2170*0Sstevel@tonic-gate apic_ret(); 2171*0Sstevel@tonic-gate 2172*0Sstevel@tonic-gate apicadr[APIC_INT_CMD2] = 2173*0Sstevel@tonic-gate apic_cpus[0].aci_local_id << APIC_ICR_ID_BIT_OFFSET; 2174*0Sstevel@tonic-gate apicadr[APIC_INT_CMD1] = APIC_CURR_ADD|AV_REMOTE; 2175*0Sstevel@tonic-gate 2176*0Sstevel@tonic-gate while ((status = get_apic_cmd1()) & AV_READ_PENDING) 2177*0Sstevel@tonic-gate apic_ret(); 2178*0Sstevel@tonic-gate 2179*0Sstevel@tonic-gate if (status & AV_REMOTE_STATUS) /* 1 = valid */ 2180*0Sstevel@tonic-gate countval = apicadr[APIC_REMOTE_READ]; 2181*0Sstevel@tonic-gate else { /* 0 = invalid */ 2182*0Sstevel@tonic-gate apic_remote_hrterr++; 2183*0Sstevel@tonic-gate /* 2184*0Sstevel@tonic-gate * return last hrtime right now, will need more 2185*0Sstevel@tonic-gate * testing if change to retry 2186*0Sstevel@tonic-gate */ 2187*0Sstevel@tonic-gate temp = apic_last_hrtime; 2188*0Sstevel@tonic-gate 2189*0Sstevel@tonic-gate lock_clear(&apic_gethrtime_lock); 2190*0Sstevel@tonic-gate 2191*0Sstevel@tonic-gate intr_restore(oflags); 2192*0Sstevel@tonic-gate 2193*0Sstevel@tonic-gate return (temp); 2194*0Sstevel@tonic-gate } 2195*0Sstevel@tonic-gate } 2196*0Sstevel@tonic-gate if (countval > last_count_read) 2197*0Sstevel@tonic-gate countval = 0; 2198*0Sstevel@tonic-gate else 2199*0Sstevel@tonic-gate last_count_read = countval; 2200*0Sstevel@tonic-gate 2201*0Sstevel@tonic-gate elapsed_ticks = apic_hertz_count - countval; 2202*0Sstevel@tonic-gate 2203*0Sstevel@tonic-gate curr_timeval = elapsed_ticks * apic_nsec_per_tick; 2204*0Sstevel@tonic-gate temp = apic_nsec_since_boot + curr_timeval; 2205*0Sstevel@tonic-gate 2206*0Sstevel@tonic-gate if (apic_hrtime_stamp != old_hrtime_stamp) { /* got an interrupt */ 2207*0Sstevel@tonic-gate /* we might have clobbered last_count_read. 
Restore it */ 2208*0Sstevel@tonic-gate last_count_read = apic_hertz_count; 2209*0Sstevel@tonic-gate goto gethrtime_again; 2210*0Sstevel@tonic-gate } 2211*0Sstevel@tonic-gate 2212*0Sstevel@tonic-gate if (temp < apic_last_hrtime) { 2213*0Sstevel@tonic-gate /* return last hrtime if error occurs */ 2214*0Sstevel@tonic-gate apic_hrtime_error++; 2215*0Sstevel@tonic-gate temp = apic_last_hrtime; 2216*0Sstevel@tonic-gate } 2217*0Sstevel@tonic-gate else 2218*0Sstevel@tonic-gate apic_last_hrtime = temp; 2219*0Sstevel@tonic-gate 2220*0Sstevel@tonic-gate lock_clear(&apic_gethrtime_lock); 2221*0Sstevel@tonic-gate intr_restore(oflags); 2222*0Sstevel@tonic-gate 2223*0Sstevel@tonic-gate return (temp); 2224*0Sstevel@tonic-gate } 2225*0Sstevel@tonic-gate 2226*0Sstevel@tonic-gate /* apic NMI handler */ 2227*0Sstevel@tonic-gate /*ARGSUSED*/ 2228*0Sstevel@tonic-gate static void 2229*0Sstevel@tonic-gate apic_nmi_intr(caddr_t arg) 2230*0Sstevel@tonic-gate { 2231*0Sstevel@tonic-gate if (apic_shutdown_processors) { 2232*0Sstevel@tonic-gate apic_disable_local_apic(); 2233*0Sstevel@tonic-gate return; 2234*0Sstevel@tonic-gate } 2235*0Sstevel@tonic-gate 2236*0Sstevel@tonic-gate if (lock_try(&apic_nmi_lock)) { 2237*0Sstevel@tonic-gate if (apic_kmdb_on_nmi) { 2238*0Sstevel@tonic-gate if (psm_debugger() == 0) { 2239*0Sstevel@tonic-gate cmn_err(CE_PANIC, 2240*0Sstevel@tonic-gate "NMI detected, kmdb is not available."); 2241*0Sstevel@tonic-gate } else { 2242*0Sstevel@tonic-gate debug_enter("\nNMI detected, entering kmdb.\n"); 2243*0Sstevel@tonic-gate } 2244*0Sstevel@tonic-gate } else { 2245*0Sstevel@tonic-gate if (apic_panic_on_nmi) { 2246*0Sstevel@tonic-gate /* Keep panic from entering kmdb. */ 2247*0Sstevel@tonic-gate nopanicdebug = 1; 2248*0Sstevel@tonic-gate cmn_err(CE_PANIC, "pcplusmp: NMI received"); 2249*0Sstevel@tonic-gate } else { 2250*0Sstevel@tonic-gate /* 2251*0Sstevel@tonic-gate * prom_printf is the best shot we have 2252*0Sstevel@tonic-gate * of something which is problem free from 2253*0Sstevel@tonic-gate * high level/NMI type of interrupts 2254*0Sstevel@tonic-gate */ 2255*0Sstevel@tonic-gate prom_printf("pcplusmp: NMI received\n"); 2256*0Sstevel@tonic-gate apic_error |= APIC_ERR_NMI; 2257*0Sstevel@tonic-gate apic_num_nmis++; 2258*0Sstevel@tonic-gate } 2259*0Sstevel@tonic-gate } 2260*0Sstevel@tonic-gate lock_clear(&apic_nmi_lock); 2261*0Sstevel@tonic-gate } 2262*0Sstevel@tonic-gate } 2263*0Sstevel@tonic-gate 2264*0Sstevel@tonic-gate /* 2265*0Sstevel@tonic-gate * Add mask bits to disable interrupt vector from happening 2266*0Sstevel@tonic-gate * at or above IPL. In addition, it should remove mask bits 2267*0Sstevel@tonic-gate * to enable interrupt vectors below the given IPL. 2268*0Sstevel@tonic-gate * 2269*0Sstevel@tonic-gate * Both add and delspl are complicated by the fact that different interrupts 2270*0Sstevel@tonic-gate * may share IRQs. This can happen in two ways. 2271*0Sstevel@tonic-gate * 1. The same H/W line is shared by more than 1 device 2272*0Sstevel@tonic-gate * 1a. with interrupts at different IPLs 2273*0Sstevel@tonic-gate * 1b. with interrupts at same IPL 2274*0Sstevel@tonic-gate * 2. We ran out of vectors at a given IPL and started sharing vectors. 2275*0Sstevel@tonic-gate * 1b and 2 should be handled gracefully, except for the fact some ISRs 2276*0Sstevel@tonic-gate * will get called often when no interrupt is pending for the device. 2277*0Sstevel@tonic-gate * For 1a, we just hope that the machine blows up with the person who 2278*0Sstevel@tonic-gate * set it up that way!. 
In the meantime, we handle it at the higher IPL. 2279*0Sstevel@tonic-gate */ 2280*0Sstevel@tonic-gate /*ARGSUSED*/ 2281*0Sstevel@tonic-gate static int 2282*0Sstevel@tonic-gate apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl) 2283*0Sstevel@tonic-gate { 2284*0Sstevel@tonic-gate uchar_t vector; 2285*0Sstevel@tonic-gate int iflag; 2286*0Sstevel@tonic-gate apic_irq_t *irqptr, *irqheadptr; 2287*0Sstevel@tonic-gate int irqindex; 2288*0Sstevel@tonic-gate 2289*0Sstevel@tonic-gate ASSERT(max_ipl <= UCHAR_MAX); 2290*0Sstevel@tonic-gate irqindex = IRQINDEX(irqno); 2291*0Sstevel@tonic-gate 2292*0Sstevel@tonic-gate if ((irqindex == -1) || (!apic_irq_table[irqindex])) 2293*0Sstevel@tonic-gate return (PSM_FAILURE); 2294*0Sstevel@tonic-gate 2295*0Sstevel@tonic-gate irqptr = irqheadptr = apic_irq_table[irqindex]; 2296*0Sstevel@tonic-gate 2297*0Sstevel@tonic-gate DDI_INTR_IMPLDBG((CE_CONT, "apic_addspl: dip=0x%p type=%d irqno=0x%x " 2298*0Sstevel@tonic-gate "vector=0x%x\n", (void *)irqptr->airq_dip, 2299*0Sstevel@tonic-gate irqptr->airq_mps_intr_index, irqno, irqptr->airq_vector)); 2300*0Sstevel@tonic-gate 2301*0Sstevel@tonic-gate while (irqptr) { 2302*0Sstevel@tonic-gate if (VIRTIRQ(irqindex, irqptr->airq_share_id) == irqno) 2303*0Sstevel@tonic-gate break; 2304*0Sstevel@tonic-gate irqptr = irqptr->airq_next; 2305*0Sstevel@tonic-gate } 2306*0Sstevel@tonic-gate irqptr->airq_share++; 2307*0Sstevel@tonic-gate 2308*0Sstevel@tonic-gate /* return if it is not hardware interrupt */ 2309*0Sstevel@tonic-gate if (irqptr->airq_mps_intr_index == RESERVE_INDEX) 2310*0Sstevel@tonic-gate return (PSM_SUCCESS); 2311*0Sstevel@tonic-gate 2312*0Sstevel@tonic-gate /* Or if there are more interupts at a higher IPL */ 2313*0Sstevel@tonic-gate if (ipl != max_ipl) 2314*0Sstevel@tonic-gate return (PSM_SUCCESS); 2315*0Sstevel@tonic-gate 2316*0Sstevel@tonic-gate /* 2317*0Sstevel@tonic-gate * if apic_picinit() has not been called yet, just return. 2318*0Sstevel@tonic-gate * At the end of apic_picinit(), we will call setup_io_intr(). 2319*0Sstevel@tonic-gate */ 2320*0Sstevel@tonic-gate 2321*0Sstevel@tonic-gate if (!apic_flag) 2322*0Sstevel@tonic-gate return (PSM_SUCCESS); 2323*0Sstevel@tonic-gate 2324*0Sstevel@tonic-gate iflag = intr_clear(); 2325*0Sstevel@tonic-gate 2326*0Sstevel@tonic-gate /* 2327*0Sstevel@tonic-gate * Upgrade vector if max_ipl is not earlier ipl. If we cannot allocate, 2328*0Sstevel@tonic-gate * return failure. Not very elegant, but then we hope the 2329*0Sstevel@tonic-gate * machine will blow up with ... 
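	 *
	 * When the upgrade succeeds, a fresh vector is allocated at max_ipl,
	 * every entry sharing this IRQ is switched to that vector, and each
	 * sharer that is already initialized is reprogrammed through
	 * apic_record_rdt_entry()/apic_setup_io_intr().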
2330*0Sstevel@tonic-gate 	 */
2331*0Sstevel@tonic-gate 	if (irqptr->airq_ipl != max_ipl) {
2332*0Sstevel@tonic-gate 		vector = apic_allocate_vector(max_ipl, irqindex, 1);
2333*0Sstevel@tonic-gate 		if (vector == 0) {
2334*0Sstevel@tonic-gate 			intr_restore(iflag);
2335*0Sstevel@tonic-gate 			irqptr->airq_share--;
2336*0Sstevel@tonic-gate 			return (PSM_FAILURE);
2337*0Sstevel@tonic-gate 		}
2338*0Sstevel@tonic-gate 		irqptr = irqheadptr;
2339*0Sstevel@tonic-gate 		apic_mark_vector(irqptr->airq_vector, vector);
2340*0Sstevel@tonic-gate 		while (irqptr) {
2341*0Sstevel@tonic-gate 			irqptr->airq_vector = vector;
2342*0Sstevel@tonic-gate 			irqptr->airq_ipl = (uchar_t)max_ipl;
2343*0Sstevel@tonic-gate 			/*
2344*0Sstevel@tonic-gate 			 * reprogram the irq being added and everyone else
2345*0Sstevel@tonic-gate 			 * who is not in the UNINIT state
2346*0Sstevel@tonic-gate 			 */
2347*0Sstevel@tonic-gate 			if ((VIRTIRQ(irqindex, irqptr->airq_share_id) ==
2348*0Sstevel@tonic-gate 			    irqno) || (irqptr->airq_temp_cpu != IRQ_UNINIT)) {
2349*0Sstevel@tonic-gate 				apic_record_rdt_entry(irqptr, irqindex);
2350*0Sstevel@tonic-gate 				(void) apic_setup_io_intr(irqptr, irqindex);
2351*0Sstevel@tonic-gate 			}
2352*0Sstevel@tonic-gate 			irqptr = irqptr->airq_next;
2353*0Sstevel@tonic-gate 		}
2354*0Sstevel@tonic-gate 		intr_restore(iflag);
2355*0Sstevel@tonic-gate 		return (PSM_SUCCESS);
2356*0Sstevel@tonic-gate 	}
2357*0Sstevel@tonic-gate 
2358*0Sstevel@tonic-gate 	ASSERT(irqptr);
2359*0Sstevel@tonic-gate 	(void) apic_setup_io_intr(irqptr, irqindex);
2360*0Sstevel@tonic-gate 	intr_restore(iflag);
2361*0Sstevel@tonic-gate 	return (PSM_SUCCESS);
2362*0Sstevel@tonic-gate }
2363*0Sstevel@tonic-gate 
2364*0Sstevel@tonic-gate /*
2365*0Sstevel@tonic-gate  * Recompute mask bits for the given interrupt vector.
2366*0Sstevel@tonic-gate  * If there is no interrupt servicing routine for this
2367*0Sstevel@tonic-gate  * vector, this function should disable the interrupt vector
2368*0Sstevel@tonic-gate  * from happening at all IPLs. If there are still
2369*0Sstevel@tonic-gate  * handlers using the given vector, this function should
2370*0Sstevel@tonic-gate  * disable the given vector from happening below the lowest
2371*0Sstevel@tonic-gate  * IPL of the remaining handlers.
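 *
 * A max_ipl of PSM_INVALID_IPL means no handlers remain for this IRQ;
 * in that case the I/O APIC redirection entry is masked, the CPU
 * binding accounting is undone, and the irq table entry and its vector
 * are freed.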
2372*0Sstevel@tonic-gate */ 2373*0Sstevel@tonic-gate /*ARGSUSED*/ 2374*0Sstevel@tonic-gate static int 2375*0Sstevel@tonic-gate apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl) 2376*0Sstevel@tonic-gate { 2377*0Sstevel@tonic-gate uchar_t vector, bind_cpu; 2378*0Sstevel@tonic-gate int iflag, intin, irqindex; 2379*0Sstevel@tonic-gate volatile int32_t *ioapic; 2380*0Sstevel@tonic-gate apic_irq_t *irqptr, *irqheadptr; 2381*0Sstevel@tonic-gate 2382*0Sstevel@tonic-gate irqindex = IRQINDEX(irqno); 2383*0Sstevel@tonic-gate irqptr = irqheadptr = apic_irq_table[irqindex]; 2384*0Sstevel@tonic-gate 2385*0Sstevel@tonic-gate DDI_INTR_IMPLDBG((CE_CONT, "apic_delspl: dip=0x%p type=%d irqno=0x%x " 2386*0Sstevel@tonic-gate "vector=0x%x\n", (void *)irqptr->airq_dip, 2387*0Sstevel@tonic-gate irqptr->airq_mps_intr_index, irqno, irqptr->airq_vector)); 2388*0Sstevel@tonic-gate 2389*0Sstevel@tonic-gate while (irqptr) { 2390*0Sstevel@tonic-gate if (VIRTIRQ(irqindex, irqptr->airq_share_id) == irqno) 2391*0Sstevel@tonic-gate break; 2392*0Sstevel@tonic-gate irqptr = irqptr->airq_next; 2393*0Sstevel@tonic-gate } 2394*0Sstevel@tonic-gate ASSERT(irqptr); 2395*0Sstevel@tonic-gate 2396*0Sstevel@tonic-gate irqptr->airq_share--; 2397*0Sstevel@tonic-gate 2398*0Sstevel@tonic-gate if (ipl < max_ipl) 2399*0Sstevel@tonic-gate return (PSM_SUCCESS); 2400*0Sstevel@tonic-gate 2401*0Sstevel@tonic-gate /* return if it is not hardware interrupt */ 2402*0Sstevel@tonic-gate if (irqptr->airq_mps_intr_index == RESERVE_INDEX) 2403*0Sstevel@tonic-gate return (PSM_SUCCESS); 2404*0Sstevel@tonic-gate 2405*0Sstevel@tonic-gate if (!apic_flag) { 2406*0Sstevel@tonic-gate /* 2407*0Sstevel@tonic-gate * Clear irq_struct. If two devices shared an intpt 2408*0Sstevel@tonic-gate * line & 1 unloaded before picinit, we are hosed. But, then 2409*0Sstevel@tonic-gate * we hope the machine will ... 2410*0Sstevel@tonic-gate */ 2411*0Sstevel@tonic-gate irqptr->airq_mps_intr_index = FREE_INDEX; 2412*0Sstevel@tonic-gate irqptr->airq_temp_cpu = IRQ_UNINIT; 2413*0Sstevel@tonic-gate apic_free_vector(irqptr->airq_vector); 2414*0Sstevel@tonic-gate return (PSM_SUCCESS); 2415*0Sstevel@tonic-gate } 2416*0Sstevel@tonic-gate /* 2417*0Sstevel@tonic-gate * Downgrade vector to new max_ipl if needed.If we cannot allocate, 2418*0Sstevel@tonic-gate * use old IPL. Not very elegant, but then we hope ... 
2419*0Sstevel@tonic-gate */ 2420*0Sstevel@tonic-gate if ((irqptr->airq_ipl != max_ipl) && (max_ipl != PSM_INVALID_IPL)) { 2421*0Sstevel@tonic-gate apic_irq_t *irqp; 2422*0Sstevel@tonic-gate if (vector = apic_allocate_vector(max_ipl, irqno, 1)) { 2423*0Sstevel@tonic-gate apic_mark_vector(irqheadptr->airq_vector, vector); 2424*0Sstevel@tonic-gate irqp = irqheadptr; 2425*0Sstevel@tonic-gate while (irqp) { 2426*0Sstevel@tonic-gate irqp->airq_vector = vector; 2427*0Sstevel@tonic-gate irqp->airq_ipl = (uchar_t)max_ipl; 2428*0Sstevel@tonic-gate if (irqp->airq_temp_cpu != IRQ_UNINIT) { 2429*0Sstevel@tonic-gate apic_record_rdt_entry(irqp, irqindex); 2430*0Sstevel@tonic-gate (void) apic_setup_io_intr(irqp, 2431*0Sstevel@tonic-gate irqindex); 2432*0Sstevel@tonic-gate } 2433*0Sstevel@tonic-gate irqp = irqp->airq_next; 2434*0Sstevel@tonic-gate } 2435*0Sstevel@tonic-gate } 2436*0Sstevel@tonic-gate } 2437*0Sstevel@tonic-gate 2438*0Sstevel@tonic-gate if (irqptr->airq_share) 2439*0Sstevel@tonic-gate return (PSM_SUCCESS); 2440*0Sstevel@tonic-gate 2441*0Sstevel@tonic-gate ioapic = apicioadr[irqptr->airq_ioapicindex]; 2442*0Sstevel@tonic-gate intin = irqptr->airq_intin_no; 2443*0Sstevel@tonic-gate iflag = intr_clear(); 2444*0Sstevel@tonic-gate lock_set(&apic_ioapic_lock); 2445*0Sstevel@tonic-gate ioapic[APIC_IO_REG] = APIC_RDT_CMD + 2 * intin; 2446*0Sstevel@tonic-gate ioapic[APIC_IO_DATA] = AV_MASK; 2447*0Sstevel@tonic-gate 2448*0Sstevel@tonic-gate /* Disable the MSI/X vector */ 2449*0Sstevel@tonic-gate if (APIC_IS_MSI_OR_MSIX_INDEX(irqptr->airq_mps_intr_index)) { 2450*0Sstevel@tonic-gate int type = (irqptr->airq_mps_intr_index == MSI_INDEX) ? 2451*0Sstevel@tonic-gate DDI_INTR_TYPE_MSI : DDI_INTR_TYPE_MSIX; 2452*0Sstevel@tonic-gate 2453*0Sstevel@tonic-gate /* 2454*0Sstevel@tonic-gate * Make sure we only disable on the last 2455*0Sstevel@tonic-gate * of the multi-MSI support 2456*0Sstevel@tonic-gate */ 2457*0Sstevel@tonic-gate if (i_ddi_intr_get_current_nintrs(irqptr->airq_dip) == 1) { 2458*0Sstevel@tonic-gate (void) pci_msi_unconfigure(irqptr->airq_dip, type, 2459*0Sstevel@tonic-gate irqptr->airq_ioapicindex); 2460*0Sstevel@tonic-gate 2461*0Sstevel@tonic-gate (void) pci_msi_disable_mode(irqptr->airq_dip, type, 2462*0Sstevel@tonic-gate irqptr->airq_ioapicindex); 2463*0Sstevel@tonic-gate } 2464*0Sstevel@tonic-gate } 2465*0Sstevel@tonic-gate 2466*0Sstevel@tonic-gate if (max_ipl == PSM_INVALID_IPL) { 2467*0Sstevel@tonic-gate ASSERT(irqheadptr == irqptr); 2468*0Sstevel@tonic-gate bind_cpu = irqptr->airq_temp_cpu; 2469*0Sstevel@tonic-gate if (((uchar_t)bind_cpu != IRQ_UNBOUND) && 2470*0Sstevel@tonic-gate ((uchar_t)bind_cpu != IRQ_UNINIT)) { 2471*0Sstevel@tonic-gate ASSERT((bind_cpu & ~IRQ_USER_BOUND) < apic_nproc); 2472*0Sstevel@tonic-gate if (bind_cpu & IRQ_USER_BOUND) { 2473*0Sstevel@tonic-gate /* If hardbound, temp_cpu == cpu */ 2474*0Sstevel@tonic-gate bind_cpu &= ~IRQ_USER_BOUND; 2475*0Sstevel@tonic-gate apic_cpus[bind_cpu].aci_bound--; 2476*0Sstevel@tonic-gate } else 2477*0Sstevel@tonic-gate apic_cpus[bind_cpu].aci_temp_bound--; 2478*0Sstevel@tonic-gate } 2479*0Sstevel@tonic-gate lock_clear(&apic_ioapic_lock); 2480*0Sstevel@tonic-gate intr_restore(iflag); 2481*0Sstevel@tonic-gate irqptr->airq_temp_cpu = IRQ_UNINIT; 2482*0Sstevel@tonic-gate irqptr->airq_mps_intr_index = FREE_INDEX; 2483*0Sstevel@tonic-gate apic_free_vector(irqptr->airq_vector); 2484*0Sstevel@tonic-gate return (PSM_SUCCESS); 2485*0Sstevel@tonic-gate } 2486*0Sstevel@tonic-gate lock_clear(&apic_ioapic_lock); 2487*0Sstevel@tonic-gate 
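	/*
	 * The airq_mutex block below moves the first still-in-use entry of
	 * this IRQ's share list to the head of apic_irq_table[irqindex] and
	 * relinks the old head and any freed entries that followed it behind
	 * the new head.  For example, a chain
	 *	head -> A (free) -> B (in use) -> C
	 * becomes
	 *	B -> head -> A (free) -> C
	 */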
	intr_restore(iflag);
2488*0Sstevel@tonic-gate 
2489*0Sstevel@tonic-gate 	mutex_enter(&airq_mutex);
2490*0Sstevel@tonic-gate 	if ((irqptr == apic_irq_table[irqindex])) {
2491*0Sstevel@tonic-gate 		apic_irq_t *oldirqptr;
2492*0Sstevel@tonic-gate 		/* Move valid irq entry to the head */
2493*0Sstevel@tonic-gate 		irqheadptr = oldirqptr = irqptr;
2494*0Sstevel@tonic-gate 		irqptr = irqptr->airq_next;
2495*0Sstevel@tonic-gate 		ASSERT(irqptr);
2496*0Sstevel@tonic-gate 		while (irqptr) {
2497*0Sstevel@tonic-gate 			if (irqptr->airq_mps_intr_index != FREE_INDEX)
2498*0Sstevel@tonic-gate 				break;
2499*0Sstevel@tonic-gate 			oldirqptr = irqptr;
2500*0Sstevel@tonic-gate 			irqptr = irqptr->airq_next;
2501*0Sstevel@tonic-gate 		}
2502*0Sstevel@tonic-gate 		/* remove all invalid ones from the beginning */
2503*0Sstevel@tonic-gate 		apic_irq_table[irqindex] = irqptr;
2504*0Sstevel@tonic-gate 		/*
2505*0Sstevel@tonic-gate 		 * and link them back after the head. The invalid ones
2506*0Sstevel@tonic-gate 		 * begin with irqheadptr and end at oldirqptr
2507*0Sstevel@tonic-gate 		 */
2508*0Sstevel@tonic-gate 		oldirqptr->airq_next = irqptr->airq_next;
2509*0Sstevel@tonic-gate 		irqptr->airq_next = irqheadptr;
2510*0Sstevel@tonic-gate 	}
2511*0Sstevel@tonic-gate 	mutex_exit(&airq_mutex);
2512*0Sstevel@tonic-gate 
2513*0Sstevel@tonic-gate 	irqptr->airq_temp_cpu = IRQ_UNINIT;
2514*0Sstevel@tonic-gate 	irqptr->airq_mps_intr_index = FREE_INDEX;
2515*0Sstevel@tonic-gate 	return (PSM_SUCCESS);
2516*0Sstevel@tonic-gate }
2517*0Sstevel@tonic-gate 
2518*0Sstevel@tonic-gate /*
2519*0Sstevel@tonic-gate  * Return HW interrupt number corresponding to the given IPL
2520*0Sstevel@tonic-gate  */
2521*0Sstevel@tonic-gate /*ARGSUSED*/
2522*0Sstevel@tonic-gate static int
2523*0Sstevel@tonic-gate apic_softlvl_to_irq(int ipl)
2524*0Sstevel@tonic-gate {
2525*0Sstevel@tonic-gate 	/*
2526*0Sstevel@tonic-gate 	 * Do not use the apic to trigger soft interrupts.
2527*0Sstevel@tonic-gate 	 * It will cause the system to hang when 2 hardware interrupts
2528*0Sstevel@tonic-gate 	 * at the same priority as the softint have already been accepted
2529*0Sstevel@tonic-gate 	 * by the apic, because the AV_PENDING bit will not be cleared
2530*0Sstevel@tonic-gate 	 * until one of the hardware interrupts is eoi'ed. If we need
2531*0Sstevel@tonic-gate 	 * to send an ipi at this time, we will end up looping forever
2532*0Sstevel@tonic-gate 	 * waiting for the AV_PENDING bit to clear.
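	 *
	 * Returning PSM_SV_SOFTWARE indicates that soft interrupts should be
	 * driven purely in software rather than through an APIC vector.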
2533*0Sstevel@tonic-gate */ 2534*0Sstevel@tonic-gate return (PSM_SV_SOFTWARE); 2535*0Sstevel@tonic-gate } 2536*0Sstevel@tonic-gate 2537*0Sstevel@tonic-gate static int 2538*0Sstevel@tonic-gate apic_post_cpu_start() 2539*0Sstevel@tonic-gate { 2540*0Sstevel@tonic-gate int i, cpun; 2541*0Sstevel@tonic-gate apic_irq_t *irq_ptr; 2542*0Sstevel@tonic-gate 2543*0Sstevel@tonic-gate apic_init_intr(); 2544*0Sstevel@tonic-gate 2545*0Sstevel@tonic-gate /* 2546*0Sstevel@tonic-gate * since some systems don't enable the internal cache on the non-boot 2547*0Sstevel@tonic-gate * cpus, so we have to enable them here 2548*0Sstevel@tonic-gate */ 2549*0Sstevel@tonic-gate setcr0(getcr0() & ~(0x60000000)); 2550*0Sstevel@tonic-gate 2551*0Sstevel@tonic-gate while (get_apic_cmd1() & AV_PENDING) 2552*0Sstevel@tonic-gate apic_ret(); 2553*0Sstevel@tonic-gate 2554*0Sstevel@tonic-gate cpun = psm_get_cpu_id(); 2555*0Sstevel@tonic-gate apic_cpus[cpun].aci_status = APIC_CPU_ONLINE | APIC_CPU_INTR_ENABLE; 2556*0Sstevel@tonic-gate 2557*0Sstevel@tonic-gate for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) { 2558*0Sstevel@tonic-gate irq_ptr = apic_irq_table[i]; 2559*0Sstevel@tonic-gate if ((irq_ptr == NULL) || 2560*0Sstevel@tonic-gate ((irq_ptr->airq_cpu & ~IRQ_USER_BOUND) != cpun)) 2561*0Sstevel@tonic-gate continue; 2562*0Sstevel@tonic-gate 2563*0Sstevel@tonic-gate while (irq_ptr) { 2564*0Sstevel@tonic-gate if (irq_ptr->airq_temp_cpu != IRQ_UNINIT) 2565*0Sstevel@tonic-gate (void) apic_rebind(irq_ptr, cpun, 1, IMMEDIATE); 2566*0Sstevel@tonic-gate irq_ptr = irq_ptr->airq_next; 2567*0Sstevel@tonic-gate } 2568*0Sstevel@tonic-gate } 2569*0Sstevel@tonic-gate 2570*0Sstevel@tonic-gate return (PSM_SUCCESS); 2571*0Sstevel@tonic-gate } 2572*0Sstevel@tonic-gate 2573*0Sstevel@tonic-gate processorid_t 2574*0Sstevel@tonic-gate apic_get_next_processorid(processorid_t cpu_id) 2575*0Sstevel@tonic-gate { 2576*0Sstevel@tonic-gate 2577*0Sstevel@tonic-gate int i; 2578*0Sstevel@tonic-gate 2579*0Sstevel@tonic-gate if (cpu_id == -1) 2580*0Sstevel@tonic-gate return ((processorid_t)0); 2581*0Sstevel@tonic-gate 2582*0Sstevel@tonic-gate for (i = cpu_id + 1; i < NCPU; i++) { 2583*0Sstevel@tonic-gate if (apic_cpumask & (1 << i)) 2584*0Sstevel@tonic-gate return (i); 2585*0Sstevel@tonic-gate } 2586*0Sstevel@tonic-gate 2587*0Sstevel@tonic-gate return ((processorid_t)-1); 2588*0Sstevel@tonic-gate } 2589*0Sstevel@tonic-gate 2590*0Sstevel@tonic-gate 2591*0Sstevel@tonic-gate /* 2592*0Sstevel@tonic-gate * type == -1 indicates it is an internal request. 
Do not change 2593*0Sstevel@tonic-gate * resv_vector for these requests 2594*0Sstevel@tonic-gate */ 2595*0Sstevel@tonic-gate static int 2596*0Sstevel@tonic-gate apic_get_ipivect(int ipl, int type) 2597*0Sstevel@tonic-gate { 2598*0Sstevel@tonic-gate uchar_t vector; 2599*0Sstevel@tonic-gate int irq; 2600*0Sstevel@tonic-gate 2601*0Sstevel@tonic-gate if (irq = apic_allocate_irq(APIC_VECTOR(ipl))) { 2602*0Sstevel@tonic-gate if (vector = apic_allocate_vector(ipl, irq, 1)) { 2603*0Sstevel@tonic-gate apic_irq_table[irq]->airq_mps_intr_index = 2604*0Sstevel@tonic-gate RESERVE_INDEX; 2605*0Sstevel@tonic-gate apic_irq_table[irq]->airq_vector = vector; 2606*0Sstevel@tonic-gate if (type != -1) { 2607*0Sstevel@tonic-gate apic_resv_vector[ipl] = vector; 2608*0Sstevel@tonic-gate } 2609*0Sstevel@tonic-gate return (irq); 2610*0Sstevel@tonic-gate } 2611*0Sstevel@tonic-gate } 2612*0Sstevel@tonic-gate apic_error |= APIC_ERR_GET_IPIVECT_FAIL; 2613*0Sstevel@tonic-gate return (-1); /* shouldn't happen */ 2614*0Sstevel@tonic-gate } 2615*0Sstevel@tonic-gate 2616*0Sstevel@tonic-gate static int 2617*0Sstevel@tonic-gate apic_getclkirq(int ipl) 2618*0Sstevel@tonic-gate { 2619*0Sstevel@tonic-gate int irq; 2620*0Sstevel@tonic-gate 2621*0Sstevel@tonic-gate if ((irq = apic_get_ipivect(ipl, -1)) == -1) 2622*0Sstevel@tonic-gate return (-1); 2623*0Sstevel@tonic-gate /* 2624*0Sstevel@tonic-gate * Note the vector in apic_clkvect for per clock handling. 2625*0Sstevel@tonic-gate */ 2626*0Sstevel@tonic-gate apic_clkvect = apic_irq_table[irq]->airq_vector - APIC_BASE_VECT; 2627*0Sstevel@tonic-gate APIC_VERBOSE_IOAPIC((CE_NOTE, "get_clkirq: vector = %x\n", 2628*0Sstevel@tonic-gate apic_clkvect)); 2629*0Sstevel@tonic-gate return (irq); 2630*0Sstevel@tonic-gate } 2631*0Sstevel@tonic-gate 2632*0Sstevel@tonic-gate /* 2633*0Sstevel@tonic-gate * Return the number of APIC clock ticks elapsed for 8245 to decrement 2634*0Sstevel@tonic-gate * (APIC_TIME_COUNT + pit_ticks_adj) ticks. 2635*0Sstevel@tonic-gate */ 2636*0Sstevel@tonic-gate static uint_t 2637*0Sstevel@tonic-gate apic_calibrate(volatile uint32_t *addr, uint16_t *pit_ticks_adj) 2638*0Sstevel@tonic-gate { 2639*0Sstevel@tonic-gate uint8_t pit_tick_lo; 2640*0Sstevel@tonic-gate uint16_t pit_tick, target_pit_tick; 2641*0Sstevel@tonic-gate uint32_t start_apic_tick, end_apic_tick; 2642*0Sstevel@tonic-gate int iflag; 2643*0Sstevel@tonic-gate 2644*0Sstevel@tonic-gate addr += APIC_CURR_COUNT; 2645*0Sstevel@tonic-gate 2646*0Sstevel@tonic-gate iflag = intr_clear(); 2647*0Sstevel@tonic-gate 2648*0Sstevel@tonic-gate do { 2649*0Sstevel@tonic-gate pit_tick_lo = inb(PITCTR0_PORT); 2650*0Sstevel@tonic-gate pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo; 2651*0Sstevel@tonic-gate } while (pit_tick < APIC_TIME_MIN || 2652*0Sstevel@tonic-gate pit_tick_lo <= APIC_LB_MIN || pit_tick_lo >= APIC_LB_MAX); 2653*0Sstevel@tonic-gate 2654*0Sstevel@tonic-gate /* 2655*0Sstevel@tonic-gate * Wait for the 8254 to decrement by 5 ticks to ensure 2656*0Sstevel@tonic-gate * we didn't start in the middle of a tick. 2657*0Sstevel@tonic-gate * Compare with 0x10 for the wrap around case. 
2658*0Sstevel@tonic-gate */ 2659*0Sstevel@tonic-gate target_pit_tick = pit_tick - 5; 2660*0Sstevel@tonic-gate do { 2661*0Sstevel@tonic-gate pit_tick_lo = inb(PITCTR0_PORT); 2662*0Sstevel@tonic-gate pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo; 2663*0Sstevel@tonic-gate } while (pit_tick > target_pit_tick || pit_tick_lo < 0x10); 2664*0Sstevel@tonic-gate 2665*0Sstevel@tonic-gate start_apic_tick = *addr; 2666*0Sstevel@tonic-gate 2667*0Sstevel@tonic-gate /* 2668*0Sstevel@tonic-gate * Wait for the 8254 to decrement by 2669*0Sstevel@tonic-gate * (APIC_TIME_COUNT + pit_ticks_adj) ticks 2670*0Sstevel@tonic-gate */ 2671*0Sstevel@tonic-gate target_pit_tick = pit_tick - APIC_TIME_COUNT; 2672*0Sstevel@tonic-gate do { 2673*0Sstevel@tonic-gate pit_tick_lo = inb(PITCTR0_PORT); 2674*0Sstevel@tonic-gate pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo; 2675*0Sstevel@tonic-gate } while (pit_tick > target_pit_tick || pit_tick_lo < 0x10); 2676*0Sstevel@tonic-gate 2677*0Sstevel@tonic-gate end_apic_tick = *addr; 2678*0Sstevel@tonic-gate 2679*0Sstevel@tonic-gate *pit_ticks_adj = target_pit_tick - pit_tick; 2680*0Sstevel@tonic-gate 2681*0Sstevel@tonic-gate intr_restore(iflag); 2682*0Sstevel@tonic-gate 2683*0Sstevel@tonic-gate return (start_apic_tick - end_apic_tick); 2684*0Sstevel@tonic-gate } 2685*0Sstevel@tonic-gate 2686*0Sstevel@tonic-gate /* 2687*0Sstevel@tonic-gate * Initialise the APIC timer on the local APIC of CPU 0 to the desired 2688*0Sstevel@tonic-gate * frequency. Note at this stage in the boot sequence, the boot processor 2689*0Sstevel@tonic-gate * is the only active processor. 2690*0Sstevel@tonic-gate * hertz value of 0 indicates a one-shot mode request. In this case 2691*0Sstevel@tonic-gate * the function returns the resolution (in nanoseconds) for the hardware 2692*0Sstevel@tonic-gate * timer interrupt. If one-shot mode capability is not available, 2693*0Sstevel@tonic-gate * the return value will be 0. apic_enable_oneshot is a global switch 2694*0Sstevel@tonic-gate * for disabling the functionality. 2695*0Sstevel@tonic-gate * A non-zero positive value for hertz indicates a periodic mode request. 2696*0Sstevel@tonic-gate * In this case the hardware will be programmed to generate clock interrupts 2697*0Sstevel@tonic-gate * at hertz frequency and returns the resolution of interrupts in 2698*0Sstevel@tonic-gate * nanosecond. 
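 *
 * On the first call the routine also calibrates the local APIC timer
 * against the 8254 PIT (see apic_calibrate() above) to determine
 * apic_nsec_per_tick, the number of nanoseconds per APIC timer tick,
 * which every later conversion between time and timer ticks relies on.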
2699*0Sstevel@tonic-gate */ 2700*0Sstevel@tonic-gate 2701*0Sstevel@tonic-gate static int 2702*0Sstevel@tonic-gate apic_clkinit(int hertz) 2703*0Sstevel@tonic-gate { 2704*0Sstevel@tonic-gate 2705*0Sstevel@tonic-gate uint_t apic_ticks = 0; 2706*0Sstevel@tonic-gate uint_t pit_time; 2707*0Sstevel@tonic-gate int ret; 2708*0Sstevel@tonic-gate uint16_t pit_ticks_adj; 2709*0Sstevel@tonic-gate static int firsttime = 1; 2710*0Sstevel@tonic-gate 2711*0Sstevel@tonic-gate if (firsttime) { 2712*0Sstevel@tonic-gate /* first time calibrate */ 2713*0Sstevel@tonic-gate 2714*0Sstevel@tonic-gate apicadr[APIC_DIVIDE_REG] = 0x0; 2715*0Sstevel@tonic-gate apicadr[APIC_INIT_COUNT] = APIC_MAXVAL; 2716*0Sstevel@tonic-gate 2717*0Sstevel@tonic-gate /* set periodic interrupt based on CLKIN */ 2718*0Sstevel@tonic-gate apicadr[APIC_LOCAL_TIMER] = 2719*0Sstevel@tonic-gate (apic_clkvect + APIC_BASE_VECT) | AV_TIME; 2720*0Sstevel@tonic-gate tenmicrosec(); 2721*0Sstevel@tonic-gate 2722*0Sstevel@tonic-gate apic_ticks = apic_calibrate(apicadr, &pit_ticks_adj); 2723*0Sstevel@tonic-gate 2724*0Sstevel@tonic-gate apicadr[APIC_LOCAL_TIMER] = 2725*0Sstevel@tonic-gate (apic_clkvect + APIC_BASE_VECT) | AV_MASK; 2726*0Sstevel@tonic-gate /* 2727*0Sstevel@tonic-gate * pit time is the amount of real time (in nanoseconds ) it took 2728*0Sstevel@tonic-gate * the 8254 to decrement (APIC_TIME_COUNT + pit_ticks_adj) ticks 2729*0Sstevel@tonic-gate */ 2730*0Sstevel@tonic-gate pit_time = ((longlong_t)(APIC_TIME_COUNT + 2731*0Sstevel@tonic-gate pit_ticks_adj) * NANOSEC) / PIT_HZ; 2732*0Sstevel@tonic-gate 2733*0Sstevel@tonic-gate /* 2734*0Sstevel@tonic-gate * Determine the number of nanoseconds per APIC clock tick 2735*0Sstevel@tonic-gate * and then determine how many APIC ticks to interrupt at the 2736*0Sstevel@tonic-gate * desired frequency 2737*0Sstevel@tonic-gate */ 2738*0Sstevel@tonic-gate apic_nsec_per_tick = pit_time / apic_ticks; 2739*0Sstevel@tonic-gate if (apic_nsec_per_tick == 0) 2740*0Sstevel@tonic-gate apic_nsec_per_tick = 1; 2741*0Sstevel@tonic-gate 2742*0Sstevel@tonic-gate /* the interval timer initial count is 32 bit max */ 2743*0Sstevel@tonic-gate apic_nsec_max = (hrtime_t)apic_nsec_per_tick * APIC_MAXVAL; 2744*0Sstevel@tonic-gate firsttime = 0; 2745*0Sstevel@tonic-gate } 2746*0Sstevel@tonic-gate 2747*0Sstevel@tonic-gate if (hertz != 0) { 2748*0Sstevel@tonic-gate /* periodic */ 2749*0Sstevel@tonic-gate apic_nsec_per_intr = NANOSEC / hertz; 2750*0Sstevel@tonic-gate apic_hertz_count = (longlong_t)apic_nsec_per_intr / 2751*0Sstevel@tonic-gate apic_nsec_per_tick; 2752*0Sstevel@tonic-gate apic_sample_factor_redistribution = hertz + 1; 2753*0Sstevel@tonic-gate } 2754*0Sstevel@tonic-gate 2755*0Sstevel@tonic-gate apic_int_busy_mark = (apic_int_busy_mark * 2756*0Sstevel@tonic-gate apic_sample_factor_redistribution) / 100; 2757*0Sstevel@tonic-gate apic_int_free_mark = (apic_int_free_mark * 2758*0Sstevel@tonic-gate apic_sample_factor_redistribution) / 100; 2759*0Sstevel@tonic-gate apic_diff_for_redistribution = (apic_diff_for_redistribution * 2760*0Sstevel@tonic-gate apic_sample_factor_redistribution) / 100; 2761*0Sstevel@tonic-gate 2762*0Sstevel@tonic-gate if (hertz == 0) { 2763*0Sstevel@tonic-gate /* requested one_shot */ 2764*0Sstevel@tonic-gate if (!apic_oneshot_enable) 2765*0Sstevel@tonic-gate return (0); 2766*0Sstevel@tonic-gate apic_oneshot = 1; 2767*0Sstevel@tonic-gate ret = (int)apic_nsec_per_tick; 2768*0Sstevel@tonic-gate } else { 2769*0Sstevel@tonic-gate /* program the local APIC to interrupt at the given frequency */ 
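		/*
		 * Worked example with illustrative values: for hertz == 100
		 * and apic_nsec_per_tick == 10ns,
		 *	apic_nsec_per_intr = NANOSEC / hertz = 10,000,000ns
		 *	apic_hertz_count   = 10,000,000 / 10 = 1,000,000
		 * so the timer below is loaded with an initial count of
		 * 1,000,000 ticks and runs in periodic (AV_TIME) mode.
		 */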
2770*0Sstevel@tonic-gate apicadr[APIC_INIT_COUNT] = apic_hertz_count; 2771*0Sstevel@tonic-gate apicadr[APIC_LOCAL_TIMER] = 2772*0Sstevel@tonic-gate (apic_clkvect + APIC_BASE_VECT) | AV_TIME; 2773*0Sstevel@tonic-gate apic_oneshot = 0; 2774*0Sstevel@tonic-gate ret = NANOSEC / hertz; 2775*0Sstevel@tonic-gate } 2776*0Sstevel@tonic-gate 2777*0Sstevel@tonic-gate return (ret); 2778*0Sstevel@tonic-gate 2779*0Sstevel@tonic-gate } 2780*0Sstevel@tonic-gate 2781*0Sstevel@tonic-gate /* 2782*0Sstevel@tonic-gate * apic_preshutdown: 2783*0Sstevel@tonic-gate * Called early in shutdown whilst we can still access filesystems to do 2784*0Sstevel@tonic-gate * things like loading modules which will be required to complete shutdown 2785*0Sstevel@tonic-gate * after filesystems are all unmounted. 2786*0Sstevel@tonic-gate */ 2787*0Sstevel@tonic-gate static void 2788*0Sstevel@tonic-gate apic_preshutdown(int cmd, int fcn) 2789*0Sstevel@tonic-gate { 2790*0Sstevel@tonic-gate APIC_VERBOSE_POWEROFF(("apic_preshutdown(%d,%d); m=%d a=%d\n", 2791*0Sstevel@tonic-gate cmd, fcn, apic_poweroff_method, apic_enable_acpi)); 2792*0Sstevel@tonic-gate 2793*0Sstevel@tonic-gate if ((cmd != A_SHUTDOWN) || (fcn != AD_POWEROFF)) { 2794*0Sstevel@tonic-gate return; 2795*0Sstevel@tonic-gate } 2796*0Sstevel@tonic-gate } 2797*0Sstevel@tonic-gate 2798*0Sstevel@tonic-gate static void 2799*0Sstevel@tonic-gate apic_shutdown(int cmd, int fcn) 2800*0Sstevel@tonic-gate { 2801*0Sstevel@tonic-gate int iflag, restarts, attempts; 2802*0Sstevel@tonic-gate int i, j; 2803*0Sstevel@tonic-gate volatile int32_t *ioapic; 2804*0Sstevel@tonic-gate uchar_t byte; 2805*0Sstevel@tonic-gate 2806*0Sstevel@tonic-gate /* Send NMI to all CPUs except self to do per processor shutdown */ 2807*0Sstevel@tonic-gate iflag = intr_clear(); 2808*0Sstevel@tonic-gate while (get_apic_cmd1() & AV_PENDING) 2809*0Sstevel@tonic-gate apic_ret(); 2810*0Sstevel@tonic-gate apic_shutdown_processors = 1; 2811*0Sstevel@tonic-gate apicadr[APIC_INT_CMD1] = AV_NMI | AV_LEVEL | AV_SH_ALL_EXCSELF; 2812*0Sstevel@tonic-gate 2813*0Sstevel@tonic-gate /* restore cmos shutdown byte before reboot */ 2814*0Sstevel@tonic-gate if (apic_cmos_ssb_set) { 2815*0Sstevel@tonic-gate outb(CMOS_ADDR, SSB); 2816*0Sstevel@tonic-gate outb(CMOS_DATA, 0); 2817*0Sstevel@tonic-gate } 2818*0Sstevel@tonic-gate /* Disable the I/O APIC redirection entries */ 2819*0Sstevel@tonic-gate for (j = 0; j < apic_io_max; j++) { 2820*0Sstevel@tonic-gate int intin_max; 2821*0Sstevel@tonic-gate ioapic = apicioadr[j]; 2822*0Sstevel@tonic-gate ioapic[APIC_IO_REG] = APIC_VERS_CMD; 2823*0Sstevel@tonic-gate /* Bits 23-16 define the maximum redirection entries */ 2824*0Sstevel@tonic-gate intin_max = (ioapic[APIC_IO_DATA] >> 16) & 0xff; 2825*0Sstevel@tonic-gate for (i = 0; i < intin_max; i++) { 2826*0Sstevel@tonic-gate ioapic[APIC_IO_REG] = APIC_RDT_CMD + 2 * i; 2827*0Sstevel@tonic-gate ioapic[APIC_IO_DATA] = AV_MASK; 2828*0Sstevel@tonic-gate } 2829*0Sstevel@tonic-gate } 2830*0Sstevel@tonic-gate 2831*0Sstevel@tonic-gate /* disable apic mode if imcr present */ 2832*0Sstevel@tonic-gate if (apic_imcrp) { 2833*0Sstevel@tonic-gate outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT); 2834*0Sstevel@tonic-gate outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_PIC); 2835*0Sstevel@tonic-gate } 2836*0Sstevel@tonic-gate 2837*0Sstevel@tonic-gate apic_disable_local_apic(); 2838*0Sstevel@tonic-gate 2839*0Sstevel@tonic-gate intr_restore(iflag); 2840*0Sstevel@tonic-gate 2841*0Sstevel@tonic-gate if ((cmd != A_SHUTDOWN) || (fcn != AD_POWEROFF)) { 2842*0Sstevel@tonic-gate return; 
2843*0Sstevel@tonic-gate } 2844*0Sstevel@tonic-gate 2845*0Sstevel@tonic-gate switch (apic_poweroff_method) { 2846*0Sstevel@tonic-gate case APIC_POWEROFF_VIA_RTC: 2847*0Sstevel@tonic-gate 2848*0Sstevel@tonic-gate /* select the extended NVRAM bank in the RTC */ 2849*0Sstevel@tonic-gate outb(CMOS_ADDR, RTC_REGA); 2850*0Sstevel@tonic-gate byte = inb(CMOS_DATA); 2851*0Sstevel@tonic-gate outb(CMOS_DATA, (byte | EXT_BANK)); 2852*0Sstevel@tonic-gate 2853*0Sstevel@tonic-gate outb(CMOS_ADDR, PFR_REG); 2854*0Sstevel@tonic-gate 2855*0Sstevel@tonic-gate /* for Predator must toggle the PAB bit */ 2856*0Sstevel@tonic-gate byte = inb(CMOS_DATA); 2857*0Sstevel@tonic-gate 2858*0Sstevel@tonic-gate /* 2859*0Sstevel@tonic-gate * clear power active bar, wakeup alarm and 2860*0Sstevel@tonic-gate * kickstart 2861*0Sstevel@tonic-gate */ 2862*0Sstevel@tonic-gate byte &= ~(PAB_CBIT | WF_FLAG | KS_FLAG); 2863*0Sstevel@tonic-gate outb(CMOS_DATA, byte); 2864*0Sstevel@tonic-gate 2865*0Sstevel@tonic-gate /* delay before next write */ 2866*0Sstevel@tonic-gate drv_usecwait(1000); 2867*0Sstevel@tonic-gate 2868*0Sstevel@tonic-gate /* for S40 the following would suffice */ 2869*0Sstevel@tonic-gate byte = inb(CMOS_DATA); 2870*0Sstevel@tonic-gate 2871*0Sstevel@tonic-gate /* power active bar control bit */ 2872*0Sstevel@tonic-gate byte |= PAB_CBIT; 2873*0Sstevel@tonic-gate outb(CMOS_DATA, byte); 2874*0Sstevel@tonic-gate 2875*0Sstevel@tonic-gate break; 2876*0Sstevel@tonic-gate 2877*0Sstevel@tonic-gate case APIC_POWEROFF_VIA_ASPEN_BMC: 2878*0Sstevel@tonic-gate restarts = 0; 2879*0Sstevel@tonic-gate restart_aspen_bmc: 2880*0Sstevel@tonic-gate if (++restarts == 3) 2881*0Sstevel@tonic-gate break; 2882*0Sstevel@tonic-gate attempts = 0; 2883*0Sstevel@tonic-gate do { 2884*0Sstevel@tonic-gate byte = inb(MISMIC_FLAG_REGISTER); 2885*0Sstevel@tonic-gate byte &= MISMIC_BUSY_MASK; 2886*0Sstevel@tonic-gate if (byte != 0) { 2887*0Sstevel@tonic-gate drv_usecwait(1000); 2888*0Sstevel@tonic-gate if (attempts >= 3) 2889*0Sstevel@tonic-gate goto restart_aspen_bmc; 2890*0Sstevel@tonic-gate ++attempts; 2891*0Sstevel@tonic-gate } 2892*0Sstevel@tonic-gate } while (byte != 0); 2893*0Sstevel@tonic-gate outb(MISMIC_CNTL_REGISTER, CC_SMS_GET_STATUS); 2894*0Sstevel@tonic-gate byte = inb(MISMIC_FLAG_REGISTER); 2895*0Sstevel@tonic-gate byte |= 0x1; 2896*0Sstevel@tonic-gate outb(MISMIC_FLAG_REGISTER, byte); 2897*0Sstevel@tonic-gate i = 0; 2898*0Sstevel@tonic-gate for (; i < (sizeof (aspen_bmc)/sizeof (aspen_bmc[0])); 2899*0Sstevel@tonic-gate i++) { 2900*0Sstevel@tonic-gate attempts = 0; 2901*0Sstevel@tonic-gate do { 2902*0Sstevel@tonic-gate byte = inb(MISMIC_FLAG_REGISTER); 2903*0Sstevel@tonic-gate byte &= MISMIC_BUSY_MASK; 2904*0Sstevel@tonic-gate if (byte != 0) { 2905*0Sstevel@tonic-gate drv_usecwait(1000); 2906*0Sstevel@tonic-gate if (attempts >= 3) 2907*0Sstevel@tonic-gate goto restart_aspen_bmc; 2908*0Sstevel@tonic-gate ++attempts; 2909*0Sstevel@tonic-gate } 2910*0Sstevel@tonic-gate } while (byte != 0); 2911*0Sstevel@tonic-gate outb(MISMIC_CNTL_REGISTER, aspen_bmc[i].cntl); 2912*0Sstevel@tonic-gate outb(MISMIC_DATA_REGISTER, aspen_bmc[i].data); 2913*0Sstevel@tonic-gate byte = inb(MISMIC_FLAG_REGISTER); 2914*0Sstevel@tonic-gate byte |= 0x1; 2915*0Sstevel@tonic-gate outb(MISMIC_FLAG_REGISTER, byte); 2916*0Sstevel@tonic-gate } 2917*0Sstevel@tonic-gate break; 2918*0Sstevel@tonic-gate 2919*0Sstevel@tonic-gate case APIC_POWEROFF_VIA_SITKA_BMC: 2920*0Sstevel@tonic-gate restarts = 0; 2921*0Sstevel@tonic-gate restart_sitka_bmc: 2922*0Sstevel@tonic-gate if 
(++restarts == 3) 2923*0Sstevel@tonic-gate break; 2924*0Sstevel@tonic-gate attempts = 0; 2925*0Sstevel@tonic-gate do { 2926*0Sstevel@tonic-gate byte = inb(SMS_STATUS_REGISTER); 2927*0Sstevel@tonic-gate byte &= SMS_STATE_MASK; 2928*0Sstevel@tonic-gate if ((byte == SMS_READ_STATE) || 2929*0Sstevel@tonic-gate (byte == SMS_WRITE_STATE)) { 2930*0Sstevel@tonic-gate drv_usecwait(1000); 2931*0Sstevel@tonic-gate if (attempts >= 3) 2932*0Sstevel@tonic-gate goto restart_sitka_bmc; 2933*0Sstevel@tonic-gate ++attempts; 2934*0Sstevel@tonic-gate } 2935*0Sstevel@tonic-gate } while ((byte == SMS_READ_STATE) || 2936*0Sstevel@tonic-gate (byte == SMS_WRITE_STATE)); 2937*0Sstevel@tonic-gate outb(SMS_COMMAND_REGISTER, SMS_GET_STATUS); 2938*0Sstevel@tonic-gate i = 0; 2939*0Sstevel@tonic-gate for (; i < (sizeof (sitka_bmc)/sizeof (sitka_bmc[0])); 2940*0Sstevel@tonic-gate i++) { 2941*0Sstevel@tonic-gate attempts = 0; 2942*0Sstevel@tonic-gate do { 2943*0Sstevel@tonic-gate byte = inb(SMS_STATUS_REGISTER); 2944*0Sstevel@tonic-gate byte &= SMS_IBF_MASK; 2945*0Sstevel@tonic-gate if (byte != 0) { 2946*0Sstevel@tonic-gate drv_usecwait(1000); 2947*0Sstevel@tonic-gate if (attempts >= 3) 2948*0Sstevel@tonic-gate goto restart_sitka_bmc; 2949*0Sstevel@tonic-gate ++attempts; 2950*0Sstevel@tonic-gate } 2951*0Sstevel@tonic-gate } while (byte != 0); 2952*0Sstevel@tonic-gate outb(sitka_bmc[i].port, sitka_bmc[i].data); 2953*0Sstevel@tonic-gate } 2954*0Sstevel@tonic-gate break; 2955*0Sstevel@tonic-gate 2956*0Sstevel@tonic-gate case APIC_POWEROFF_NONE: 2957*0Sstevel@tonic-gate 2958*0Sstevel@tonic-gate /* If no APIC direct method, we will try using ACPI */ 2959*0Sstevel@tonic-gate if (apic_enable_acpi) { 2960*0Sstevel@tonic-gate if (acpi_poweroff() == 1) 2961*0Sstevel@tonic-gate return; 2962*0Sstevel@tonic-gate } else 2963*0Sstevel@tonic-gate return; 2964*0Sstevel@tonic-gate 2965*0Sstevel@tonic-gate break; 2966*0Sstevel@tonic-gate } 2967*0Sstevel@tonic-gate /* 2968*0Sstevel@tonic-gate * Wait a limited time here for power to go off. 2969*0Sstevel@tonic-gate * If the power does not go off, then there was a 2970*0Sstevel@tonic-gate * problem and we should continue to the halt which 2971*0Sstevel@tonic-gate * prints a message for the user to press a key to 2972*0Sstevel@tonic-gate * reboot. 2973*0Sstevel@tonic-gate */ 2974*0Sstevel@tonic-gate drv_usecwait(7000000); /* wait seven seconds */ 2975*0Sstevel@tonic-gate 2976*0Sstevel@tonic-gate } 2977*0Sstevel@tonic-gate 2978*0Sstevel@tonic-gate /* 2979*0Sstevel@tonic-gate * Try and disable all interrupts. We just assign interrupts to other 2980*0Sstevel@tonic-gate * processors based on policy. If any were bound by user request, we 2981*0Sstevel@tonic-gate * let them continue and return failure. We do not bother to check 2982*0Sstevel@tonic-gate * for cache affinity while rebinding. 
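 *
 * CPU 0 can never be taken out of interrupt service this way; the call
 * fails immediately for cpun == 0.  Interrupts that are only temporarily
 * bound to the target CPU are spread round-robin across the remaining
 * processors.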
2983*0Sstevel@tonic-gate */ 2984*0Sstevel@tonic-gate 2985*0Sstevel@tonic-gate static int 2986*0Sstevel@tonic-gate apic_disable_intr(processorid_t cpun) 2987*0Sstevel@tonic-gate { 2988*0Sstevel@tonic-gate int bind_cpu = 0, i, hardbound = 0, iflag; 2989*0Sstevel@tonic-gate apic_irq_t *irq_ptr; 2990*0Sstevel@tonic-gate 2991*0Sstevel@tonic-gate if (cpun == 0) 2992*0Sstevel@tonic-gate return (PSM_FAILURE); 2993*0Sstevel@tonic-gate 2994*0Sstevel@tonic-gate iflag = intr_clear(); 2995*0Sstevel@tonic-gate lock_set(&apic_ioapic_lock); 2996*0Sstevel@tonic-gate apic_cpus[cpun].aci_status &= ~APIC_CPU_INTR_ENABLE; 2997*0Sstevel@tonic-gate lock_clear(&apic_ioapic_lock); 2998*0Sstevel@tonic-gate intr_restore(iflag); 2999*0Sstevel@tonic-gate apic_cpus[cpun].aci_curipl = 0; 3000*0Sstevel@tonic-gate i = apic_min_device_irq; 3001*0Sstevel@tonic-gate for (; i <= apic_max_device_irq; i++) { 3002*0Sstevel@tonic-gate /* 3003*0Sstevel@tonic-gate * If there are bound interrupts on this cpu, then 3004*0Sstevel@tonic-gate * rebind them to other processors. 3005*0Sstevel@tonic-gate */ 3006*0Sstevel@tonic-gate if ((irq_ptr = apic_irq_table[i]) != NULL) { 3007*0Sstevel@tonic-gate ASSERT((irq_ptr->airq_temp_cpu == IRQ_UNBOUND) || 3008*0Sstevel@tonic-gate (irq_ptr->airq_temp_cpu == IRQ_UNINIT) || 3009*0Sstevel@tonic-gate ((irq_ptr->airq_temp_cpu & ~IRQ_USER_BOUND) < 3010*0Sstevel@tonic-gate apic_nproc)); 3011*0Sstevel@tonic-gate 3012*0Sstevel@tonic-gate if (irq_ptr->airq_temp_cpu == (cpun | IRQ_USER_BOUND)) { 3013*0Sstevel@tonic-gate hardbound = 1; 3014*0Sstevel@tonic-gate continue; 3015*0Sstevel@tonic-gate } 3016*0Sstevel@tonic-gate 3017*0Sstevel@tonic-gate if (irq_ptr->airq_temp_cpu == cpun) { 3018*0Sstevel@tonic-gate do { 3019*0Sstevel@tonic-gate apic_next_bind_cpu += 2; 3020*0Sstevel@tonic-gate bind_cpu = apic_next_bind_cpu / 2; 3021*0Sstevel@tonic-gate if (bind_cpu >= apic_nproc) { 3022*0Sstevel@tonic-gate apic_next_bind_cpu = 1; 3023*0Sstevel@tonic-gate bind_cpu = 0; 3024*0Sstevel@tonic-gate 3025*0Sstevel@tonic-gate } 3026*0Sstevel@tonic-gate } while (apic_rebind_all(irq_ptr, bind_cpu, 1)); 3027*0Sstevel@tonic-gate } 3028*0Sstevel@tonic-gate } 3029*0Sstevel@tonic-gate } 3030*0Sstevel@tonic-gate if (hardbound) { 3031*0Sstevel@tonic-gate cmn_err(CE_WARN, "Could not disable interrupts on %d" 3032*0Sstevel@tonic-gate "due to user bound interrupts", cpun); 3033*0Sstevel@tonic-gate return (PSM_FAILURE); 3034*0Sstevel@tonic-gate } 3035*0Sstevel@tonic-gate else 3036*0Sstevel@tonic-gate return (PSM_SUCCESS); 3037*0Sstevel@tonic-gate } 3038*0Sstevel@tonic-gate 3039*0Sstevel@tonic-gate static void 3040*0Sstevel@tonic-gate apic_enable_intr(processorid_t cpun) 3041*0Sstevel@tonic-gate { 3042*0Sstevel@tonic-gate int i, iflag; 3043*0Sstevel@tonic-gate apic_irq_t *irq_ptr; 3044*0Sstevel@tonic-gate 3045*0Sstevel@tonic-gate iflag = intr_clear(); 3046*0Sstevel@tonic-gate lock_set(&apic_ioapic_lock); 3047*0Sstevel@tonic-gate apic_cpus[cpun].aci_status |= APIC_CPU_INTR_ENABLE; 3048*0Sstevel@tonic-gate lock_clear(&apic_ioapic_lock); 3049*0Sstevel@tonic-gate intr_restore(iflag); 3050*0Sstevel@tonic-gate 3051*0Sstevel@tonic-gate i = apic_min_device_irq; 3052*0Sstevel@tonic-gate for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) { 3053*0Sstevel@tonic-gate if ((irq_ptr = apic_irq_table[i]) != NULL) { 3054*0Sstevel@tonic-gate if ((irq_ptr->airq_cpu & ~IRQ_USER_BOUND) == cpun) { 3055*0Sstevel@tonic-gate (void) apic_rebind_all(irq_ptr, 3056*0Sstevel@tonic-gate irq_ptr->airq_cpu, 1); 3057*0Sstevel@tonic-gate } 3058*0Sstevel@tonic-gate 
} 3059*0Sstevel@tonic-gate } 3060*0Sstevel@tonic-gate } 3061*0Sstevel@tonic-gate 3062*0Sstevel@tonic-gate /* 3063*0Sstevel@tonic-gate * apic_introp_xlate() replaces apic_translate_irq() and is 3064*0Sstevel@tonic-gate * called only from apic_intr_ops(). With the new ADII framework, 3065*0Sstevel@tonic-gate * the priority can no longer be retrived through i_ddi_get_intrspec(). 3066*0Sstevel@tonic-gate * It has to be passed in from the caller. 3067*0Sstevel@tonic-gate */ 3068*0Sstevel@tonic-gate int 3069*0Sstevel@tonic-gate apic_introp_xlate(dev_info_t *dip, struct intrspec *ispec, int type) 3070*0Sstevel@tonic-gate { 3071*0Sstevel@tonic-gate char dev_type[16]; 3072*0Sstevel@tonic-gate int dev_len, pci_irq, newirq, bustype, devid, busid, i; 3073*0Sstevel@tonic-gate int irqno = ispec->intrspec_vec; 3074*0Sstevel@tonic-gate ddi_acc_handle_t cfg_handle; 3075*0Sstevel@tonic-gate uchar_t ipin; 3076*0Sstevel@tonic-gate struct apic_io_intr *intrp; 3077*0Sstevel@tonic-gate iflag_t intr_flag; 3078*0Sstevel@tonic-gate APIC_HEADER *hp; 3079*0Sstevel@tonic-gate MADT_INTERRUPT_OVERRIDE *isop; 3080*0Sstevel@tonic-gate apic_irq_t *airqp; 3081*0Sstevel@tonic-gate 3082*0Sstevel@tonic-gate DDI_INTR_IMPLDBG((CE_CONT, "apic_introp_xlate: dip=0x%p name=%s " 3083*0Sstevel@tonic-gate "type=%d irqno=0x%x\n", (void *)dip, ddi_get_name(dip), type, 3084*0Sstevel@tonic-gate irqno)); 3085*0Sstevel@tonic-gate 3086*0Sstevel@tonic-gate if (DDI_INTR_IS_MSI_OR_MSIX(type)) { 3087*0Sstevel@tonic-gate if ((airqp = apic_find_irq(dip, ispec, type)) != NULL) 3088*0Sstevel@tonic-gate return (apic_vector_to_irq[airqp->airq_vector]); 3089*0Sstevel@tonic-gate return (apic_setup_irq_table(dip, irqno, NULL, ispec, 3090*0Sstevel@tonic-gate NULL, type)); 3091*0Sstevel@tonic-gate } 3092*0Sstevel@tonic-gate 3093*0Sstevel@tonic-gate bustype = 0; 3094*0Sstevel@tonic-gate 3095*0Sstevel@tonic-gate /* check if we have already translated this irq */ 3096*0Sstevel@tonic-gate mutex_enter(&airq_mutex); 3097*0Sstevel@tonic-gate newirq = apic_min_device_irq; 3098*0Sstevel@tonic-gate for (; newirq <= apic_max_device_irq; newirq++) { 3099*0Sstevel@tonic-gate airqp = apic_irq_table[newirq]; 3100*0Sstevel@tonic-gate while (airqp) { 3101*0Sstevel@tonic-gate if ((airqp->airq_dip == dip) && 3102*0Sstevel@tonic-gate (airqp->airq_origirq == irqno) && 3103*0Sstevel@tonic-gate (airqp->airq_mps_intr_index != FREE_INDEX)) { 3104*0Sstevel@tonic-gate 3105*0Sstevel@tonic-gate mutex_exit(&airq_mutex); 3106*0Sstevel@tonic-gate return (VIRTIRQ(newirq, airqp->airq_share_id)); 3107*0Sstevel@tonic-gate } 3108*0Sstevel@tonic-gate airqp = airqp->airq_next; 3109*0Sstevel@tonic-gate } 3110*0Sstevel@tonic-gate } 3111*0Sstevel@tonic-gate mutex_exit(&airq_mutex); 3112*0Sstevel@tonic-gate 3113*0Sstevel@tonic-gate if (apic_defconf) 3114*0Sstevel@tonic-gate goto defconf; 3115*0Sstevel@tonic-gate 3116*0Sstevel@tonic-gate if ((dip == NULL) || (!apic_irq_translate && !apic_enable_acpi)) 3117*0Sstevel@tonic-gate goto nonpci; 3118*0Sstevel@tonic-gate 3119*0Sstevel@tonic-gate dev_len = sizeof (dev_type); 3120*0Sstevel@tonic-gate if (ddi_getlongprop_buf(DDI_DEV_T_NONE, ddi_get_parent(dip), 3121*0Sstevel@tonic-gate DDI_PROP_DONTPASS, "device_type", (caddr_t)dev_type, 3122*0Sstevel@tonic-gate &dev_len) != DDI_PROP_SUCCESS) { 3123*0Sstevel@tonic-gate goto nonpci; 3124*0Sstevel@tonic-gate } 3125*0Sstevel@tonic-gate 3126*0Sstevel@tonic-gate if (strcmp(dev_type, "pci") == 0) { 3127*0Sstevel@tonic-gate /* pci device */ 3128*0Sstevel@tonic-gate if (acpica_get_bdf(dip, &busid, &devid, NULL) != 0) 
3129*0Sstevel@tonic-gate goto nonpci; 3130*0Sstevel@tonic-gate if (busid == 0 && apic_pci_bus_total == 1) 3131*0Sstevel@tonic-gate busid = (int)apic_single_pci_busid; 3132*0Sstevel@tonic-gate 3133*0Sstevel@tonic-gate if (pci_config_setup(dip, &cfg_handle) != DDI_SUCCESS) 3134*0Sstevel@tonic-gate goto nonpci; 3135*0Sstevel@tonic-gate ipin = pci_config_get8(cfg_handle, PCI_CONF_IPIN) - PCI_INTA; 3136*0Sstevel@tonic-gate pci_config_teardown(&cfg_handle); 3137*0Sstevel@tonic-gate if (apic_enable_acpi && !apic_use_acpi_madt_only) { 3138*0Sstevel@tonic-gate if (apic_acpi_translate_pci_irq(dip, busid, devid, 3139*0Sstevel@tonic-gate ipin, &pci_irq, &intr_flag) != ACPI_PSM_SUCCESS) 3140*0Sstevel@tonic-gate goto nonpci; 3141*0Sstevel@tonic-gate 3142*0Sstevel@tonic-gate intr_flag.bustype = BUS_PCI; 3143*0Sstevel@tonic-gate if ((newirq = apic_setup_irq_table(dip, pci_irq, NULL, 3144*0Sstevel@tonic-gate ispec, &intr_flag, type)) == -1) 3145*0Sstevel@tonic-gate goto nonpci; 3146*0Sstevel@tonic-gate return (newirq); 3147*0Sstevel@tonic-gate } else { 3148*0Sstevel@tonic-gate pci_irq = ((devid & 0x1f) << 2) | (ipin & 0x3); 3149*0Sstevel@tonic-gate if ((intrp = apic_find_io_intr_w_busid(pci_irq, busid)) 3150*0Sstevel@tonic-gate == NULL) { 3151*0Sstevel@tonic-gate if ((pci_irq = apic_handle_pci_pci_bridge(dip, 3152*0Sstevel@tonic-gate devid, ipin, &intrp)) == -1) 3153*0Sstevel@tonic-gate goto nonpci; 3154*0Sstevel@tonic-gate } 3155*0Sstevel@tonic-gate if ((newirq = apic_setup_irq_table(dip, pci_irq, intrp, 3156*0Sstevel@tonic-gate ispec, NULL, type)) == -1) 3157*0Sstevel@tonic-gate goto nonpci; 3158*0Sstevel@tonic-gate return (newirq); 3159*0Sstevel@tonic-gate } 3160*0Sstevel@tonic-gate } else if (strcmp(dev_type, "isa") == 0) 3161*0Sstevel@tonic-gate bustype = BUS_ISA; 3162*0Sstevel@tonic-gate else if (strcmp(dev_type, "eisa") == 0) 3163*0Sstevel@tonic-gate bustype = BUS_EISA; 3164*0Sstevel@tonic-gate 3165*0Sstevel@tonic-gate nonpci: 3166*0Sstevel@tonic-gate if (apic_enable_acpi && !apic_use_acpi_madt_only) { 3167*0Sstevel@tonic-gate /* search iso entries first */ 3168*0Sstevel@tonic-gate if (acpi_iso_cnt != 0) { 3169*0Sstevel@tonic-gate hp = (APIC_HEADER *)acpi_isop; 3170*0Sstevel@tonic-gate i = 0; 3171*0Sstevel@tonic-gate while (i < acpi_iso_cnt) { 3172*0Sstevel@tonic-gate if (hp->Type == APIC_XRUPT_OVERRIDE) { 3173*0Sstevel@tonic-gate isop = (MADT_INTERRUPT_OVERRIDE *)hp; 3174*0Sstevel@tonic-gate if (isop->Bus == 0 && 3175*0Sstevel@tonic-gate isop->Source == irqno) { 3176*0Sstevel@tonic-gate newirq = isop->Interrupt; 3177*0Sstevel@tonic-gate intr_flag.intr_po = 3178*0Sstevel@tonic-gate isop->Polarity; 3179*0Sstevel@tonic-gate intr_flag.intr_el = 3180*0Sstevel@tonic-gate isop->TriggerMode; 3181*0Sstevel@tonic-gate intr_flag.bustype = BUS_ISA; 3182*0Sstevel@tonic-gate 3183*0Sstevel@tonic-gate return (apic_setup_irq_table( 3184*0Sstevel@tonic-gate dip, newirq, NULL, ispec, 3185*0Sstevel@tonic-gate &intr_flag, type)); 3186*0Sstevel@tonic-gate 3187*0Sstevel@tonic-gate } 3188*0Sstevel@tonic-gate i++; 3189*0Sstevel@tonic-gate } 3190*0Sstevel@tonic-gate hp = (APIC_HEADER *)(((char *)hp) + 3191*0Sstevel@tonic-gate hp->Length); 3192*0Sstevel@tonic-gate } 3193*0Sstevel@tonic-gate } 3194*0Sstevel@tonic-gate intr_flag.intr_po = INTR_PO_ACTIVE_HIGH; 3195*0Sstevel@tonic-gate intr_flag.intr_el = INTR_EL_EDGE; 3196*0Sstevel@tonic-gate intr_flag.bustype = BUS_ISA; 3197*0Sstevel@tonic-gate return (apic_setup_irq_table(dip, irqno, NULL, ispec, 3198*0Sstevel@tonic-gate &intr_flag, type)); 3199*0Sstevel@tonic-gate } else { 
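		/*
		 * ACPI is not being used for interrupt routing here, so fall
		 * back to the MPS tables.  Try the bus type derived from the
		 * device node (or, if none was found, EISA when EISA
		 * level-triggered interrupts are in use, else ISA); if that
		 * bus has no matching interrupt entry, retry once with the
		 * other bus type before dropping through to the MPS default
		 * configuration.
		 */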
3200*0Sstevel@tonic-gate if (bustype == 0) 3201*0Sstevel@tonic-gate bustype = eisa_level_intr_mask ? BUS_EISA : BUS_ISA; 3202*0Sstevel@tonic-gate for (i = 0; i < 2; i++) { 3203*0Sstevel@tonic-gate if (((busid = apic_find_bus_id(bustype)) != -1) && 3204*0Sstevel@tonic-gate ((intrp = apic_find_io_intr_w_busid(irqno, busid)) 3205*0Sstevel@tonic-gate != NULL)) { 3206*0Sstevel@tonic-gate if ((newirq = apic_setup_irq_table(dip, irqno, 3207*0Sstevel@tonic-gate intrp, ispec, NULL, type)) != -1) { 3208*0Sstevel@tonic-gate return (newirq); 3209*0Sstevel@tonic-gate } 3210*0Sstevel@tonic-gate goto defconf; 3211*0Sstevel@tonic-gate } 3212*0Sstevel@tonic-gate bustype = (bustype == BUS_EISA) ? BUS_ISA : BUS_EISA; 3213*0Sstevel@tonic-gate } 3214*0Sstevel@tonic-gate } 3215*0Sstevel@tonic-gate 3216*0Sstevel@tonic-gate /* MPS default configuration */ 3217*0Sstevel@tonic-gate defconf: 3218*0Sstevel@tonic-gate newirq = apic_setup_irq_table(dip, irqno, NULL, ispec, NULL, type); 3219*0Sstevel@tonic-gate if (newirq == -1) 3220*0Sstevel@tonic-gate return (newirq); 3221*0Sstevel@tonic-gate ASSERT(IRQINDEX(newirq) == irqno); 3222*0Sstevel@tonic-gate ASSERT(apic_irq_table[irqno]); 3223*0Sstevel@tonic-gate return (newirq); 3224*0Sstevel@tonic-gate } 3225*0Sstevel@tonic-gate 3226*0Sstevel@tonic-gate 3227*0Sstevel@tonic-gate 3228*0Sstevel@tonic-gate 3229*0Sstevel@tonic-gate 3230*0Sstevel@tonic-gate 3231*0Sstevel@tonic-gate /* 3232*0Sstevel@tonic-gate * On machines with PCI-PCI bridges, a device behind a PCI-PCI bridge 3233*0Sstevel@tonic-gate * needs special handling. We may need to chase up the device tree, 3234*0Sstevel@tonic-gate * using the PCI-PCI Bridge specification's "rotating IPIN assumptions", 3235*0Sstevel@tonic-gate * to find the IPIN at the root bus that relates to the IPIN on the 3236*0Sstevel@tonic-gate * subsidiary bus (for ACPI or MP). We may, however, have an entry 3237*0Sstevel@tonic-gate * in the MP table or the ACPI namespace for this device itself. 3238*0Sstevel@tonic-gate * We handle both cases in the search below. 3239*0Sstevel@tonic-gate */ 3240*0Sstevel@tonic-gate /* this is the non-acpi version */ 3241*0Sstevel@tonic-gate static int 3242*0Sstevel@tonic-gate apic_handle_pci_pci_bridge(dev_info_t *idip, int child_devno, int child_ipin, 3243*0Sstevel@tonic-gate struct apic_io_intr **intrp) 3244*0Sstevel@tonic-gate { 3245*0Sstevel@tonic-gate dev_info_t *dipp, *dip; 3246*0Sstevel@tonic-gate int pci_irq; 3247*0Sstevel@tonic-gate ddi_acc_handle_t cfg_handle; 3248*0Sstevel@tonic-gate int bridge_devno, bridge_bus; 3249*0Sstevel@tonic-gate int ipin; 3250*0Sstevel@tonic-gate 3251*0Sstevel@tonic-gate dip = idip; 3252*0Sstevel@tonic-gate 3253*0Sstevel@tonic-gate /*CONSTCOND*/ 3254*0Sstevel@tonic-gate while (1) { 3255*0Sstevel@tonic-gate if ((dipp = ddi_get_parent(dip)) == (dev_info_t *)NULL) 3256*0Sstevel@tonic-gate return (-1); 3257*0Sstevel@tonic-gate if ((pci_config_setup(dipp, &cfg_handle) == DDI_SUCCESS) && 3258*0Sstevel@tonic-gate (pci_config_get8(cfg_handle, PCI_CONF_BASCLASS) == 3259*0Sstevel@tonic-gate PCI_CLASS_BRIDGE) && (pci_config_get8(cfg_handle, 3260*0Sstevel@tonic-gate PCI_CONF_SUBCLASS) == PCI_BRIDGE_PCI)) { 3261*0Sstevel@tonic-gate pci_config_teardown(&cfg_handle); 3262*0Sstevel@tonic-gate if (acpica_get_bdf(dipp, &bridge_bus, &bridge_devno, 3263*0Sstevel@tonic-gate NULL) != 0) 3264*0Sstevel@tonic-gate return (-1); 3265*0Sstevel@tonic-gate /* 3266*0Sstevel@tonic-gate * This is the rotating scheme that Compaq is using 3267*0Sstevel@tonic-gate * and documented in the pci to pci spec. 
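 * (For a device at device number D raising INTx pin P, where P is 0 for
 * INTA through 3 for INTD, the pin seen on the parent bus is
 * (D + P) % 4; that is the PCI_INTD modulus computed below.)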
Also, if 3268*0Sstevel@tonic-gate * the pci to pci bridge is behind another pci to 3269*0Sstevel@tonic-gate * pci bridge, then it need to keep transversing 3270*0Sstevel@tonic-gate * up until an interrupt entry is found or reach 3271*0Sstevel@tonic-gate * the top of the tree 3272*0Sstevel@tonic-gate */ 3273*0Sstevel@tonic-gate ipin = (child_devno + child_ipin) % PCI_INTD; 3274*0Sstevel@tonic-gate if (bridge_bus == 0 && apic_pci_bus_total == 1) 3275*0Sstevel@tonic-gate bridge_bus = (int)apic_single_pci_busid; 3276*0Sstevel@tonic-gate pci_irq = ((bridge_devno & 0x1f) << 2) | 3277*0Sstevel@tonic-gate (ipin & 0x3); 3278*0Sstevel@tonic-gate if ((*intrp = apic_find_io_intr_w_busid(pci_irq, 3279*0Sstevel@tonic-gate bridge_bus)) != NULL) { 3280*0Sstevel@tonic-gate return (pci_irq); 3281*0Sstevel@tonic-gate } 3282*0Sstevel@tonic-gate dip = dipp; 3283*0Sstevel@tonic-gate child_devno = bridge_devno; 3284*0Sstevel@tonic-gate child_ipin = ipin; 3285*0Sstevel@tonic-gate } else 3286*0Sstevel@tonic-gate return (-1); 3287*0Sstevel@tonic-gate } 3288*0Sstevel@tonic-gate /*LINTED: function will not fall off the bottom */ 3289*0Sstevel@tonic-gate } 3290*0Sstevel@tonic-gate 3291*0Sstevel@tonic-gate 3292*0Sstevel@tonic-gate 3293*0Sstevel@tonic-gate 3294*0Sstevel@tonic-gate static uchar_t 3295*0Sstevel@tonic-gate acpi_find_ioapic(int irq) 3296*0Sstevel@tonic-gate { 3297*0Sstevel@tonic-gate int i; 3298*0Sstevel@tonic-gate 3299*0Sstevel@tonic-gate for (i = 0; i < apic_io_max; i++) { 3300*0Sstevel@tonic-gate if (irq >= apic_io_vectbase[i] && irq <= apic_io_vectend[i]) 3301*0Sstevel@tonic-gate return (i); 3302*0Sstevel@tonic-gate } 3303*0Sstevel@tonic-gate return (0xFF); /* shouldn't happen */ 3304*0Sstevel@tonic-gate } 3305*0Sstevel@tonic-gate 3306*0Sstevel@tonic-gate /* 3307*0Sstevel@tonic-gate * See if two irqs are compatible for sharing a vector. 3308*0Sstevel@tonic-gate * Currently we only support sharing of PCI devices. 3309*0Sstevel@tonic-gate */ 3310*0Sstevel@tonic-gate static int 3311*0Sstevel@tonic-gate acpi_intr_compatible(iflag_t iflag1, iflag_t iflag2) 3312*0Sstevel@tonic-gate { 3313*0Sstevel@tonic-gate uint_t level1, po1; 3314*0Sstevel@tonic-gate uint_t level2, po2; 3315*0Sstevel@tonic-gate 3316*0Sstevel@tonic-gate /* Assume active high by default */ 3317*0Sstevel@tonic-gate po1 = 0; 3318*0Sstevel@tonic-gate po2 = 0; 3319*0Sstevel@tonic-gate 3320*0Sstevel@tonic-gate if (iflag1.bustype != iflag2.bustype || iflag1.bustype != BUS_PCI) 3321*0Sstevel@tonic-gate return (0); 3322*0Sstevel@tonic-gate 3323*0Sstevel@tonic-gate if (iflag1.intr_el == INTR_EL_CONFORM) 3324*0Sstevel@tonic-gate level1 = AV_LEVEL; 3325*0Sstevel@tonic-gate else 3326*0Sstevel@tonic-gate level1 = (iflag1.intr_el == INTR_EL_LEVEL) ? AV_LEVEL : 0; 3327*0Sstevel@tonic-gate 3328*0Sstevel@tonic-gate if (level1 && ((iflag1.intr_po == INTR_PO_ACTIVE_LOW) || 3329*0Sstevel@tonic-gate (iflag1.intr_po == INTR_PO_CONFORM))) 3330*0Sstevel@tonic-gate po1 = AV_ACTIVE_LOW; 3331*0Sstevel@tonic-gate 3332*0Sstevel@tonic-gate if (iflag2.intr_el == INTR_EL_CONFORM) 3333*0Sstevel@tonic-gate level2 = AV_LEVEL; 3334*0Sstevel@tonic-gate else 3335*0Sstevel@tonic-gate level2 = (iflag2.intr_el == INTR_EL_LEVEL) ? 
AV_LEVEL : 0; 3336*0Sstevel@tonic-gate 3337*0Sstevel@tonic-gate if (level2 && ((iflag2.intr_po == INTR_PO_ACTIVE_LOW) || 3338*0Sstevel@tonic-gate (iflag2.intr_po == INTR_PO_CONFORM))) 3339*0Sstevel@tonic-gate po2 = AV_ACTIVE_LOW; 3340*0Sstevel@tonic-gate 3341*0Sstevel@tonic-gate if ((level1 == level2) && (po1 == po2)) 3342*0Sstevel@tonic-gate return (1); 3343*0Sstevel@tonic-gate 3344*0Sstevel@tonic-gate return (0); 3345*0Sstevel@tonic-gate } 3346*0Sstevel@tonic-gate 3347*0Sstevel@tonic-gate /* 3348*0Sstevel@tonic-gate * Attempt to share vector with someone else 3349*0Sstevel@tonic-gate */ 3350*0Sstevel@tonic-gate static int 3351*0Sstevel@tonic-gate apic_share_vector(int irqno, iflag_t *intr_flagp, short intr_index, int ipl, 3352*0Sstevel@tonic-gate uchar_t ioapicindex, uchar_t ipin, apic_irq_t **irqptrp) 3353*0Sstevel@tonic-gate { 3354*0Sstevel@tonic-gate #ifdef DEBUG 3355*0Sstevel@tonic-gate apic_irq_t *tmpirqp = NULL; 3356*0Sstevel@tonic-gate #endif /* DEBUG */ 3357*0Sstevel@tonic-gate apic_irq_t *irqptr, dummyirq; 3358*0Sstevel@tonic-gate int newirq, chosen_irq = -1, share = 127; 3359*0Sstevel@tonic-gate int lowest, highest, i; 3360*0Sstevel@tonic-gate uchar_t share_id; 3361*0Sstevel@tonic-gate 3362*0Sstevel@tonic-gate DDI_INTR_IMPLDBG((CE_CONT, "apic_share_vector: irqno=0x%x " 3363*0Sstevel@tonic-gate "intr_index=0x%x ipl=0x%x\n", irqno, intr_index, ipl)); 3364*0Sstevel@tonic-gate 3365*0Sstevel@tonic-gate highest = apic_ipltopri[ipl] + APIC_VECTOR_MASK; 3366*0Sstevel@tonic-gate lowest = apic_ipltopri[ipl-1] + APIC_VECTOR_PER_IPL; 3367*0Sstevel@tonic-gate 3368*0Sstevel@tonic-gate if (highest < lowest) /* Both ipl and ipl-1 map to same pri */ 3369*0Sstevel@tonic-gate lowest -= APIC_VECTOR_PER_IPL; 3370*0Sstevel@tonic-gate dummyirq.airq_mps_intr_index = intr_index; 3371*0Sstevel@tonic-gate dummyirq.airq_ioapicindex = ioapicindex; 3372*0Sstevel@tonic-gate dummyirq.airq_intin_no = ipin; 3373*0Sstevel@tonic-gate if (intr_flagp) 3374*0Sstevel@tonic-gate dummyirq.airq_iflag = *intr_flagp; 3375*0Sstevel@tonic-gate apic_record_rdt_entry(&dummyirq, irqno); 3376*0Sstevel@tonic-gate for (i = lowest; i <= highest; i++) { 3377*0Sstevel@tonic-gate newirq = apic_vector_to_irq[i]; 3378*0Sstevel@tonic-gate if (newirq == APIC_RESV_IRQ) 3379*0Sstevel@tonic-gate continue; 3380*0Sstevel@tonic-gate irqptr = apic_irq_table[newirq]; 3381*0Sstevel@tonic-gate 3382*0Sstevel@tonic-gate /* don't share SCI */ 3383*0Sstevel@tonic-gate if (irqptr->airq_mps_intr_index == SCI_INDEX) 3384*0Sstevel@tonic-gate continue; 3385*0Sstevel@tonic-gate 3386*0Sstevel@tonic-gate if ((dummyirq.airq_rdt_entry & 0xFF00) != 3387*0Sstevel@tonic-gate (irqptr->airq_rdt_entry & 0xFF00)) 3388*0Sstevel@tonic-gate /* not compatible */ 3389*0Sstevel@tonic-gate continue; 3390*0Sstevel@tonic-gate 3391*0Sstevel@tonic-gate if (irqptr->airq_share < share) { 3392*0Sstevel@tonic-gate share = irqptr->airq_share; 3393*0Sstevel@tonic-gate chosen_irq = newirq; 3394*0Sstevel@tonic-gate } 3395*0Sstevel@tonic-gate } 3396*0Sstevel@tonic-gate if (chosen_irq != -1) { 3397*0Sstevel@tonic-gate /* 3398*0Sstevel@tonic-gate * Assign a share id which is free or which is larger 3399*0Sstevel@tonic-gate * than the largest one. 
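 *
 * For example, if the chain already carries share ids 1 and 2 and none
 * of its entries is free, the new entry gets share id 3; a free entry,
 * if found, simply donates its existing id.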
3400*0Sstevel@tonic-gate */ 3401*0Sstevel@tonic-gate share_id = 1; 3402*0Sstevel@tonic-gate mutex_enter(&airq_mutex); 3403*0Sstevel@tonic-gate irqptr = apic_irq_table[chosen_irq]; 3404*0Sstevel@tonic-gate while (irqptr) { 3405*0Sstevel@tonic-gate if (irqptr->airq_mps_intr_index == FREE_INDEX) { 3406*0Sstevel@tonic-gate share_id = irqptr->airq_share_id; 3407*0Sstevel@tonic-gate break; 3408*0Sstevel@tonic-gate } 3409*0Sstevel@tonic-gate if (share_id <= irqptr->airq_share_id) 3410*0Sstevel@tonic-gate share_id = irqptr->airq_share_id + 1; 3411*0Sstevel@tonic-gate #ifdef DEBUG 3412*0Sstevel@tonic-gate tmpirqp = irqptr; 3413*0Sstevel@tonic-gate #endif /* DEBUG */ 3414*0Sstevel@tonic-gate irqptr = irqptr->airq_next; 3415*0Sstevel@tonic-gate } 3416*0Sstevel@tonic-gate if (!irqptr) { 3417*0Sstevel@tonic-gate irqptr = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP); 3418*0Sstevel@tonic-gate irqptr->airq_temp_cpu = IRQ_UNINIT; 3419*0Sstevel@tonic-gate irqptr->airq_next = 3420*0Sstevel@tonic-gate apic_irq_table[chosen_irq]->airq_next; 3421*0Sstevel@tonic-gate apic_irq_table[chosen_irq]->airq_next = irqptr; 3422*0Sstevel@tonic-gate #ifdef DEBUG 3423*0Sstevel@tonic-gate tmpirqp = apic_irq_table[chosen_irq]; 3424*0Sstevel@tonic-gate #endif /* DEBUG */ 3425*0Sstevel@tonic-gate } 3426*0Sstevel@tonic-gate irqptr->airq_mps_intr_index = intr_index; 3427*0Sstevel@tonic-gate irqptr->airq_ioapicindex = ioapicindex; 3428*0Sstevel@tonic-gate irqptr->airq_intin_no = ipin; 3429*0Sstevel@tonic-gate if (intr_flagp) 3430*0Sstevel@tonic-gate irqptr->airq_iflag = *intr_flagp; 3431*0Sstevel@tonic-gate irqptr->airq_vector = apic_irq_table[chosen_irq]->airq_vector; 3432*0Sstevel@tonic-gate irqptr->airq_share_id = share_id; 3433*0Sstevel@tonic-gate apic_record_rdt_entry(irqptr, irqno); 3434*0Sstevel@tonic-gate *irqptrp = irqptr; 3435*0Sstevel@tonic-gate #ifdef DEBUG 3436*0Sstevel@tonic-gate /* shuffle the pointers to test apic_delspl path */ 3437*0Sstevel@tonic-gate if (tmpirqp) { 3438*0Sstevel@tonic-gate tmpirqp->airq_next = irqptr->airq_next; 3439*0Sstevel@tonic-gate irqptr->airq_next = apic_irq_table[chosen_irq]; 3440*0Sstevel@tonic-gate apic_irq_table[chosen_irq] = irqptr; 3441*0Sstevel@tonic-gate } 3442*0Sstevel@tonic-gate #endif /* DEBUG */ 3443*0Sstevel@tonic-gate mutex_exit(&airq_mutex); 3444*0Sstevel@tonic-gate return (VIRTIRQ(chosen_irq, share_id)); 3445*0Sstevel@tonic-gate } 3446*0Sstevel@tonic-gate return (-1); 3447*0Sstevel@tonic-gate } 3448*0Sstevel@tonic-gate 3449*0Sstevel@tonic-gate /* 3450*0Sstevel@tonic-gate * 3451*0Sstevel@tonic-gate */ 3452*0Sstevel@tonic-gate static int 3453*0Sstevel@tonic-gate apic_setup_sci_irq_table(int irqno, uchar_t ipl, iflag_t *intr_flagp) 3454*0Sstevel@tonic-gate { 3455*0Sstevel@tonic-gate int intr_index; 3456*0Sstevel@tonic-gate uchar_t ipin, ioapicindex, vector; 3457*0Sstevel@tonic-gate apic_irq_t *irqptr; 3458*0Sstevel@tonic-gate 3459*0Sstevel@tonic-gate ASSERT(intr_flagp != NULL); 3460*0Sstevel@tonic-gate 3461*0Sstevel@tonic-gate intr_index = SCI_INDEX; 3462*0Sstevel@tonic-gate ioapicindex = acpi_find_ioapic(irqno); 3463*0Sstevel@tonic-gate ASSERT(ioapicindex != 0xFF); 3464*0Sstevel@tonic-gate ipin = irqno - apic_io_vectbase[ioapicindex]; 3465*0Sstevel@tonic-gate if (apic_irq_table[irqno] && 3466*0Sstevel@tonic-gate apic_irq_table[irqno]->airq_mps_intr_index == SCI_INDEX) { 3467*0Sstevel@tonic-gate ASSERT(apic_irq_table[irqno]->airq_intin_no == ipin && 3468*0Sstevel@tonic-gate apic_irq_table[irqno]->airq_ioapicindex == 3469*0Sstevel@tonic-gate ioapicindex); 
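		/*
		 * The SCI was already entered into the table by an earlier
		 * call with a matching pin and I/O APIC; reuse that entry.
		 */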
3470*0Sstevel@tonic-gate return (irqno); 3471*0Sstevel@tonic-gate } 3472*0Sstevel@tonic-gate 3473*0Sstevel@tonic-gate if ((vector = apic_allocate_vector(ipl, irqno, 0)) == 0) { 3474*0Sstevel@tonic-gate cmn_err(CE_WARN, "!apic: failed to allocate vector for SCI"); 3475*0Sstevel@tonic-gate return (-1); 3476*0Sstevel@tonic-gate } 3477*0Sstevel@tonic-gate mutex_enter(&airq_mutex); 3478*0Sstevel@tonic-gate if (apic_irq_table[irqno] == NULL) { 3479*0Sstevel@tonic-gate irqptr = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP); 3480*0Sstevel@tonic-gate irqptr->airq_temp_cpu = IRQ_UNINIT; 3481*0Sstevel@tonic-gate apic_irq_table[irqno] = irqptr; 3482*0Sstevel@tonic-gate } else { 3483*0Sstevel@tonic-gate /* 3484*0Sstevel@tonic-gate * We assume that SCI is the first to attach this IRQ 3485*0Sstevel@tonic-gate */ 3486*0Sstevel@tonic-gate cmn_err(CE_WARN, "!acpi: apic_irq_t not empty for SCI"); 3487*0Sstevel@tonic-gate return (-1); 3488*0Sstevel@tonic-gate } 3489*0Sstevel@tonic-gate 3490*0Sstevel@tonic-gate apic_max_device_irq = max(irqno, apic_max_device_irq); 3491*0Sstevel@tonic-gate apic_min_device_irq = min(irqno, apic_min_device_irq); 3492*0Sstevel@tonic-gate mutex_exit(&airq_mutex); 3493*0Sstevel@tonic-gate irqptr->airq_ioapicindex = ioapicindex; 3494*0Sstevel@tonic-gate irqptr->airq_intin_no = ipin; 3495*0Sstevel@tonic-gate irqptr->airq_ipl = ipl; 3496*0Sstevel@tonic-gate irqptr->airq_vector = vector; 3497*0Sstevel@tonic-gate irqptr->airq_origirq = (uchar_t)irqno; 3498*0Sstevel@tonic-gate irqptr->airq_share_id = 0; 3499*0Sstevel@tonic-gate irqptr->airq_mps_intr_index = (short)intr_index; 3500*0Sstevel@tonic-gate irqptr->airq_dip = NULL; 3501*0Sstevel@tonic-gate irqptr->airq_major = 0; 3502*0Sstevel@tonic-gate irqptr->airq_cpu = 0; /* SCI always on CPU 0 */ 3503*0Sstevel@tonic-gate irqptr->airq_iflag = *intr_flagp; 3504*0Sstevel@tonic-gate apic_record_rdt_entry(irqptr, irqno); 3505*0Sstevel@tonic-gate return (irqno); 3506*0Sstevel@tonic-gate } 3507*0Sstevel@tonic-gate 3508*0Sstevel@tonic-gate /* 3509*0Sstevel@tonic-gate * 3510*0Sstevel@tonic-gate */ 3511*0Sstevel@tonic-gate static int 3512*0Sstevel@tonic-gate apic_setup_irq_table(dev_info_t *dip, int irqno, struct apic_io_intr *intrp, 3513*0Sstevel@tonic-gate struct intrspec *ispec, iflag_t *intr_flagp, int type) 3514*0Sstevel@tonic-gate { 3515*0Sstevel@tonic-gate int origirq = ispec->intrspec_vec; 3516*0Sstevel@tonic-gate uchar_t ipl = ispec->intrspec_pri; 3517*0Sstevel@tonic-gate int newirq, intr_index; 3518*0Sstevel@tonic-gate uchar_t ipin, ioapic, ioapicindex, vector; 3519*0Sstevel@tonic-gate apic_irq_t *irqptr; 3520*0Sstevel@tonic-gate major_t major; 3521*0Sstevel@tonic-gate dev_info_t *sdip; 3522*0Sstevel@tonic-gate 3523*0Sstevel@tonic-gate DDI_INTR_IMPLDBG((CE_CONT, "apic_setup_irq_table: dip=0x%p type=%d " 3524*0Sstevel@tonic-gate "irqno=0x%x origirq=0x%x\n", (void *)dip, type, irqno, origirq)); 3525*0Sstevel@tonic-gate 3526*0Sstevel@tonic-gate ASSERT(ispec != NULL); 3527*0Sstevel@tonic-gate 3528*0Sstevel@tonic-gate major = (dip != NULL) ? ddi_name_to_major(ddi_get_name(dip)) : 0; 3529*0Sstevel@tonic-gate 3530*0Sstevel@tonic-gate if (DDI_INTR_IS_MSI_OR_MSIX(type)) { 3531*0Sstevel@tonic-gate /* MSI/X doesn't need to setup ioapic stuffs */ 3532*0Sstevel@tonic-gate ioapicindex = 0xff; 3533*0Sstevel@tonic-gate ioapic = 0xff; 3534*0Sstevel@tonic-gate ipin = (uchar_t)0xff; 3535*0Sstevel@tonic-gate intr_index = (type == DDI_INTR_TYPE_MSI) ? 
MSI_INDEX : 3536*0Sstevel@tonic-gate MSIX_INDEX; 3537*0Sstevel@tonic-gate mutex_enter(&airq_mutex); 3538*0Sstevel@tonic-gate if ((irqno = apic_allocate_irq(APIC_FIRST_FREE_IRQ)) == -1) { 3539*0Sstevel@tonic-gate mutex_exit(&airq_mutex); 3540*0Sstevel@tonic-gate /* need an irq for MSI/X to index into autovect[] */ 3541*0Sstevel@tonic-gate cmn_err(CE_WARN, "No interrupt irq: %s instance %d", 3542*0Sstevel@tonic-gate ddi_get_name(dip), ddi_get_instance(dip)); 3543*0Sstevel@tonic-gate return (-1); 3544*0Sstevel@tonic-gate } 3545*0Sstevel@tonic-gate mutex_exit(&airq_mutex); 3546*0Sstevel@tonic-gate 3547*0Sstevel@tonic-gate } else if (intrp != NULL) { 3548*0Sstevel@tonic-gate intr_index = (int)(intrp - apic_io_intrp); 3549*0Sstevel@tonic-gate ioapic = intrp->intr_destid; 3550*0Sstevel@tonic-gate ipin = intrp->intr_destintin; 3551*0Sstevel@tonic-gate /* Find ioapicindex. If destid was ALL, we will exit with 0. */ 3552*0Sstevel@tonic-gate for (ioapicindex = apic_io_max - 1; ioapicindex; ioapicindex--) 3553*0Sstevel@tonic-gate if (apic_io_id[ioapicindex] == ioapic) 3554*0Sstevel@tonic-gate break; 3555*0Sstevel@tonic-gate ASSERT((ioapic == apic_io_id[ioapicindex]) || 3556*0Sstevel@tonic-gate (ioapic == INTR_ALL_APIC)); 3557*0Sstevel@tonic-gate 3558*0Sstevel@tonic-gate /* check whether this intin# has been used by another irqno */ 3559*0Sstevel@tonic-gate if ((newirq = apic_find_intin(ioapicindex, ipin)) != -1) { 3560*0Sstevel@tonic-gate return (newirq); 3561*0Sstevel@tonic-gate } 3562*0Sstevel@tonic-gate 3563*0Sstevel@tonic-gate } else if (intr_flagp != NULL) { 3564*0Sstevel@tonic-gate /* ACPI case */ 3565*0Sstevel@tonic-gate intr_index = ACPI_INDEX; 3566*0Sstevel@tonic-gate ioapicindex = acpi_find_ioapic(irqno); 3567*0Sstevel@tonic-gate ASSERT(ioapicindex != 0xFF); 3568*0Sstevel@tonic-gate ioapic = apic_io_id[ioapicindex]; 3569*0Sstevel@tonic-gate ipin = irqno - apic_io_vectbase[ioapicindex]; 3570*0Sstevel@tonic-gate if (apic_irq_table[irqno] && 3571*0Sstevel@tonic-gate apic_irq_table[irqno]->airq_mps_intr_index == ACPI_INDEX) { 3572*0Sstevel@tonic-gate ASSERT(apic_irq_table[irqno]->airq_intin_no == ipin && 3573*0Sstevel@tonic-gate apic_irq_table[irqno]->airq_ioapicindex == 3574*0Sstevel@tonic-gate ioapicindex); 3575*0Sstevel@tonic-gate return (irqno); 3576*0Sstevel@tonic-gate } 3577*0Sstevel@tonic-gate 3578*0Sstevel@tonic-gate } else { 3579*0Sstevel@tonic-gate /* default configuration */ 3580*0Sstevel@tonic-gate ioapicindex = 0; 3581*0Sstevel@tonic-gate ioapic = apic_io_id[ioapicindex]; 3582*0Sstevel@tonic-gate ipin = (uchar_t)irqno; 3583*0Sstevel@tonic-gate intr_index = DEFAULT_INDEX; 3584*0Sstevel@tonic-gate } 3585*0Sstevel@tonic-gate 3586*0Sstevel@tonic-gate if (ispec == NULL) { 3587*0Sstevel@tonic-gate APIC_VERBOSE_IOAPIC((CE_WARN, "No intrspec for irqno = %x\n", 3588*0Sstevel@tonic-gate irqno)); 3589*0Sstevel@tonic-gate } else if ((vector = apic_allocate_vector(ipl, irqno, 0)) == 0) { 3590*0Sstevel@tonic-gate if ((newirq = apic_share_vector(irqno, intr_flagp, intr_index, 3591*0Sstevel@tonic-gate ipl, ioapicindex, ipin, &irqptr)) != -1) { 3592*0Sstevel@tonic-gate irqptr->airq_ipl = ipl; 3593*0Sstevel@tonic-gate irqptr->airq_origirq = (uchar_t)origirq; 3594*0Sstevel@tonic-gate irqptr->airq_dip = dip; 3595*0Sstevel@tonic-gate irqptr->airq_major = major; 3596*0Sstevel@tonic-gate sdip = apic_irq_table[IRQINDEX(newirq)]->airq_dip; 3597*0Sstevel@tonic-gate if (sdip == NULL) { 3598*0Sstevel@tonic-gate cmn_err(CE_WARN, "Sharing vectors: %s" 3599*0Sstevel@tonic-gate " instance %d and SCI", 
3600*0Sstevel@tonic-gate ddi_get_name(dip), ddi_get_instance(dip)); 3601*0Sstevel@tonic-gate } else { 3602*0Sstevel@tonic-gate cmn_err(CE_WARN, "Sharing vectors: %s" 3603*0Sstevel@tonic-gate " instance %d and %s instance %d", 3604*0Sstevel@tonic-gate ddi_get_name(sdip), ddi_get_instance(sdip), 3605*0Sstevel@tonic-gate ddi_get_name(dip), ddi_get_instance(dip)); 3606*0Sstevel@tonic-gate } 3607*0Sstevel@tonic-gate return (newirq); 3608*0Sstevel@tonic-gate } 3609*0Sstevel@tonic-gate /* try high priority allocation now that share has failed */ 3610*0Sstevel@tonic-gate if ((vector = apic_allocate_vector(ipl, irqno, 1)) == 0) { 3611*0Sstevel@tonic-gate cmn_err(CE_WARN, "No interrupt vector: %s instance %d", 3612*0Sstevel@tonic-gate ddi_get_name(dip), ddi_get_instance(dip)); 3613*0Sstevel@tonic-gate return (-1); 3614*0Sstevel@tonic-gate } 3615*0Sstevel@tonic-gate } 3616*0Sstevel@tonic-gate 3617*0Sstevel@tonic-gate mutex_enter(&airq_mutex); 3618*0Sstevel@tonic-gate if (apic_irq_table[irqno] == NULL) { 3619*0Sstevel@tonic-gate irqptr = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP); 3620*0Sstevel@tonic-gate irqptr->airq_temp_cpu = IRQ_UNINIT; 3621*0Sstevel@tonic-gate apic_irq_table[irqno] = irqptr; 3622*0Sstevel@tonic-gate } else { 3623*0Sstevel@tonic-gate irqptr = apic_irq_table[irqno]; 3624*0Sstevel@tonic-gate if (irqptr->airq_mps_intr_index != FREE_INDEX) { 3625*0Sstevel@tonic-gate /* 3626*0Sstevel@tonic-gate * The slot is used by another irqno, so allocate 3627*0Sstevel@tonic-gate * a free irqno for this interrupt 3628*0Sstevel@tonic-gate */ 3629*0Sstevel@tonic-gate newirq = apic_allocate_irq(APIC_FIRST_FREE_IRQ); 3630*0Sstevel@tonic-gate if (newirq == -1) { 3631*0Sstevel@tonic-gate mutex_exit(&airq_mutex); 3632*0Sstevel@tonic-gate return (-1); 3633*0Sstevel@tonic-gate } 3634*0Sstevel@tonic-gate irqno = newirq; 3635*0Sstevel@tonic-gate irqptr = apic_irq_table[irqno]; 3636*0Sstevel@tonic-gate if (irqptr == NULL) { 3637*0Sstevel@tonic-gate irqptr = kmem_zalloc(sizeof (apic_irq_t), 3638*0Sstevel@tonic-gate KM_SLEEP); 3639*0Sstevel@tonic-gate irqptr->airq_temp_cpu = IRQ_UNINIT; 3640*0Sstevel@tonic-gate apic_irq_table[irqno] = irqptr; 3641*0Sstevel@tonic-gate } 3642*0Sstevel@tonic-gate apic_modify_vector(vector, newirq); 3643*0Sstevel@tonic-gate } 3644*0Sstevel@tonic-gate } 3645*0Sstevel@tonic-gate apic_max_device_irq = max(irqno, apic_max_device_irq); 3646*0Sstevel@tonic-gate apic_min_device_irq = min(irqno, apic_min_device_irq); 3647*0Sstevel@tonic-gate mutex_exit(&airq_mutex); 3648*0Sstevel@tonic-gate irqptr->airq_ioapicindex = ioapicindex; 3649*0Sstevel@tonic-gate irqptr->airq_intin_no = ipin; 3650*0Sstevel@tonic-gate irqptr->airq_ipl = ipl; 3651*0Sstevel@tonic-gate irqptr->airq_vector = vector; 3652*0Sstevel@tonic-gate irqptr->airq_origirq = (uchar_t)origirq; 3653*0Sstevel@tonic-gate irqptr->airq_share_id = 0; 3654*0Sstevel@tonic-gate irqptr->airq_mps_intr_index = (short)intr_index; 3655*0Sstevel@tonic-gate irqptr->airq_dip = dip; 3656*0Sstevel@tonic-gate irqptr->airq_major = major; 3657*0Sstevel@tonic-gate irqptr->airq_cpu = apic_bind_intr(dip, irqno, ioapic, ipin); 3658*0Sstevel@tonic-gate if (intr_flagp) 3659*0Sstevel@tonic-gate irqptr->airq_iflag = *intr_flagp; 3660*0Sstevel@tonic-gate 3661*0Sstevel@tonic-gate if (!DDI_INTR_IS_MSI_OR_MSIX(type)) { 3662*0Sstevel@tonic-gate /* setup I/O APIC entry for non-MSI/X interrupts */ 3663*0Sstevel@tonic-gate apic_record_rdt_entry(irqptr, irqno); 3664*0Sstevel@tonic-gate } 3665*0Sstevel@tonic-gate return (irqno); 3666*0Sstevel@tonic-gate } 
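/*
 * Illustrative sketch only (not part of the driver): for a device interrupt,
 * the vector selection in apic_setup_irq_table() above reduces, assuming the
 * helpers behave as defined in this file, to roughly this order of attempts:
 *
 *	if ((vector = apic_allocate_vector(ipl, irqno, 0)) == 0) {
 *		(normal pool for this ipl is exhausted)
 *		if ((newirq = apic_share_vector(...)) != -1)
 *			return (newirq);	<- piggy-back on a compatible RDT
 *		vector = apic_allocate_vector(ipl, irqno, 1);	<- high-pri pool
 *	}
 */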
3667*0Sstevel@tonic-gate 3668*0Sstevel@tonic-gate /* 3669*0Sstevel@tonic-gate * return the cpu to which this intr should be bound. 3670*0Sstevel@tonic-gate * Check properties or any other mechanism to see if user wants it 3671*0Sstevel@tonic-gate * bound to a specific CPU. If so, return the cpu id with high bit set. 3672*0Sstevel@tonic-gate * If not, use the policy to choose a cpu and return the id. 3673*0Sstevel@tonic-gate */ 3674*0Sstevel@tonic-gate uchar_t 3675*0Sstevel@tonic-gate apic_bind_intr(dev_info_t *dip, int irq, uchar_t ioapicid, uchar_t intin) 3676*0Sstevel@tonic-gate { 3677*0Sstevel@tonic-gate int instance, instno, prop_len, bind_cpu, count; 3678*0Sstevel@tonic-gate uint_t i, rc; 3679*0Sstevel@tonic-gate uchar_t cpu; 3680*0Sstevel@tonic-gate major_t major; 3681*0Sstevel@tonic-gate char *name, *drv_name, *prop_val, *cptr; 3682*0Sstevel@tonic-gate char prop_name[32]; 3683*0Sstevel@tonic-gate 3684*0Sstevel@tonic-gate 3685*0Sstevel@tonic-gate if (apic_intr_policy == INTR_LOWEST_PRIORITY) 3686*0Sstevel@tonic-gate return (IRQ_UNBOUND); 3687*0Sstevel@tonic-gate 3688*0Sstevel@tonic-gate drv_name = NULL; 3689*0Sstevel@tonic-gate rc = DDI_PROP_NOT_FOUND; 3690*0Sstevel@tonic-gate major = (major_t)-1; 3691*0Sstevel@tonic-gate if (dip != NULL) { 3692*0Sstevel@tonic-gate name = ddi_get_name(dip); 3693*0Sstevel@tonic-gate major = ddi_name_to_major(name); 3694*0Sstevel@tonic-gate drv_name = ddi_major_to_name(major); 3695*0Sstevel@tonic-gate instance = ddi_get_instance(dip); 3696*0Sstevel@tonic-gate if (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) { 3697*0Sstevel@tonic-gate i = apic_min_device_irq; 3698*0Sstevel@tonic-gate for (; i <= apic_max_device_irq; i++) { 3699*0Sstevel@tonic-gate 3700*0Sstevel@tonic-gate if ((i == irq) || (apic_irq_table[i] == NULL) || 3701*0Sstevel@tonic-gate (apic_irq_table[i]->airq_mps_intr_index 3702*0Sstevel@tonic-gate == FREE_INDEX)) 3703*0Sstevel@tonic-gate continue; 3704*0Sstevel@tonic-gate 3705*0Sstevel@tonic-gate if ((apic_irq_table[i]->airq_major == major) && 3706*0Sstevel@tonic-gate (!(apic_irq_table[i]->airq_cpu & 3707*0Sstevel@tonic-gate IRQ_USER_BOUND))) { 3708*0Sstevel@tonic-gate 3709*0Sstevel@tonic-gate cpu = apic_irq_table[i]->airq_cpu; 3710*0Sstevel@tonic-gate 3711*0Sstevel@tonic-gate cmn_err(CE_CONT, 3712*0Sstevel@tonic-gate "!pcplusmp: %s (%s) instance #%d " 3713*0Sstevel@tonic-gate "vector 0x%x ioapic 0x%x " 3714*0Sstevel@tonic-gate "intin 0x%x is bound to cpu %d\n", 3715*0Sstevel@tonic-gate name, drv_name, instance, irq, 3716*0Sstevel@tonic-gate ioapicid, intin, cpu); 3717*0Sstevel@tonic-gate return (cpu); 3718*0Sstevel@tonic-gate } 3719*0Sstevel@tonic-gate } 3720*0Sstevel@tonic-gate } 3721*0Sstevel@tonic-gate /* 3722*0Sstevel@tonic-gate * search for "drvname"_intpt_bind_cpus property first, the 3723*0Sstevel@tonic-gate * syntax of the property should be "a[,b,c,...]" where 3724*0Sstevel@tonic-gate * instance 0 binds to cpu a, instance 1 binds to cpu b, 3725*0Sstevel@tonic-gate * instance 3 binds to cpu c... 3726*0Sstevel@tonic-gate * ddi_getlongprop() will search /option first, then / 3727*0Sstevel@tonic-gate * if "drvname"_intpt_bind_cpus doesn't exist, then find 3728*0Sstevel@tonic-gate * intpt_bind_cpus property. 
The syntax is the same, and 3729*0Sstevel@tonic-gate * it applies to all the devices if its "drvname" specific 3730*0Sstevel@tonic-gate * property doesn't exist (for example, xyz_intpt_bind_cpus="2,3" would bind instance 0 of a driver "xyz" to cpu 2 and instance 1 to cpu 3; later instances wrap around via instance % count) 3731*0Sstevel@tonic-gate */ 3732*0Sstevel@tonic-gate (void) strcpy(prop_name, drv_name); 3733*0Sstevel@tonic-gate (void) strcat(prop_name, "_intpt_bind_cpus"); 3734*0Sstevel@tonic-gate rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, prop_name, 3735*0Sstevel@tonic-gate (caddr_t)&prop_val, &prop_len); 3736*0Sstevel@tonic-gate if (rc != DDI_PROP_SUCCESS) { 3737*0Sstevel@tonic-gate rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, 3738*0Sstevel@tonic-gate "intpt_bind_cpus", (caddr_t)&prop_val, &prop_len); 3739*0Sstevel@tonic-gate } 3740*0Sstevel@tonic-gate } 3741*0Sstevel@tonic-gate if (rc == DDI_PROP_SUCCESS) { 3742*0Sstevel@tonic-gate for (i = count = 0; i < (prop_len - 1); i++) 3743*0Sstevel@tonic-gate if (prop_val[i] == ',') 3744*0Sstevel@tonic-gate count++; 3745*0Sstevel@tonic-gate if (prop_val[i-1] != ',') 3746*0Sstevel@tonic-gate count++; 3747*0Sstevel@tonic-gate /* 3748*0Sstevel@tonic-gate * if somehow the binding instances defined in the 3749*0Sstevel@tonic-gate * property are not enough for this instno., then 3750*0Sstevel@tonic-gate * reuse the pattern for the next instance until 3751*0Sstevel@tonic-gate * it reaches the requested instno 3752*0Sstevel@tonic-gate */ 3753*0Sstevel@tonic-gate instno = instance % count; 3754*0Sstevel@tonic-gate i = 0; 3755*0Sstevel@tonic-gate cptr = prop_val; 3756*0Sstevel@tonic-gate while (i < instno) 3757*0Sstevel@tonic-gate if (*cptr++ == ',') 3758*0Sstevel@tonic-gate i++; 3759*0Sstevel@tonic-gate bind_cpu = stoi(&cptr); 3761*0Sstevel@tonic-gate /* if specific cpu is bogus, then default to cpu 0 */ 3762*0Sstevel@tonic-gate if (bind_cpu >= apic_nproc) { 3763*0Sstevel@tonic-gate cmn_err(CE_WARN, "pcplusmp: %s=%s: CPU %d not present", 3764*0Sstevel@tonic-gate prop_name, prop_val, bind_cpu); 3765*0Sstevel@tonic-gate bind_cpu = 0; 3766*0Sstevel@tonic-gate } else { 3767*0Sstevel@tonic-gate /* indicate that we are bound at user request */ 3768*0Sstevel@tonic-gate bind_cpu |= IRQ_USER_BOUND; 3769*0Sstevel@tonic-gate } 3760*0Sstevel@tonic-gate kmem_free(prop_val, prop_len); 3770*0Sstevel@tonic-gate /* 3771*0Sstevel@tonic-gate * no need to check apic_cpus[].aci_status, if specific cpu is 3772*0Sstevel@tonic-gate * not up, then post_cpu_start will handle it.
3773*0Sstevel@tonic-gate */ 3774*0Sstevel@tonic-gate } else { 3775*0Sstevel@tonic-gate /* 3776*0Sstevel@tonic-gate * We change bind_cpu only for every two calls 3777*0Sstevel@tonic-gate * as most drivers still do 2 add_intrs for every 3778*0Sstevel@tonic-gate * interrupt 3779*0Sstevel@tonic-gate */ 3780*0Sstevel@tonic-gate bind_cpu = (apic_next_bind_cpu++) / 2; 3781*0Sstevel@tonic-gate if (bind_cpu >= apic_nproc) { 3782*0Sstevel@tonic-gate apic_next_bind_cpu = 1; 3783*0Sstevel@tonic-gate bind_cpu = 0; 3784*0Sstevel@tonic-gate } 3785*0Sstevel@tonic-gate } 3786*0Sstevel@tonic-gate if (drv_name != NULL) 3787*0Sstevel@tonic-gate cmn_err(CE_CONT, "!pcplusmp: %s (%s) instance %d " 3788*0Sstevel@tonic-gate "vector 0x%x ioapic 0x%x intin 0x%x is bound to cpu %d\n", 3789*0Sstevel@tonic-gate name, drv_name, instance, 3790*0Sstevel@tonic-gate irq, ioapicid, intin, bind_cpu & ~IRQ_USER_BOUND); 3791*0Sstevel@tonic-gate else 3792*0Sstevel@tonic-gate cmn_err(CE_CONT, "!pcplusmp: " 3793*0Sstevel@tonic-gate "vector 0x%x ioapic 0x%x intin 0x%x is bound to cpu %d\n", 3794*0Sstevel@tonic-gate irq, ioapicid, intin, bind_cpu & ~IRQ_USER_BOUND); 3795*0Sstevel@tonic-gate 3796*0Sstevel@tonic-gate return ((uchar_t)bind_cpu); 3797*0Sstevel@tonic-gate } 3798*0Sstevel@tonic-gate 3799*0Sstevel@tonic-gate static struct apic_io_intr * 3800*0Sstevel@tonic-gate apic_find_io_intr_w_busid(int irqno, int busid) 3801*0Sstevel@tonic-gate { 3802*0Sstevel@tonic-gate struct apic_io_intr *intrp; 3803*0Sstevel@tonic-gate 3804*0Sstevel@tonic-gate /* 3805*0Sstevel@tonic-gate * It can have more than 1 entry with same source bus IRQ, 3806*0Sstevel@tonic-gate * but unique with the source bus id 3807*0Sstevel@tonic-gate */ 3808*0Sstevel@tonic-gate intrp = apic_io_intrp; 3809*0Sstevel@tonic-gate if (intrp != NULL) { 3810*0Sstevel@tonic-gate while (intrp->intr_entry == APIC_IO_INTR_ENTRY) { 3811*0Sstevel@tonic-gate if (intrp->intr_irq == irqno && 3812*0Sstevel@tonic-gate intrp->intr_busid == busid && 3813*0Sstevel@tonic-gate intrp->intr_type == IO_INTR_INT) 3814*0Sstevel@tonic-gate return (intrp); 3815*0Sstevel@tonic-gate intrp++; 3816*0Sstevel@tonic-gate } 3817*0Sstevel@tonic-gate } 3818*0Sstevel@tonic-gate APIC_VERBOSE_IOAPIC((CE_NOTE, "Did not find io intr for irqno:" 3819*0Sstevel@tonic-gate "busid %x:%x\n", irqno, busid)); 3820*0Sstevel@tonic-gate return ((struct apic_io_intr *)NULL); 3821*0Sstevel@tonic-gate } 3822*0Sstevel@tonic-gate 3823*0Sstevel@tonic-gate 3824*0Sstevel@tonic-gate struct mps_bus_info { 3825*0Sstevel@tonic-gate char *bus_name; 3826*0Sstevel@tonic-gate int bus_id; 3827*0Sstevel@tonic-gate } bus_info_array[] = { 3828*0Sstevel@tonic-gate "ISA ", BUS_ISA, 3829*0Sstevel@tonic-gate "PCI ", BUS_PCI, 3830*0Sstevel@tonic-gate "EISA ", BUS_EISA, 3831*0Sstevel@tonic-gate "XPRESS", BUS_XPRESS, 3832*0Sstevel@tonic-gate "PCMCIA", BUS_PCMCIA, 3833*0Sstevel@tonic-gate "VL ", BUS_VL, 3834*0Sstevel@tonic-gate "CBUS ", BUS_CBUS, 3835*0Sstevel@tonic-gate "CBUSII", BUS_CBUSII, 3836*0Sstevel@tonic-gate "FUTURE", BUS_FUTURE, 3837*0Sstevel@tonic-gate "INTERN", BUS_INTERN, 3838*0Sstevel@tonic-gate "MBI ", BUS_MBI, 3839*0Sstevel@tonic-gate "MBII ", BUS_MBII, 3840*0Sstevel@tonic-gate "MPI ", BUS_MPI, 3841*0Sstevel@tonic-gate "MPSA ", BUS_MPSA, 3842*0Sstevel@tonic-gate "NUBUS ", BUS_NUBUS, 3843*0Sstevel@tonic-gate "TC ", BUS_TC, 3844*0Sstevel@tonic-gate "VME ", BUS_VME 3845*0Sstevel@tonic-gate }; 3846*0Sstevel@tonic-gate 3847*0Sstevel@tonic-gate static int 3848*0Sstevel@tonic-gate apic_find_bus_type(char *bus) 3849*0Sstevel@tonic-gate { 
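	/*
	 * Match the space-padded MPS bus-type string (for example "PCI   ")
	 * against the table above; the comparison length comes from the
	 * table entry, so trailing characters in the caller's string are
	 * ignored.
	 */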
3850*0Sstevel@tonic-gate int i = 0; 3851*0Sstevel@tonic-gate 3852*0Sstevel@tonic-gate for (; i < sizeof (bus_info_array)/sizeof (struct mps_bus_info); i++) 3853*0Sstevel@tonic-gate if (strncmp(bus, bus_info_array[i].bus_name, 3854*0Sstevel@tonic-gate strlen(bus_info_array[i].bus_name)) == 0) 3855*0Sstevel@tonic-gate return (bus_info_array[i].bus_id); 3856*0Sstevel@tonic-gate APIC_VERBOSE_IOAPIC((CE_WARN, "Did not find bus type for bus %s", bus)); 3857*0Sstevel@tonic-gate return (0); 3858*0Sstevel@tonic-gate } 3859*0Sstevel@tonic-gate 3860*0Sstevel@tonic-gate static int 3861*0Sstevel@tonic-gate apic_find_bus(int busid) 3862*0Sstevel@tonic-gate { 3863*0Sstevel@tonic-gate struct apic_bus *busp; 3864*0Sstevel@tonic-gate 3865*0Sstevel@tonic-gate busp = apic_busp; 3866*0Sstevel@tonic-gate while (busp->bus_entry == APIC_BUS_ENTRY) { 3867*0Sstevel@tonic-gate if (busp->bus_id == busid) 3868*0Sstevel@tonic-gate return (apic_find_bus_type((char *)&busp->bus_str1)); 3869*0Sstevel@tonic-gate busp++; 3870*0Sstevel@tonic-gate } 3871*0Sstevel@tonic-gate APIC_VERBOSE_IOAPIC((CE_WARN, "Did not find bus for bus id %x", busid)); 3872*0Sstevel@tonic-gate return (0); 3873*0Sstevel@tonic-gate } 3874*0Sstevel@tonic-gate 3875*0Sstevel@tonic-gate static int 3876*0Sstevel@tonic-gate apic_find_bus_id(int bustype) 3877*0Sstevel@tonic-gate { 3878*0Sstevel@tonic-gate struct apic_bus *busp; 3879*0Sstevel@tonic-gate 3880*0Sstevel@tonic-gate busp = apic_busp; 3881*0Sstevel@tonic-gate while (busp->bus_entry == APIC_BUS_ENTRY) { 3882*0Sstevel@tonic-gate if (apic_find_bus_type((char *)&busp->bus_str1) == bustype) 3883*0Sstevel@tonic-gate return (busp->bus_id); 3884*0Sstevel@tonic-gate busp++; 3885*0Sstevel@tonic-gate } 3886*0Sstevel@tonic-gate APIC_VERBOSE_IOAPIC((CE_WARN, "Did not find bus id for bustype %x", 3887*0Sstevel@tonic-gate bustype)); 3888*0Sstevel@tonic-gate return (-1); 3889*0Sstevel@tonic-gate } 3890*0Sstevel@tonic-gate 3891*0Sstevel@tonic-gate /* 3892*0Sstevel@tonic-gate * Check if a particular irq need to be reserved for any io_intr 3893*0Sstevel@tonic-gate */ 3894*0Sstevel@tonic-gate static struct apic_io_intr * 3895*0Sstevel@tonic-gate apic_find_io_intr(int irqno) 3896*0Sstevel@tonic-gate { 3897*0Sstevel@tonic-gate struct apic_io_intr *intrp; 3898*0Sstevel@tonic-gate 3899*0Sstevel@tonic-gate intrp = apic_io_intrp; 3900*0Sstevel@tonic-gate if (intrp != NULL) { 3901*0Sstevel@tonic-gate while (intrp->intr_entry == APIC_IO_INTR_ENTRY) { 3902*0Sstevel@tonic-gate if (intrp->intr_irq == irqno && 3903*0Sstevel@tonic-gate intrp->intr_type == IO_INTR_INT) 3904*0Sstevel@tonic-gate return (intrp); 3905*0Sstevel@tonic-gate intrp++; 3906*0Sstevel@tonic-gate } 3907*0Sstevel@tonic-gate } 3908*0Sstevel@tonic-gate return ((struct apic_io_intr *)NULL); 3909*0Sstevel@tonic-gate } 3910*0Sstevel@tonic-gate 3911*0Sstevel@tonic-gate /* 3912*0Sstevel@tonic-gate * Check if the given ioapicindex intin combination has already been assigned 3913*0Sstevel@tonic-gate * an irq. If so return irqno. 
Else -1 3914*0Sstevel@tonic-gate */ 3915*0Sstevel@tonic-gate static int 3916*0Sstevel@tonic-gate apic_find_intin(uchar_t ioapic, uchar_t intin) 3917*0Sstevel@tonic-gate { 3918*0Sstevel@tonic-gate apic_irq_t *irqptr; 3919*0Sstevel@tonic-gate int i; 3920*0Sstevel@tonic-gate 3921*0Sstevel@tonic-gate /* find ioapic and intin in the apic_irq_table[] and return the index */ 3922*0Sstevel@tonic-gate for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) { 3923*0Sstevel@tonic-gate irqptr = apic_irq_table[i]; 3924*0Sstevel@tonic-gate while (irqptr) { 3925*0Sstevel@tonic-gate if ((irqptr->airq_mps_intr_index >= 0) && 3926*0Sstevel@tonic-gate (irqptr->airq_intin_no == intin) && 3927*0Sstevel@tonic-gate (irqptr->airq_ioapicindex == ioapic)) { 3928*0Sstevel@tonic-gate APIC_VERBOSE_IOAPIC((CE_NOTE, "!Found irq " 3929*0Sstevel@tonic-gate "entry for ioapic:intin %x:%x " 3930*0Sstevel@tonic-gate "shared interrupts ?", ioapic, intin)); 3931*0Sstevel@tonic-gate return (i); 3932*0Sstevel@tonic-gate } 3933*0Sstevel@tonic-gate irqptr = irqptr->airq_next; 3934*0Sstevel@tonic-gate } 3935*0Sstevel@tonic-gate } 3936*0Sstevel@tonic-gate return (-1); 3937*0Sstevel@tonic-gate } 3938*0Sstevel@tonic-gate 3939*0Sstevel@tonic-gate int 3940*0Sstevel@tonic-gate apic_allocate_irq(int irq) 3941*0Sstevel@tonic-gate { 3942*0Sstevel@tonic-gate int freeirq, i; 3943*0Sstevel@tonic-gate 3944*0Sstevel@tonic-gate if ((freeirq = apic_find_free_irq(irq, (APIC_RESV_IRQ - 1))) == -1) 3945*0Sstevel@tonic-gate if ((freeirq = apic_find_free_irq(APIC_FIRST_FREE_IRQ, 3946*0Sstevel@tonic-gate (irq - 1))) == -1) { 3947*0Sstevel@tonic-gate /* 3948*0Sstevel@tonic-gate * if BIOS really defines every single irq in the mps 3949*0Sstevel@tonic-gate * table, then don't worry about conflicting with 3950*0Sstevel@tonic-gate * them, just use any free slot in apic_irq_table 3951*0Sstevel@tonic-gate */ 3952*0Sstevel@tonic-gate for (i = APIC_FIRST_FREE_IRQ; i < APIC_RESV_IRQ; i++) { 3953*0Sstevel@tonic-gate if ((apic_irq_table[i] == NULL) || 3954*0Sstevel@tonic-gate apic_irq_table[i]->airq_mps_intr_index == 3955*0Sstevel@tonic-gate FREE_INDEX) { 3956*0Sstevel@tonic-gate freeirq = i; 3957*0Sstevel@tonic-gate break; 3958*0Sstevel@tonic-gate } 3959*0Sstevel@tonic-gate } 3960*0Sstevel@tonic-gate if (freeirq == -1) { 3961*0Sstevel@tonic-gate /* This shouldn't happen, but just in case */ 3962*0Sstevel@tonic-gate cmn_err(CE_WARN, "pcplusmp: NO available IRQ"); 3963*0Sstevel@tonic-gate return (-1); 3964*0Sstevel@tonic-gate } 3965*0Sstevel@tonic-gate } 3966*0Sstevel@tonic-gate if (apic_irq_table[freeirq] == NULL) { 3967*0Sstevel@tonic-gate apic_irq_table[freeirq] = 3968*0Sstevel@tonic-gate kmem_zalloc(sizeof (apic_irq_t), KM_NOSLEEP); 3969*0Sstevel@tonic-gate if (apic_irq_table[freeirq] == NULL) { 3970*0Sstevel@tonic-gate cmn_err(CE_WARN, "pcplusmp: NO memory to allocate IRQ"); 3971*0Sstevel@tonic-gate return (-1); 3972*0Sstevel@tonic-gate } 3973*0Sstevel@tonic-gate apic_irq_table[freeirq]->airq_mps_intr_index = FREE_INDEX; 3974*0Sstevel@tonic-gate } 3975*0Sstevel@tonic-gate return (freeirq); 3976*0Sstevel@tonic-gate } 3977*0Sstevel@tonic-gate 3978*0Sstevel@tonic-gate static int 3979*0Sstevel@tonic-gate apic_find_free_irq(int start, int end) 3980*0Sstevel@tonic-gate { 3981*0Sstevel@tonic-gate int i; 3982*0Sstevel@tonic-gate 3983*0Sstevel@tonic-gate for (i = start; i <= end; i++) 3984*0Sstevel@tonic-gate /* Check if any I/O entry needs this IRQ */ 3985*0Sstevel@tonic-gate if (apic_find_io_intr(i) == NULL) { 3986*0Sstevel@tonic-gate /* Then see if it is free */ 
3987*0Sstevel@tonic-gate if ((apic_irq_table[i] == NULL) || 3988*0Sstevel@tonic-gate (apic_irq_table[i]->airq_mps_intr_index == 3989*0Sstevel@tonic-gate FREE_INDEX)) { 3990*0Sstevel@tonic-gate return (i); 3991*0Sstevel@tonic-gate } 3992*0Sstevel@tonic-gate } 3993*0Sstevel@tonic-gate return (-1); 3994*0Sstevel@tonic-gate } 3995*0Sstevel@tonic-gate 3996*0Sstevel@tonic-gate /* 3997*0Sstevel@tonic-gate * Allocate a free vector for irq at ipl. Takes care of merging of multiple 3998*0Sstevel@tonic-gate * IPLs into a single APIC level as well as stretching some IPLs onto multiple 3999*0Sstevel@tonic-gate * levels. APIC_HI_PRI_VECTS interrupts are reserved for high priority 4000*0Sstevel@tonic-gate * requests and allocated only when pri is set. 4001*0Sstevel@tonic-gate */ 4002*0Sstevel@tonic-gate static uchar_t 4003*0Sstevel@tonic-gate apic_allocate_vector(int ipl, int irq, int pri) 4004*0Sstevel@tonic-gate { 4005*0Sstevel@tonic-gate int lowest, highest, i; 4006*0Sstevel@tonic-gate 4007*0Sstevel@tonic-gate highest = apic_ipltopri[ipl] + APIC_VECTOR_MASK; 4008*0Sstevel@tonic-gate lowest = apic_ipltopri[ipl - 1] + APIC_VECTOR_PER_IPL; 4009*0Sstevel@tonic-gate 4010*0Sstevel@tonic-gate if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */ 4011*0Sstevel@tonic-gate lowest -= APIC_VECTOR_PER_IPL; 4012*0Sstevel@tonic-gate 4013*0Sstevel@tonic-gate #ifdef DEBUG 4014*0Sstevel@tonic-gate if (apic_restrict_vector) /* for testing shared interrupt logic */ 4015*0Sstevel@tonic-gate highest = lowest + apic_restrict_vector + APIC_HI_PRI_VECTS; 4016*0Sstevel@tonic-gate #endif /* DEBUG */ 4017*0Sstevel@tonic-gate if (pri == 0) 4018*0Sstevel@tonic-gate highest -= APIC_HI_PRI_VECTS; 4019*0Sstevel@tonic-gate 4020*0Sstevel@tonic-gate for (i = lowest; i < highest; i++) { 4021*0Sstevel@tonic-gate if ((i == T_FASTTRAP) || (i == APIC_SPUR_INTR) || 4022*0Sstevel@tonic-gate (i == T_SYSCALLINT) || (i == T_DTRACE_PROBE) || 4023*0Sstevel@tonic-gate (i == T_DTRACE_RET)) 4024*0Sstevel@tonic-gate continue; 4025*0Sstevel@tonic-gate if (apic_vector_to_irq[i] == APIC_RESV_IRQ) { 4026*0Sstevel@tonic-gate apic_vector_to_irq[i] = (uchar_t)irq; 4027*0Sstevel@tonic-gate return (i); 4028*0Sstevel@tonic-gate } 4029*0Sstevel@tonic-gate } 4030*0Sstevel@tonic-gate 4031*0Sstevel@tonic-gate return (0); 4032*0Sstevel@tonic-gate } 4033*0Sstevel@tonic-gate 4034*0Sstevel@tonic-gate static void 4035*0Sstevel@tonic-gate apic_modify_vector(uchar_t vector, int irq) 4036*0Sstevel@tonic-gate { 4037*0Sstevel@tonic-gate apic_vector_to_irq[vector] = (uchar_t)irq; 4038*0Sstevel@tonic-gate } 4039*0Sstevel@tonic-gate 4040*0Sstevel@tonic-gate /* 4041*0Sstevel@tonic-gate * Mark vector as being in the process of being deleted. Interrupts 4042*0Sstevel@tonic-gate * may still come in on some CPU. The moment an interrupt comes with 4043*0Sstevel@tonic-gate * the new vector, we know we can free the old one. Called only from 4044*0Sstevel@tonic-gate * addspl and delspl with interrupts disabled. Because an interrupt 4045*0Sstevel@tonic-gate * can be shared, but no interrupt from either device may come in, 4046*0Sstevel@tonic-gate * we also use a timeout mechanism, which we arbitrarily set to 4047*0Sstevel@tonic-gate * apic_revector_timeout microseconds. 
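 * Illustrative example (vector numbers assumed): if the interrupt on irq 9
 * is being moved from vector 0x61 to vector 0x62, apic_mark_vector(0x61,
 * 0x62) records the mapping in both directions; the first interrupt that
 * arrives on either vector then lets apic_xlate_vector() free 0x61, and
 * the timeout handler frees it anyway if no interrupt ever shows up.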
4048*0Sstevel@tonic-gate */ 4049*0Sstevel@tonic-gate static void 4050*0Sstevel@tonic-gate apic_mark_vector(uchar_t oldvector, uchar_t newvector) 4051*0Sstevel@tonic-gate { 4052*0Sstevel@tonic-gate int iflag = intr_clear(); 4053*0Sstevel@tonic-gate lock_set(&apic_revector_lock); 4054*0Sstevel@tonic-gate if (!apic_oldvec_to_newvec) { 4055*0Sstevel@tonic-gate apic_oldvec_to_newvec = 4056*0Sstevel@tonic-gate kmem_zalloc(sizeof (newvector) * APIC_MAX_VECTOR * 2, 4057*0Sstevel@tonic-gate KM_NOSLEEP); 4058*0Sstevel@tonic-gate 4059*0Sstevel@tonic-gate if (!apic_oldvec_to_newvec) { 4060*0Sstevel@tonic-gate /* 4061*0Sstevel@tonic-gate * This failure is not catastrophic. 4062*0Sstevel@tonic-gate * But, the oldvec will never be freed. 4063*0Sstevel@tonic-gate */ 4064*0Sstevel@tonic-gate apic_error |= APIC_ERR_MARK_VECTOR_FAIL; 4065*0Sstevel@tonic-gate lock_clear(&apic_revector_lock); 4066*0Sstevel@tonic-gate intr_restore(iflag); 4067*0Sstevel@tonic-gate return; 4068*0Sstevel@tonic-gate } 4069*0Sstevel@tonic-gate apic_newvec_to_oldvec = &apic_oldvec_to_newvec[APIC_MAX_VECTOR]; 4070*0Sstevel@tonic-gate } 4071*0Sstevel@tonic-gate 4072*0Sstevel@tonic-gate /* See if we already did this for drivers which do double addintrs */ 4073*0Sstevel@tonic-gate if (apic_oldvec_to_newvec[oldvector] != newvector) { 4074*0Sstevel@tonic-gate apic_oldvec_to_newvec[oldvector] = newvector; 4075*0Sstevel@tonic-gate apic_newvec_to_oldvec[newvector] = oldvector; 4076*0Sstevel@tonic-gate apic_revector_pending++; 4077*0Sstevel@tonic-gate } 4078*0Sstevel@tonic-gate lock_clear(&apic_revector_lock); 4079*0Sstevel@tonic-gate intr_restore(iflag); 4080*0Sstevel@tonic-gate (void) timeout(apic_xlate_vector_free_timeout_handler, 4081*0Sstevel@tonic-gate (void *)(uintptr_t)oldvector, drv_usectohz(apic_revector_timeout)); 4082*0Sstevel@tonic-gate } 4083*0Sstevel@tonic-gate 4084*0Sstevel@tonic-gate /* 4085*0Sstevel@tonic-gate * xlate_vector is called from intr_enter if revector_pending is set. 4086*0Sstevel@tonic-gate * It will xlate it if needed and mark the old vector as free. 4087*0Sstevel@tonic-gate */ 4088*0Sstevel@tonic-gate static uchar_t 4089*0Sstevel@tonic-gate apic_xlate_vector(uchar_t vector) 4090*0Sstevel@tonic-gate { 4091*0Sstevel@tonic-gate uchar_t newvector, oldvector = 0; 4092*0Sstevel@tonic-gate 4093*0Sstevel@tonic-gate lock_set(&apic_revector_lock); 4094*0Sstevel@tonic-gate /* Do we really need to do this ? */ 4095*0Sstevel@tonic-gate if (!apic_revector_pending) { 4096*0Sstevel@tonic-gate lock_clear(&apic_revector_lock); 4097*0Sstevel@tonic-gate return (vector); 4098*0Sstevel@tonic-gate } 4099*0Sstevel@tonic-gate if ((newvector = apic_oldvec_to_newvec[vector]) != 0) 4100*0Sstevel@tonic-gate oldvector = vector; 4101*0Sstevel@tonic-gate else { 4102*0Sstevel@tonic-gate /* 4103*0Sstevel@tonic-gate * The incoming vector is new . See if a stale entry is 4104*0Sstevel@tonic-gate * remaining 4105*0Sstevel@tonic-gate */ 4106*0Sstevel@tonic-gate if ((oldvector = apic_newvec_to_oldvec[vector]) != 0) 4107*0Sstevel@tonic-gate newvector = vector; 4108*0Sstevel@tonic-gate } 4109*0Sstevel@tonic-gate 4110*0Sstevel@tonic-gate if (oldvector) { 4111*0Sstevel@tonic-gate apic_revector_pending--; 4112*0Sstevel@tonic-gate apic_oldvec_to_newvec[oldvector] = 0; 4113*0Sstevel@tonic-gate apic_newvec_to_oldvec[newvector] = 0; 4114*0Sstevel@tonic-gate apic_free_vector(oldvector); 4115*0Sstevel@tonic-gate lock_clear(&apic_revector_lock); 4116*0Sstevel@tonic-gate /* There could have been more than one reprogramming! 
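 * e.g. (hypothetical) vector A was retargeted to B and then B to C before
 * any interrupt arrived; recursing walks through each stale mapping until
 * the vector that is currently programmed is reached.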
*/ 4117*0Sstevel@tonic-gate return (apic_xlate_vector(newvector)); 4118*0Sstevel@tonic-gate } 4119*0Sstevel@tonic-gate lock_clear(&apic_revector_lock); 4120*0Sstevel@tonic-gate return (vector); 4121*0Sstevel@tonic-gate } 4122*0Sstevel@tonic-gate 4123*0Sstevel@tonic-gate void 4124*0Sstevel@tonic-gate apic_xlate_vector_free_timeout_handler(void *arg) 4125*0Sstevel@tonic-gate { 4126*0Sstevel@tonic-gate int iflag; 4127*0Sstevel@tonic-gate uchar_t oldvector, newvector; 4128*0Sstevel@tonic-gate 4129*0Sstevel@tonic-gate oldvector = (uchar_t)(uintptr_t)arg; 4130*0Sstevel@tonic-gate iflag = intr_clear(); 4131*0Sstevel@tonic-gate lock_set(&apic_revector_lock); 4132*0Sstevel@tonic-gate if ((newvector = apic_oldvec_to_newvec[oldvector]) != 0) { 4133*0Sstevel@tonic-gate apic_free_vector(oldvector); 4134*0Sstevel@tonic-gate apic_oldvec_to_newvec[oldvector] = 0; 4135*0Sstevel@tonic-gate apic_newvec_to_oldvec[newvector] = 0; 4136*0Sstevel@tonic-gate apic_revector_pending--; 4137*0Sstevel@tonic-gate } 4138*0Sstevel@tonic-gate 4139*0Sstevel@tonic-gate lock_clear(&apic_revector_lock); 4140*0Sstevel@tonic-gate intr_restore(iflag); 4141*0Sstevel@tonic-gate } 4142*0Sstevel@tonic-gate 4143*0Sstevel@tonic-gate 4144*0Sstevel@tonic-gate /* Mark vector as not being used by any irq */ 4145*0Sstevel@tonic-gate static void 4146*0Sstevel@tonic-gate apic_free_vector(uchar_t vector) 4147*0Sstevel@tonic-gate { 4148*0Sstevel@tonic-gate apic_vector_to_irq[vector] = APIC_RESV_IRQ; 4149*0Sstevel@tonic-gate } 4150*0Sstevel@tonic-gate 4151*0Sstevel@tonic-gate /* 4152*0Sstevel@tonic-gate * compute the polarity, trigger mode and vector for programming into 4153*0Sstevel@tonic-gate * the I/O apic and record in airq_rdt_entry. 4154*0Sstevel@tonic-gate */ 4155*0Sstevel@tonic-gate static void 4156*0Sstevel@tonic-gate apic_record_rdt_entry(apic_irq_t *irqptr, int irq) 4157*0Sstevel@tonic-gate { 4158*0Sstevel@tonic-gate int ioapicindex, bus_type, vector; 4159*0Sstevel@tonic-gate short intr_index; 4160*0Sstevel@tonic-gate uint_t level, po, io_po; 4161*0Sstevel@tonic-gate struct apic_io_intr *iointrp; 4162*0Sstevel@tonic-gate 4163*0Sstevel@tonic-gate intr_index = irqptr->airq_mps_intr_index; 4164*0Sstevel@tonic-gate DDI_INTR_IMPLDBG((CE_CONT, "apic_record_rdt_entry: intr_index=%d " 4165*0Sstevel@tonic-gate "irq = 0x%x dip = 0x%p vector = 0x%x\n", intr_index, irq, 4166*0Sstevel@tonic-gate (void *)irqptr->airq_dip, irqptr->airq_vector)); 4167*0Sstevel@tonic-gate 4168*0Sstevel@tonic-gate if (intr_index == RESERVE_INDEX) { 4169*0Sstevel@tonic-gate apic_error |= APIC_ERR_INVALID_INDEX; 4170*0Sstevel@tonic-gate return; 4171*0Sstevel@tonic-gate } else if (APIC_IS_MSI_OR_MSIX_INDEX(intr_index)) { 4172*0Sstevel@tonic-gate return; 4173*0Sstevel@tonic-gate } 4174*0Sstevel@tonic-gate 4175*0Sstevel@tonic-gate vector = irqptr->airq_vector; 4176*0Sstevel@tonic-gate ioapicindex = irqptr->airq_ioapicindex; 4177*0Sstevel@tonic-gate /* Assume edge triggered by default */ 4178*0Sstevel@tonic-gate level = 0; 4179*0Sstevel@tonic-gate /* Assume active high by default */ 4180*0Sstevel@tonic-gate po = 0; 4181*0Sstevel@tonic-gate 4182*0Sstevel@tonic-gate if (intr_index == DEFAULT_INDEX || intr_index == FREE_INDEX) { 4183*0Sstevel@tonic-gate ASSERT(irq < 16); 4184*0Sstevel@tonic-gate if (eisa_level_intr_mask & (1 << irq)) 4185*0Sstevel@tonic-gate level = AV_LEVEL; 4186*0Sstevel@tonic-gate if (intr_index == FREE_INDEX && apic_defconf == 0) 4187*0Sstevel@tonic-gate apic_error |= APIC_ERR_INVALID_INDEX; 4188*0Sstevel@tonic-gate } else if (intr_index == ACPI_INDEX || 
intr_index == SCI_INDEX) { 4189*0Sstevel@tonic-gate bus_type = irqptr->airq_iflag.bustype; 4190*0Sstevel@tonic-gate if (irqptr->airq_iflag.intr_el == INTR_EL_CONFORM) { 4191*0Sstevel@tonic-gate if (bus_type == BUS_PCI) 4192*0Sstevel@tonic-gate level = AV_LEVEL; 4193*0Sstevel@tonic-gate } else 4194*0Sstevel@tonic-gate level = (irqptr->airq_iflag.intr_el == INTR_EL_LEVEL) ? 4195*0Sstevel@tonic-gate AV_LEVEL : 0; 4196*0Sstevel@tonic-gate if (level && 4197*0Sstevel@tonic-gate ((irqptr->airq_iflag.intr_po == INTR_PO_ACTIVE_LOW) || 4198*0Sstevel@tonic-gate (irqptr->airq_iflag.intr_po == INTR_PO_CONFORM && 4199*0Sstevel@tonic-gate bus_type == BUS_PCI))) 4200*0Sstevel@tonic-gate po = AV_ACTIVE_LOW; 4201*0Sstevel@tonic-gate } else { 4202*0Sstevel@tonic-gate iointrp = apic_io_intrp + intr_index; 4203*0Sstevel@tonic-gate bus_type = apic_find_bus(iointrp->intr_busid); 4204*0Sstevel@tonic-gate if (iointrp->intr_el == INTR_EL_CONFORM) { 4205*0Sstevel@tonic-gate if ((irq < 16) && (eisa_level_intr_mask & (1 << irq))) 4206*0Sstevel@tonic-gate level = AV_LEVEL; 4207*0Sstevel@tonic-gate else if (bus_type == BUS_PCI) 4208*0Sstevel@tonic-gate level = AV_LEVEL; 4209*0Sstevel@tonic-gate } else 4210*0Sstevel@tonic-gate level = (iointrp->intr_el == INTR_EL_LEVEL) ? 4211*0Sstevel@tonic-gate AV_LEVEL : 0; 4212*0Sstevel@tonic-gate if (level && ((iointrp->intr_po == INTR_PO_ACTIVE_LOW) || 4213*0Sstevel@tonic-gate (iointrp->intr_po == INTR_PO_CONFORM && 4214*0Sstevel@tonic-gate bus_type == BUS_PCI))) 4215*0Sstevel@tonic-gate po = AV_ACTIVE_LOW; 4216*0Sstevel@tonic-gate } 4217*0Sstevel@tonic-gate if (level) 4218*0Sstevel@tonic-gate apic_level_intr[irq] = 1; 4219*0Sstevel@tonic-gate /* 4220*0Sstevel@tonic-gate * The 82489DX External APIC cannot do active low polarity interrupts. 4221*0Sstevel@tonic-gate */ 4222*0Sstevel@tonic-gate if (po && (apic_io_ver[ioapicindex] != IOAPIC_VER_82489DX)) 4223*0Sstevel@tonic-gate io_po = po; 4224*0Sstevel@tonic-gate else 4225*0Sstevel@tonic-gate io_po = 0; 4226*0Sstevel@tonic-gate 4227*0Sstevel@tonic-gate if (apic_verbose & APIC_VERBOSE_IOAPIC_FLAG) 4228*0Sstevel@tonic-gate printf("setio: ioapic=%x intin=%x level=%x po=%x vector=%x\n", 4229*0Sstevel@tonic-gate ioapicindex, irqptr->airq_intin_no, level, io_po, vector); 4230*0Sstevel@tonic-gate 4231*0Sstevel@tonic-gate irqptr->airq_rdt_entry = level|io_po|vector; 4232*0Sstevel@tonic-gate } 4233*0Sstevel@tonic-gate 4234*0Sstevel@tonic-gate /* 4235*0Sstevel@tonic-gate * Call rebind to do the actual programming. 4236*0Sstevel@tonic-gate */ 4237*0Sstevel@tonic-gate static int 4238*0Sstevel@tonic-gate apic_setup_io_intr(apic_irq_t *irqptr, int irq) 4239*0Sstevel@tonic-gate { 4240*0Sstevel@tonic-gate int rv; 4241*0Sstevel@tonic-gate 4242*0Sstevel@tonic-gate if (rv = apic_rebind(irqptr, apic_irq_table[irq]->airq_cpu, 1, 4243*0Sstevel@tonic-gate IMMEDIATE)) 4244*0Sstevel@tonic-gate /* CPU is not up or interrupt is disabled. Fall back to 0 */ 4245*0Sstevel@tonic-gate rv = apic_rebind(irqptr, 0, 1, IMMEDIATE); 4246*0Sstevel@tonic-gate 4247*0Sstevel@tonic-gate return (rv); 4248*0Sstevel@tonic-gate } 4249*0Sstevel@tonic-gate 4250*0Sstevel@tonic-gate /* 4251*0Sstevel@tonic-gate * Deferred reprogramming: Call apic_rebind to do the real work. 
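 * (IMMEDIATE, used by apic_setup_io_intr() above, programs the RDT right
 * away; DEFERRED is passed in from the reprogramming timeout handler, and
 * apic_rebind() quietly drops the request if the corresponding
 * apic_reprogram_info[] entry has gone stale in the meantime.)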
4252*0Sstevel@tonic-gate */ 4253*0Sstevel@tonic-gate static int 4254*0Sstevel@tonic-gate apic_setup_io_intr_deferred(apic_irq_t *irqptr, int irq) 4255*0Sstevel@tonic-gate { 4256*0Sstevel@tonic-gate int rv; 4257*0Sstevel@tonic-gate 4258*0Sstevel@tonic-gate if (rv = apic_rebind(irqptr, apic_irq_table[irq]->airq_cpu, 1, 4259*0Sstevel@tonic-gate DEFERRED)) 4260*0Sstevel@tonic-gate /* CPU is not up or interrupt is disabled. Fall back to 0 */ 4261*0Sstevel@tonic-gate rv = apic_rebind(irqptr, 0, 1, DEFERRED); 4262*0Sstevel@tonic-gate 4263*0Sstevel@tonic-gate return (rv); 4264*0Sstevel@tonic-gate } 4265*0Sstevel@tonic-gate 4266*0Sstevel@tonic-gate /* 4267*0Sstevel@tonic-gate * Bind interrupt corresponding to irq_ptr to bind_cpu. acquire_lock 4268*0Sstevel@tonic-gate * if false (0) means lock is already held (e.g: in rebind_all). 4269*0Sstevel@tonic-gate */ 4270*0Sstevel@tonic-gate static int 4271*0Sstevel@tonic-gate apic_rebind(apic_irq_t *irq_ptr, int bind_cpu, int acquire_lock, int when) 4272*0Sstevel@tonic-gate { 4273*0Sstevel@tonic-gate int intin_no; 4274*0Sstevel@tonic-gate volatile int32_t *ioapic; 4275*0Sstevel@tonic-gate uchar_t airq_temp_cpu; 4276*0Sstevel@tonic-gate apic_cpus_info_t *cpu_infop; 4277*0Sstevel@tonic-gate int iflag; 4278*0Sstevel@tonic-gate int which_irq = apic_vector_to_irq[irq_ptr->airq_vector]; 4279*0Sstevel@tonic-gate 4280*0Sstevel@tonic-gate intin_no = irq_ptr->airq_intin_no; 4281*0Sstevel@tonic-gate ioapic = apicioadr[irq_ptr->airq_ioapicindex]; 4282*0Sstevel@tonic-gate airq_temp_cpu = irq_ptr->airq_temp_cpu; 4283*0Sstevel@tonic-gate if (airq_temp_cpu != IRQ_UNINIT && airq_temp_cpu != IRQ_UNBOUND) { 4284*0Sstevel@tonic-gate if (airq_temp_cpu & IRQ_USER_BOUND) 4285*0Sstevel@tonic-gate /* Mask off high bit so it can be used as array index */ 4286*0Sstevel@tonic-gate airq_temp_cpu &= ~IRQ_USER_BOUND; 4287*0Sstevel@tonic-gate 4288*0Sstevel@tonic-gate ASSERT(airq_temp_cpu < apic_nproc); 4289*0Sstevel@tonic-gate } 4290*0Sstevel@tonic-gate 4291*0Sstevel@tonic-gate iflag = intr_clear(); 4292*0Sstevel@tonic-gate 4293*0Sstevel@tonic-gate if (acquire_lock) 4294*0Sstevel@tonic-gate lock_set(&apic_ioapic_lock); 4295*0Sstevel@tonic-gate 4296*0Sstevel@tonic-gate /* 4297*0Sstevel@tonic-gate * Can't bind to a CPU that's not online: 4298*0Sstevel@tonic-gate */ 4299*0Sstevel@tonic-gate cpu_infop = &apic_cpus[bind_cpu & ~IRQ_USER_BOUND]; 4300*0Sstevel@tonic-gate if (!(cpu_infop->aci_status & APIC_CPU_INTR_ENABLE)) { 4301*0Sstevel@tonic-gate 4302*0Sstevel@tonic-gate if (acquire_lock) 4303*0Sstevel@tonic-gate lock_clear(&apic_ioapic_lock); 4304*0Sstevel@tonic-gate 4305*0Sstevel@tonic-gate intr_restore(iflag); 4306*0Sstevel@tonic-gate return (1); 4307*0Sstevel@tonic-gate } 4308*0Sstevel@tonic-gate 4309*0Sstevel@tonic-gate /* 4310*0Sstevel@tonic-gate * If this is a deferred reprogramming attempt, ensure we have 4311*0Sstevel@tonic-gate * not been passed stale data: 4312*0Sstevel@tonic-gate */ 4313*0Sstevel@tonic-gate if ((when == DEFERRED) && 4314*0Sstevel@tonic-gate (apic_reprogram_info[which_irq].valid == 0)) { 4315*0Sstevel@tonic-gate /* stale info, so just return */ 4316*0Sstevel@tonic-gate if (acquire_lock) 4317*0Sstevel@tonic-gate lock_clear(&apic_ioapic_lock); 4318*0Sstevel@tonic-gate 4319*0Sstevel@tonic-gate intr_restore(iflag); 4320*0Sstevel@tonic-gate return (0); 4321*0Sstevel@tonic-gate } 4322*0Sstevel@tonic-gate 4323*0Sstevel@tonic-gate /* 4324*0Sstevel@tonic-gate * If this interrupt has been delivered to a CPU and that CPU 4325*0Sstevel@tonic-gate * has not handled it yet, we 
cannot reprogram the IOAPIC now: 4326*0Sstevel@tonic-gate */ 4327*0Sstevel@tonic-gate if (!APIC_IS_MSI_OR_MSIX_INDEX(irq_ptr->airq_mps_intr_index) && 4328*0Sstevel@tonic-gate apic_check_stuck_interrupt(irq_ptr, airq_temp_cpu, bind_cpu, 4329*0Sstevel@tonic-gate ioapic, intin_no, which_irq) != 0) { 4330*0Sstevel@tonic-gate 4331*0Sstevel@tonic-gate if (acquire_lock) 4332*0Sstevel@tonic-gate lock_clear(&apic_ioapic_lock); 4333*0Sstevel@tonic-gate 4334*0Sstevel@tonic-gate intr_restore(iflag); 4335*0Sstevel@tonic-gate return (0); 4336*0Sstevel@tonic-gate } 4337*0Sstevel@tonic-gate 4338*0Sstevel@tonic-gate /* 4339*0Sstevel@tonic-gate * NOTE: We do not unmask the RDT here, as an interrupt MAY still 4340*0Sstevel@tonic-gate * come in before we have a chance to reprogram it below. The 4341*0Sstevel@tonic-gate * reprogramming below will simultaneously change and unmask the 4342*0Sstevel@tonic-gate * RDT entry. 4343*0Sstevel@tonic-gate */ 4344*0Sstevel@tonic-gate 4345*0Sstevel@tonic-gate if ((uchar_t)bind_cpu == IRQ_UNBOUND) { 4346*0Sstevel@tonic-gate /* Write the RDT entry -- no specific CPU binding */ 4347*0Sstevel@tonic-gate WRITE_IOAPIC_RDT_ENTRY_HIGH_DWORD(ioapic, intin_no, AV_TOALL); 4348*0Sstevel@tonic-gate 4349*0Sstevel@tonic-gate if (airq_temp_cpu != IRQ_UNINIT && airq_temp_cpu != IRQ_UNBOUND) 4350*0Sstevel@tonic-gate apic_cpus[airq_temp_cpu].aci_temp_bound--; 4351*0Sstevel@tonic-gate 4352*0Sstevel@tonic-gate /* Write the vector, trigger, and polarity portion of the RDT */ 4353*0Sstevel@tonic-gate WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no, 4354*0Sstevel@tonic-gate AV_LDEST | AV_LOPRI | irq_ptr->airq_rdt_entry); 4355*0Sstevel@tonic-gate if (acquire_lock) 4356*0Sstevel@tonic-gate lock_clear(&apic_ioapic_lock); 4357*0Sstevel@tonic-gate irq_ptr->airq_temp_cpu = IRQ_UNBOUND; 4358*0Sstevel@tonic-gate intr_restore(iflag); 4359*0Sstevel@tonic-gate return (0); 4360*0Sstevel@tonic-gate } 4361*0Sstevel@tonic-gate 4362*0Sstevel@tonic-gate if (bind_cpu & IRQ_USER_BOUND) { 4363*0Sstevel@tonic-gate cpu_infop->aci_bound++; 4364*0Sstevel@tonic-gate } else { 4365*0Sstevel@tonic-gate cpu_infop->aci_temp_bound++; 4366*0Sstevel@tonic-gate } 4367*0Sstevel@tonic-gate ASSERT((bind_cpu & ~IRQ_USER_BOUND) < apic_nproc); 4368*0Sstevel@tonic-gate if (!APIC_IS_MSI_OR_MSIX_INDEX(irq_ptr->airq_mps_intr_index)) { 4369*0Sstevel@tonic-gate /* Write the RDT entry -- bind to a specific CPU: */ 4370*0Sstevel@tonic-gate WRITE_IOAPIC_RDT_ENTRY_HIGH_DWORD(ioapic, intin_no, 4371*0Sstevel@tonic-gate cpu_infop->aci_local_id << APIC_ID_BIT_OFFSET); 4372*0Sstevel@tonic-gate } 4373*0Sstevel@tonic-gate if ((airq_temp_cpu != IRQ_UNBOUND) && (airq_temp_cpu != IRQ_UNINIT)) { 4374*0Sstevel@tonic-gate apic_cpus[airq_temp_cpu].aci_temp_bound--; 4375*0Sstevel@tonic-gate } 4376*0Sstevel@tonic-gate if (!APIC_IS_MSI_OR_MSIX_INDEX(irq_ptr->airq_mps_intr_index)) { 4377*0Sstevel@tonic-gate /* Write the vector, trigger, and polarity portion of the RDT */ 4378*0Sstevel@tonic-gate WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no, 4379*0Sstevel@tonic-gate AV_PDEST | AV_FIXED | irq_ptr->airq_rdt_entry); 4380*0Sstevel@tonic-gate } else { 4381*0Sstevel@tonic-gate if (irq_ptr->airq_ioapicindex == irq_ptr->airq_origirq) { 4382*0Sstevel@tonic-gate /* first one */ 4383*0Sstevel@tonic-gate DDI_INTR_IMPLDBG((CE_CONT, "apic_rebind: call " 4384*0Sstevel@tonic-gate "apic_pci_msi_enable_vector\n")); 4385*0Sstevel@tonic-gate if (apic_pci_msi_enable_vector(irq_ptr->airq_dip, 4386*0Sstevel@tonic-gate (irq_ptr->airq_mps_intr_index == MSI_INDEX) ? 
4387*0Sstevel@tonic-gate DDI_INTR_TYPE_MSI : DDI_INTR_TYPE_MSIX, which_irq, 4388*0Sstevel@tonic-gate irq_ptr->airq_vector, irq_ptr->airq_intin_no, 4389*0Sstevel@tonic-gate cpu_infop->aci_local_id) != PSM_SUCCESS) { 4390*0Sstevel@tonic-gate cmn_err(CE_WARN, "pcplusmp: " 4391*0Sstevel@tonic-gate "apic_pci_msi_enable_vector " 4392*0Sstevel@tonic-gate "returned PSM_FAILURE"); 4393*0Sstevel@tonic-gate } 4394*0Sstevel@tonic-gate } 4395*0Sstevel@tonic-gate if ((irq_ptr->airq_ioapicindex + irq_ptr->airq_intin_no - 1) == 4396*0Sstevel@tonic-gate irq_ptr->airq_origirq) { /* last one */ 4397*0Sstevel@tonic-gate DDI_INTR_IMPLDBG((CE_CONT, "apic_rebind: call " 4398*0Sstevel@tonic-gate "pci_msi_enable_mode\n")); 4399*0Sstevel@tonic-gate if (pci_msi_enable_mode(irq_ptr->airq_dip, 4400*0Sstevel@tonic-gate (irq_ptr->airq_mps_intr_index == MSI_INDEX) ? 4401*0Sstevel@tonic-gate DDI_INTR_TYPE_MSI : DDI_INTR_TYPE_MSIX, 4402*0Sstevel@tonic-gate which_irq) != DDI_SUCCESS) { 4403*0Sstevel@tonic-gate DDI_INTR_IMPLDBG((CE_CONT, "pcplusmp: " 4404*0Sstevel@tonic-gate "pci_msi_enable failed\n")); 4405*0Sstevel@tonic-gate (void) pci_msi_unconfigure(irq_ptr->airq_dip, 4406*0Sstevel@tonic-gate (irq_ptr->airq_mps_intr_index == MSI_INDEX) ? 4407*0Sstevel@tonic-gate DDI_INTR_TYPE_MSI : DDI_INTR_TYPE_MSIX, 4408*0Sstevel@tonic-gate which_irq); 4409*0Sstevel@tonic-gate } 4410*0Sstevel@tonic-gate } 4411*0Sstevel@tonic-gate } 4412*0Sstevel@tonic-gate if (acquire_lock) 4413*0Sstevel@tonic-gate lock_clear(&apic_ioapic_lock); 4414*0Sstevel@tonic-gate irq_ptr->airq_temp_cpu = (uchar_t)bind_cpu; 4415*0Sstevel@tonic-gate apic_redist_cpu_skip &= ~(1 << (bind_cpu & ~IRQ_USER_BOUND)); 4416*0Sstevel@tonic-gate intr_restore(iflag); 4417*0Sstevel@tonic-gate return (0); 4418*0Sstevel@tonic-gate } 4419*0Sstevel@tonic-gate 4420*0Sstevel@tonic-gate /* 4421*0Sstevel@tonic-gate * Checks to see if the IOAPIC interrupt entry specified has its Remote IRR 4422*0Sstevel@tonic-gate * bit set. Sets up a timeout to perform the reprogramming at a later time 4423*0Sstevel@tonic-gate * if it cannot wait for the Remote IRR bit to clear (or if waiting did not 4424*0Sstevel@tonic-gate * result in the bit's clearing). 4425*0Sstevel@tonic-gate * 4426*0Sstevel@tonic-gate * This function will mask the RDT entry if the Remote IRR bit is set. 4427*0Sstevel@tonic-gate * 4428*0Sstevel@tonic-gate * Returns non-zero if the caller should defer IOAPIC reprogramming. 4429*0Sstevel@tonic-gate */ 4430*0Sstevel@tonic-gate static int 4431*0Sstevel@tonic-gate apic_check_stuck_interrupt(apic_irq_t *irq_ptr, int old_bind_cpu, 4432*0Sstevel@tonic-gate int new_bind_cpu, volatile int32_t *ioapic, int intin_no, int which_irq) 4433*0Sstevel@tonic-gate { 4434*0Sstevel@tonic-gate int32_t rdt_entry; 4435*0Sstevel@tonic-gate int waited; 4436*0Sstevel@tonic-gate 4437*0Sstevel@tonic-gate /* Mask the RDT entry, but only if it's a level-triggered interrupt */ 4438*0Sstevel@tonic-gate rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no); 4439*0Sstevel@tonic-gate if ((rdt_entry & (AV_LEVEL|AV_MASK)) == AV_LEVEL) { 4440*0Sstevel@tonic-gate 4441*0Sstevel@tonic-gate /* Mask it */ 4442*0Sstevel@tonic-gate WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no, 4443*0Sstevel@tonic-gate AV_MASK | rdt_entry); 4444*0Sstevel@tonic-gate } 4445*0Sstevel@tonic-gate 4446*0Sstevel@tonic-gate /* 4447*0Sstevel@tonic-gate * Wait for the delivery pending bit to clear. 
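 * The loop below polls the low RDT dword in APIC_USECS_PER_WAIT_INTERVAL
 * steps and gives up with a warning after apic_max_usecs_clear_pending
 * microseconds.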
4448*0Sstevel@tonic-gate */ 4449*0Sstevel@tonic-gate if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) & 4450*0Sstevel@tonic-gate (AV_LEVEL|AV_PENDING)) == (AV_LEVEL|AV_PENDING)) { 4451*0Sstevel@tonic-gate 4452*0Sstevel@tonic-gate /* 4453*0Sstevel@tonic-gate * If we're still waiting on the delivery of this interrupt, 4454*0Sstevel@tonic-gate * continue to wait here until it is delivered (this should be 4455*0Sstevel@tonic-gate * a very small amount of time, but include a timeout just in 4456*0Sstevel@tonic-gate * case). 4457*0Sstevel@tonic-gate */ 4458*0Sstevel@tonic-gate for (waited = 0; waited < apic_max_usecs_clear_pending; 4459*0Sstevel@tonic-gate waited += APIC_USECS_PER_WAIT_INTERVAL) { 4460*0Sstevel@tonic-gate if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) 4461*0Sstevel@tonic-gate & AV_PENDING) == 0) { 4462*0Sstevel@tonic-gate break; 4463*0Sstevel@tonic-gate } 4464*0Sstevel@tonic-gate drv_usecwait(APIC_USECS_PER_WAIT_INTERVAL); 4465*0Sstevel@tonic-gate } 4466*0Sstevel@tonic-gate 4467*0Sstevel@tonic-gate if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) & 4468*0Sstevel@tonic-gate AV_PENDING) != 0) { 4469*0Sstevel@tonic-gate cmn_err(CE_WARN, "!IOAPIC %d intin %d: Could not " 4470*0Sstevel@tonic-gate "deliver interrupt to local APIC within " 4471*0Sstevel@tonic-gate "%d usecs.", irq_ptr->airq_ioapicindex, 4472*0Sstevel@tonic-gate irq_ptr->airq_intin_no, 4473*0Sstevel@tonic-gate apic_max_usecs_clear_pending); 4474*0Sstevel@tonic-gate } 4475*0Sstevel@tonic-gate } 4476*0Sstevel@tonic-gate 4477*0Sstevel@tonic-gate /* 4478*0Sstevel@tonic-gate * If the remote IRR bit is set, then the interrupt has been sent 4479*0Sstevel@tonic-gate * to a CPU for processing. We have no choice but to wait for 4480*0Sstevel@tonic-gate * that CPU to process the interrupt, at which point the remote IRR 4481*0Sstevel@tonic-gate * bit will be cleared. 4482*0Sstevel@tonic-gate */ 4483*0Sstevel@tonic-gate if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) & 4484*0Sstevel@tonic-gate (AV_LEVEL|AV_REMOTE_IRR)) == (AV_LEVEL|AV_REMOTE_IRR)) { 4485*0Sstevel@tonic-gate 4486*0Sstevel@tonic-gate /* 4487*0Sstevel@tonic-gate * If the CPU that this RDT is bound to is NOT the current 4488*0Sstevel@tonic-gate * CPU, wait until that CPU handles the interrupt and ACKs 4489*0Sstevel@tonic-gate * it. If this interrupt is not bound to any CPU (that is, 4490*0Sstevel@tonic-gate * if it's bound to the logical destination of "anyone"), it 4491*0Sstevel@tonic-gate * may have been delivered to the current CPU so handle that 4492*0Sstevel@tonic-gate * case by deferring the reprogramming (below). 
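 * Hypothetical example: a level-triggered interrupt bound to CPU 1 is being
 * rebound to CPU 2 by a thread running on CPU 0; CPU 1 has accepted the
 * interrupt but not yet issued its EOI, so Remote IRR stays set and we wait
 * for CPU 1 here rather than rewrite the entry underneath it.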
4493*0Sstevel@tonic-gate */ 4494*0Sstevel@tonic-gate kpreempt_disable(); 4495*0Sstevel@tonic-gate if ((old_bind_cpu != IRQ_UNBOUND) && 4496*0Sstevel@tonic-gate (old_bind_cpu != IRQ_UNINIT) && 4497*0Sstevel@tonic-gate (old_bind_cpu != psm_get_cpu_id())) { 4498*0Sstevel@tonic-gate for (waited = 0; waited < apic_max_usecs_clear_pending; 4499*0Sstevel@tonic-gate waited += APIC_USECS_PER_WAIT_INTERVAL) { 4500*0Sstevel@tonic-gate if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4501*0Sstevel@tonic-gate intin_no) & AV_REMOTE_IRR) == 0) { 4502*0Sstevel@tonic-gate 4503*0Sstevel@tonic-gate /* Clear the reprogramming state: */ 4504*0Sstevel@tonic-gate lock_set(&apic_ioapic_reprogram_lock); 4505*0Sstevel@tonic-gate 4506*0Sstevel@tonic-gate apic_reprogram_info[which_irq].valid 4507*0Sstevel@tonic-gate = 0; 4508*0Sstevel@tonic-gate apic_reprogram_info[which_irq].bindcpu 4509*0Sstevel@tonic-gate = 0; 4510*0Sstevel@tonic-gate apic_reprogram_info[which_irq].timeouts 4511*0Sstevel@tonic-gate = 0; 4512*0Sstevel@tonic-gate 4513*0Sstevel@tonic-gate lock_clear(&apic_ioapic_reprogram_lock); 4514*0Sstevel@tonic-gate 4515*0Sstevel@tonic-gate /* Remote IRR has cleared! */ 4516*0Sstevel@tonic-gate kpreempt_enable(); 4517*0Sstevel@tonic-gate return (0); 4518*0Sstevel@tonic-gate } 4519*0Sstevel@tonic-gate drv_usecwait(APIC_USECS_PER_WAIT_INTERVAL); 4520*0Sstevel@tonic-gate } 4521*0Sstevel@tonic-gate } 4522*0Sstevel@tonic-gate kpreempt_enable(); 4523*0Sstevel@tonic-gate 4524*0Sstevel@tonic-gate /* 4525*0Sstevel@tonic-gate * If we waited and the Remote IRR bit is still not cleared, 4526*0Sstevel@tonic-gate * AND if we've invoked the timeout APIC_REPROGRAM_MAX_TIMEOUTS 4527*0Sstevel@tonic-gate * times for this interrupt, try the last-ditch workarounds: 4528*0Sstevel@tonic-gate */ 4529*0Sstevel@tonic-gate if (apic_reprogram_info[which_irq].timeouts >= 4530*0Sstevel@tonic-gate APIC_REPROGRAM_MAX_TIMEOUTS) { 4531*0Sstevel@tonic-gate 4532*0Sstevel@tonic-gate if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) 4533*0Sstevel@tonic-gate & AV_REMOTE_IRR) != 0) { 4534*0Sstevel@tonic-gate /* 4535*0Sstevel@tonic-gate * Trying to clear the bit through normal 4536*0Sstevel@tonic-gate * channels has failed. So as a last-ditch 4537*0Sstevel@tonic-gate * effort, try to set the trigger mode to 4538*0Sstevel@tonic-gate * edge, then to level. This has been 4539*0Sstevel@tonic-gate * observed to work on many systems. 4540*0Sstevel@tonic-gate */ 4541*0Sstevel@tonic-gate WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4542*0Sstevel@tonic-gate intin_no, 4543*0Sstevel@tonic-gate READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4544*0Sstevel@tonic-gate intin_no) & ~AV_LEVEL); 4545*0Sstevel@tonic-gate 4546*0Sstevel@tonic-gate WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4547*0Sstevel@tonic-gate intin_no, 4548*0Sstevel@tonic-gate READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4549*0Sstevel@tonic-gate intin_no) | AV_LEVEL); 4550*0Sstevel@tonic-gate 4551*0Sstevel@tonic-gate /* 4552*0Sstevel@tonic-gate * If the bit's STILL set, declare total and 4553*0Sstevel@tonic-gate * utter failure 4554*0Sstevel@tonic-gate */ 4555*0Sstevel@tonic-gate if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4556*0Sstevel@tonic-gate intin_no) & AV_REMOTE_IRR) != 0) { 4557*0Sstevel@tonic-gate cmn_err(CE_WARN, "!IOAPIC %d intin %d: " 4558*0Sstevel@tonic-gate "Remote IRR failed to reset " 4559*0Sstevel@tonic-gate "within %d usecs. 
Interrupts to " 4560*0Sstevel@tonic-gate "this pin may cease to function.", 4561*0Sstevel@tonic-gate irq_ptr->airq_ioapicindex, 4562*0Sstevel@tonic-gate irq_ptr->airq_intin_no, 4563*0Sstevel@tonic-gate apic_max_usecs_clear_pending); 4564*0Sstevel@tonic-gate } 4565*0Sstevel@tonic-gate } 4566*0Sstevel@tonic-gate /* Clear the reprogramming state: */ 4567*0Sstevel@tonic-gate lock_set(&apic_ioapic_reprogram_lock); 4568*0Sstevel@tonic-gate 4569*0Sstevel@tonic-gate apic_reprogram_info[which_irq].valid = 0; 4570*0Sstevel@tonic-gate apic_reprogram_info[which_irq].bindcpu = 0; 4571*0Sstevel@tonic-gate apic_reprogram_info[which_irq].timeouts = 0; 4572*0Sstevel@tonic-gate 4573*0Sstevel@tonic-gate lock_clear(&apic_ioapic_reprogram_lock); 4574*0Sstevel@tonic-gate } else { 4575*0Sstevel@tonic-gate #ifdef DEBUG 4576*0Sstevel@tonic-gate cmn_err(CE_WARN, "Deferring reprogramming of irq %d", 4577*0Sstevel@tonic-gate which_irq); 4578*0Sstevel@tonic-gate #endif /* DEBUG */ 4579*0Sstevel@tonic-gate /* 4580*0Sstevel@tonic-gate * If waiting for the Remote IRR bit (above) didn't 4581*0Sstevel@tonic-gate * allow it to clear, defer the reprogramming: 4582*0Sstevel@tonic-gate */ 4583*0Sstevel@tonic-gate lock_set(&apic_ioapic_reprogram_lock); 4584*0Sstevel@tonic-gate 4585*0Sstevel@tonic-gate apic_reprogram_info[which_irq].valid = 1; 4586*0Sstevel@tonic-gate apic_reprogram_info[which_irq].bindcpu = new_bind_cpu; 4587*0Sstevel@tonic-gate apic_reprogram_info[which_irq].timeouts++; 4588*0Sstevel@tonic-gate 4589*0Sstevel@tonic-gate lock_clear(&apic_ioapic_reprogram_lock); 4590*0Sstevel@tonic-gate 4591*0Sstevel@tonic-gate /* Fire up a timeout to handle this later */ 4592*0Sstevel@tonic-gate (void) timeout(apic_reprogram_timeout_handler, 4593*0Sstevel@tonic-gate (void *) 0, 4594*0Sstevel@tonic-gate drv_usectohz(APIC_REPROGRAM_TIMEOUT_DELAY)); 4595*0Sstevel@tonic-gate 4596*0Sstevel@tonic-gate /* Inform caller to defer IOAPIC programming: */ 4597*0Sstevel@tonic-gate return (1); 4598*0Sstevel@tonic-gate } 4599*0Sstevel@tonic-gate } 4600*0Sstevel@tonic-gate return (0); 4601*0Sstevel@tonic-gate } 4602*0Sstevel@tonic-gate 4603*0Sstevel@tonic-gate /* 4604*0Sstevel@tonic-gate * Timeout handler that performs the APIC reprogramming 4605*0Sstevel@tonic-gate */ 4606*0Sstevel@tonic-gate /*ARGSUSED*/ 4607*0Sstevel@tonic-gate static void 4608*0Sstevel@tonic-gate apic_reprogram_timeout_handler(void *arg) 4609*0Sstevel@tonic-gate { 4610*0Sstevel@tonic-gate /*LINTED: set but not used in function*/ 4611*0Sstevel@tonic-gate int i, result; 4612*0Sstevel@tonic-gate 4613*0Sstevel@tonic-gate /* Serialize access to this function */ 4614*0Sstevel@tonic-gate mutex_enter(&apic_reprogram_timeout_mutex); 4615*0Sstevel@tonic-gate 4616*0Sstevel@tonic-gate /* 4617*0Sstevel@tonic-gate * For each entry in the reprogramming state that's valid, 4618*0Sstevel@tonic-gate * try the reprogramming again: 4619*0Sstevel@tonic-gate */ 4620*0Sstevel@tonic-gate for (i = 0; i < APIC_MAX_VECTOR; i++) { 4621*0Sstevel@tonic-gate if (apic_reprogram_info[i].valid == 0) 4622*0Sstevel@tonic-gate continue; 4623*0Sstevel@tonic-gate /* 4624*0Sstevel@tonic-gate * Though we can't really do anything about errors 4625*0Sstevel@tonic-gate * at this point, keep track of them for reporting. 4626*0Sstevel@tonic-gate * Note that it is very possible for apic_setup_io_intr 4627*0Sstevel@tonic-gate * to re-register this very timeout if the Remote IRR bit 4628*0Sstevel@tonic-gate * has not yet cleared. 
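		 * Each such retry bumps apic_reprogram_info[i].timeouts;
		 * once APIC_REPROGRAM_MAX_TIMEOUTS is reached,
		 * apic_check_stuck_interrupt() applies the edge-then-level
		 * workaround and clears the entry instead of deferring again.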
		 */
		result = apic_setup_io_intr_deferred(apic_irq_table[i], i);

#ifdef DEBUG
		if (result)
			cmn_err(CE_WARN, "apic_reprogram_timeout: "
			    "apic_setup_io_intr returned nonzero for "
			    "irq=%d!", i);
#endif	/* DEBUG */
	}

	mutex_exit(&apic_reprogram_timeout_mutex);
}


/*
 * Called to migrate all interrupts at an irq to another cpu.  If "safe"
 * is true we are not being called from interrupt context, so it is safe
 * to block in lock_set().  If false, only do a lock_try() and return
 * failure (non-zero) if the lock cannot be acquired.
 */
static int
apic_rebind_all(apic_irq_t *irq_ptr, int bind_cpu, int safe)
{
	apic_irq_t	*irqptr = irq_ptr;
	int		retval = 0;
	int		iflag;

	iflag = intr_clear();
	if (!safe) {
		if (lock_try(&apic_ioapic_lock) == 0) {
			intr_restore(iflag);
			return (1);
		}
	} else
		lock_set(&apic_ioapic_lock);

	while (irqptr) {
		if (irqptr->airq_temp_cpu != IRQ_UNINIT)
			retval |= apic_rebind(irqptr, bind_cpu, 0, IMMEDIATE);
		irqptr = irqptr->airq_next;
	}
	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
	return (retval);
}

/*
 * apic_intr_redistribute does all the messy computations for identifying
 * which interrupt to move to which CPU.  Currently we do just one interrupt
 * at a time.  This reduces the time we spend doing all of this within the
 * clock interrupt; if it were done from idle, we could do more than one.
 * First we find the most busy and the most free CPU (time in ISR only),
 * skipping those CPUs that have been identified as ineligible (cpu_skip).
 * Then we look for IRQs whose load is closest to the difference between the
 * most busy CPU and the average ISR load.  We try to find one whose load is
 * less than that difference.  If none exists, we choose one larger than the
 * difference, provided it does not make the most idle CPU worse off than the
 * most busy one.  In the end, we clear all the busy fields for CPUs; for
 * IRQs, they are cleared as they are scanned.
 */
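/*
 * Illustrative example (hypothetical numbers, not from any measurement):
 * with per-CPU ISR loads of 40, 10, 10 and 20, the busiest CPU is at 40,
 * the average is 20, and so diff = 20.  An IRQ on the busiest CPU with a
 * load of 15 (below diff) is the preferred candidate; failing that, an
 * IRQ with a load of, say, 25 may still be chosen, as long as moving it
 * would not leave the currently most idle CPU busier than the busiest
 * one is now.
 */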
static void
apic_intr_redistribute()
{
	int busiest_cpu, most_free_cpu;
	int cpu_free, cpu_busy, max_busy, min_busy;
	int min_free, diff;
	int	average_busy, cpus_online;
	int i, busy;
	apic_cpus_info_t *cpu_infop;
	apic_irq_t *min_busy_irq = NULL;
	apic_irq_t *max_busy_irq = NULL;

	busiest_cpu = most_free_cpu = -1;
	cpu_free = cpu_busy = max_busy = average_busy = 0;
	min_free = apic_sample_factor_redistribution;
	cpus_online = 0;
	/*
	 * Below we check CPU_INTR_ENABLE, bound, temp_bound and temp_cpu
	 * without holding ioapic_lock.  That is OK: we are only doing
	 * statistical sampling, and any inaccuracy now will be corrected
	 * on the next pass.  The call to rebind, which actually changes
	 * things, will make sure we are consistent.
	 */
	for (i = 0; i < apic_nproc; i++) {
		if (!(apic_redist_cpu_skip & (1 << i)) &&
		    (apic_cpus[i].aci_status & APIC_CPU_INTR_ENABLE)) {

			cpu_infop = &apic_cpus[i];
			/*
			 * If no unbound interrupts or only 1 total on this
			 * CPU, skip
			 */
			if (!cpu_infop->aci_temp_bound ||
			    (cpu_infop->aci_bound + cpu_infop->aci_temp_bound)
			    == 1) {
				apic_redist_cpu_skip |= 1 << i;
				continue;
			}

			busy = cpu_infop->aci_busy;
			average_busy += busy;
			cpus_online++;
			if (max_busy < busy) {
				max_busy = busy;
				busiest_cpu = i;
			}
			if (min_free > busy) {
				min_free = busy;
				most_free_cpu = i;
			}
			if (busy > apic_int_busy_mark) {
				cpu_busy |= 1 << i;
			} else {
				if (busy < apic_int_free_mark)
					cpu_free |= 1 << i;
			}
		}
	}
	if ((cpu_busy && cpu_free) ||
	    (max_busy >= (min_free + apic_diff_for_redistribution))) {

		apic_num_imbalance++;
#ifdef DEBUG
		if (apic_verbose & APIC_VERBOSE_IOAPIC_FLAG) {
			prom_printf(
			    "redistribute busy=%x free=%x max=%x min=%x",
			    cpu_busy, cpu_free, max_busy, min_free);
		}
#endif /* DEBUG */


		average_busy /= cpus_online;
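		/*
		 * diff (computed below) is how far the busiest CPU sits above
		 * the average ISR load; it bounds the size of the IRQ we
		 * would prefer to migrate off that CPU.
		 */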

		diff = max_busy - average_busy;
		min_busy = max_busy; /* start with the max possible value */
		max_busy = 0;
		min_busy_irq = max_busy_irq = NULL;
		i = apic_min_device_irq;
		for (; i < apic_max_device_irq; i++) {
			apic_irq_t *irq_ptr;
			/* Change to linked list per CPU ? */
			if ((irq_ptr = apic_irq_table[i]) == NULL)
				continue;
			/* Check for irq_busy & decide which one to move */
			/* Also zero them for next round */
			if ((irq_ptr->airq_temp_cpu == busiest_cpu) &&
			    irq_ptr->airq_busy) {
				if (irq_ptr->airq_busy < diff) {
					/*
					 * Check for least busy CPU,
					 * best fit or what ?
					 */
					if (max_busy < irq_ptr->airq_busy) {
						/*
						 * Most busy within the
						 * required differential
						 */
						max_busy = irq_ptr->airq_busy;
						max_busy_irq = irq_ptr;
					}
				} else {
					if (min_busy > irq_ptr->airq_busy) {
						/*
						 * least busy, but more than
						 * the reqd diff
						 */
						if (min_busy <
						    (diff + average_busy -
						    min_free)) {
							/*
							 * Making sure new cpu
							 * will not end up
							 * worse
							 */
							min_busy =
							    irq_ptr->airq_busy;

							min_busy_irq = irq_ptr;
						}
					}
				}
			}
			irq_ptr->airq_busy = 0;
		}

		if (max_busy_irq != NULL) {
#ifdef DEBUG
			if (apic_verbose & APIC_VERBOSE_IOAPIC_FLAG) {
				prom_printf("rebinding %x to %x",
				    max_busy_irq->airq_vector, most_free_cpu);
			}
#endif /* DEBUG */
			if (apic_rebind_all(max_busy_irq, most_free_cpu, 0)
			    == 0)
				/* Make the change permanent */
				max_busy_irq->airq_cpu = (uchar_t)most_free_cpu;
		} else if (min_busy_irq != NULL) {
#ifdef DEBUG
			if (apic_verbose & APIC_VERBOSE_IOAPIC_FLAG) {
				prom_printf("rebinding %x to %x",
				    min_busy_irq->airq_vector, most_free_cpu);
			}
#endif /* DEBUG */

			if (apic_rebind_all(min_busy_irq, most_free_cpu, 0) ==
			    0)
				/* Make the change permanent */
				min_busy_irq->airq_cpu = (uchar_t)most_free_cpu;
		} else {
			if (cpu_busy != (1 << busiest_cpu)) {
				apic_redist_cpu_skip |= 1 << busiest_cpu;
				/*
				 * We leave cpu_skip set so that next time we
				 * can choose another cpu
				 */
			}
		}
		apic_num_rebind++;
	} else {
		/*
		 * Found nothing.  Either we skipped over valid CPUs or
		 * everything is already balanced.  If we had a variable
		 * ticks_for_redistribution, it could be increased here;
		 * apic_int_busy, int_free etc. would also need to change.
		 */
		if (apic_redist_cpu_skip)
			apic_redist_cpu_skip = 0;
	}
	for (i = 0; i < apic_nproc; i++) {
		apic_cpus[i].aci_busy = 0;
	}
}

static void
apic_cleanup_busy()
{
	int i;
	apic_irq_t *irq_ptr;

	for (i = 0; i < apic_nproc; i++) {
		apic_cpus[i].aci_busy = 0;
	}

	for (i = apic_min_device_irq; i < apic_max_device_irq; i++) {
		if ((irq_ptr = apic_irq_table[i]) != NULL)
			irq_ptr->airq_busy = 0;
	}
	apic_skipped_redistribute = 0;
}

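/*
 * Worked example for the oneshot arithmetic in apic_timer_reprogram()
 * below (illustrative numbers only): if the requested absolute time is
 * 500 microseconds from now and apic_nsec_per_tick is 10, the local
 * APIC initial-count register is programmed with 500000 / 10 = 50000
 * ticks.  Targets already in the past are clamped up to
 * apic_min_timer_ticks; targets beyond apic_nsec_max are clamped down
 * to APIC_MAXVAL.
 */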

/*
 * This function will reprogram the timer.
 *
 * When in oneshot mode the argument is the absolute time in the future at
 * which to generate the interrupt.
 *
 * When in periodic mode, the argument is the interval at which the
 * interrupts should be generated.  There is no need to support changing
 * the periodic mode timer at this time.
 */
static void
apic_timer_reprogram(hrtime_t time)
{
	hrtime_t now;
	uint_t ticks;

	/*
	 * We should be called from high PIL context (CBE_HIGH_PIL),
	 * so kpreempt is disabled.
	 */

	if (!apic_oneshot) {
		/* time is the interval for periodic mode */
		ticks = (uint_t)((time) / apic_nsec_per_tick);
	} else {
		/* one shot mode */

		now = gethrtime();

		if (time <= now) {
			/*
			 * requested to generate an interrupt in the past;
			 * generate one as soon as possible
			 */
			ticks = apic_min_timer_ticks;
		} else if ((time - now) > apic_nsec_max) {
			/*
			 * requested to generate an interrupt further out
			 * than we are capable of; set to the maximum the
			 * hardware can handle
			 */

			ticks = APIC_MAXVAL;
#ifdef DEBUG
			cmn_err(CE_CONT, "apic_timer_reprogram, request at"
			    " %lld too far in future, current time"
			    " %lld \n", time, now);
#endif /* DEBUG */
		} else
			ticks = (uint_t)((time - now) / apic_nsec_per_tick);
	}

	if (ticks < apic_min_timer_ticks)
		ticks = apic_min_timer_ticks;

	apicadr[APIC_INIT_COUNT] = ticks;

}

/*
 * This function will enable timer interrupts.
 */
static void
apic_timer_enable(void)
{
	/*
	 * We should be called from high PIL context (CBE_HIGH_PIL),
	 * so kpreempt is disabled.
	 */

	if (!apic_oneshot)
		apicadr[APIC_LOCAL_TIMER] =
		    (apic_clkvect + APIC_BASE_VECT) | AV_TIME;
	else {
		/* one shot */
		apicadr[APIC_LOCAL_TIMER] = (apic_clkvect + APIC_BASE_VECT);
	}
}

/*
 * This function will disable timer interrupts.
 */
static void
apic_timer_disable(void)
{
	/*
	 * We should be called from high PIL context (CBE_HIGH_PIL),
	 * so kpreempt is disabled.
	 */

	apicadr[APIC_LOCAL_TIMER] = (apic_clkvect + APIC_BASE_VECT) | AV_MASK;
}


cyclic_id_t apic_cyclic_id;

/*
 * Post-cyclic-initialization hook: if this module needs to be a consumer
 * of the cyclic subsystem, its cyclics can be added here, since the kernel
 * cyclic subsystem has been initialized by this point.  The argument is
 * not currently used and is reserved for the future.
 */
static void
apic_post_cyclic_setup(void *arg)
{
	_NOTE(ARGUNUSED(arg))
	cyc_handler_t hdlr;
	cyc_time_t when;

	/* cpu_lock is held */

	/* set up cyclics for intr redistribution */

	/*
	 * In periodic mode intr redistribution processing is done in
	 * apic_intr_enter during clk intr processing
	 */
	if (!apic_oneshot)
		return;

	hdlr.cyh_level = CY_LOW_LEVEL;
	hdlr.cyh_func = (cyc_func_t)apic_redistribute_compute;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = apic_redistribute_sample_interval;
	apic_cyclic_id = cyclic_add(&hdlr, &when);


}

static void
apic_redistribute_compute(void)
{
	int	i, j, max_busy;

	if (apic_enable_dynamic_migration) {
		if (++apic_nticks == apic_sample_factor_redistribution) {
			/*
			 * Time to call apic_intr_redistribute(); reset
			 * apic_nticks.  This causes max_busy to be
			 * calculated below, and if it exceeds
			 * apic_int_busy_mark we do the full redistribution.
			 */
			apic_nticks = 0;
		}
		max_busy = 0;
		for (i = 0; i < apic_nproc; i++) {

			/*
			 * Check if curipl is non zero & if ISR is in
			 * progress
			 */
			if (((j = apic_cpus[i].aci_curipl) != 0) &&
			    (apic_cpus[i].aci_ISR_in_progress & (1 << j))) {

				int	irq;
				apic_cpus[i].aci_busy++;
				irq = apic_cpus[i].aci_current[j];
				apic_irq_table[irq]->airq_busy++;
			}

			if (!apic_nticks &&
			    (apic_cpus[i].aci_busy > max_busy))
				max_busy = apic_cpus[i].aci_busy;
		}
		if (!apic_nticks) {
			if (max_busy > apic_int_busy_mark) {
				/*
				 * We could make the following check be
				 * skipped > 1 in which case, we get a
				 * redistribution at half the busy mark (due to
				 * double interval). Need to be able to collect
				 * more empirical data to decide if that is a
				 * good strategy. Punt for now.
				 */
				if (apic_skipped_redistribute)
					apic_cleanup_busy();
				else
					apic_intr_redistribute();
			} else
				apic_skipped_redistribute++;
		}
	}
}


static int
apic_acpi_translate_pci_irq(dev_info_t *dip, int busid, int devid,
    int ipin, int *pci_irqp, iflag_t *intr_flagp)
{

	int status;
	acpi_psm_lnk_t acpipsmlnk;

	if ((status = acpi_get_irq_cache_ent(busid, devid, ipin, pci_irqp,
	    intr_flagp)) == ACPI_PSM_SUCCESS) {
		APIC_VERBOSE_IRQ((CE_CONT, "!pcplusmp: Found irqno %d "
		    "from cache for device %s, instance #%d\n", *pci_irqp,
		    ddi_get_name(dip), ddi_get_instance(dip)));
		return (status);
	}

	bzero(&acpipsmlnk, sizeof (acpi_psm_lnk_t));

	if ((status = acpi_translate_pci_irq(dip, ipin, pci_irqp, intr_flagp,
	    &acpipsmlnk)) == ACPI_PSM_FAILURE) {
		APIC_VERBOSE_IRQ((CE_WARN, "pcplusmp: "
		    " acpi_translate_pci_irq failed for device %s, instance"
		    " #%d", ddi_get_name(dip), ddi_get_instance(dip)));
		return (status);
	}

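	/*
	 * The translation either succeeded outright or returned
	 * ACPI_PSM_PARTIAL, i.e. the routing entry points at an interrupt
	 * link device (acpipsmlnk.lnkobj) that still needs to be
	 * configured; that case is handled below.
	 */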
	if (status == ACPI_PSM_PARTIAL && acpipsmlnk.lnkobj != NULL) {
		status = apic_acpi_irq_configure(&acpipsmlnk, dip, pci_irqp,
		    intr_flagp);
		if (status != ACPI_PSM_SUCCESS) {
			status = acpi_get_current_irq_resource(&acpipsmlnk,
			    pci_irqp, intr_flagp);
		}
	}

	if (status == ACPI_PSM_SUCCESS) {
		acpi_new_irq_cache_ent(busid, devid, ipin, *pci_irqp,
		    intr_flagp, &acpipsmlnk);

		APIC_VERBOSE_IRQ((CE_CONT, "pcplusmp: [ACPI] "
		    "new irq %d for device %s, instance #%d\n",
		    *pci_irqp, ddi_get_name(dip), ddi_get_instance(dip)));
	}

	return (status);
}

/*
 * Configures the irq for the interrupt link device identified by
 * acpipsmlnkp.
 *
 * Gets the current and the list of possible irq settings for the
 * device.  If apic_unconditional_srs is not set, and the current
 * resource setting is in the list of possible irq settings, the
 * current irq resource setting is passed to the caller.
 *
 * Otherwise, picks an irq number from the list of possible irq
 * settings, and sets the irq of the device to this value.
 * If prefer_crs is set, then among the set of irq numbers in the list
 * that have the least number of devices sharing the interrupt, we pick
 * the current irq resource setting if it is a member of this set.
 *
 * Passes the irq number in the value pointed to by pci_irqp, and
 * polarity and sensitivity in the structure pointed to by dipintrflagp
 * to the caller.
 *
 * Note that if setting the irq resource failed, but we successfully
 * obtained the current irq resource settings, the current irq resources
 * are passed and it is considered a success.
 *
 * Returns:
 * ACPI_PSM_SUCCESS on success.
 *
 * ACPI_PSM_FAILURE if an error occurred during the configuration or
 * if a suitable irq was not found for this device, or if setting the
 * irq resource and obtaining the current resource fails.
 *
 */
static int
apic_acpi_irq_configure(acpi_psm_lnk_t *acpipsmlnkp, dev_info_t *dip,
    int *pci_irqp, iflag_t *dipintr_flagp)
{

	int i, min_share, foundnow, done = 0;
	int32_t irq;
	int32_t share_irq = -1;
	int32_t chosen_irq = -1;
	int cur_irq = -1;
	acpi_irqlist_t *irqlistp;
	acpi_irqlist_t *irqlistent;

	if ((acpi_get_possible_irq_resources(acpipsmlnkp, &irqlistp))
	    == ACPI_PSM_FAILURE) {
		APIC_VERBOSE_IRQ((CE_WARN, "!pcplusmp: Unable to determine "
		    "or assign IRQ for device %s, instance #%d: The system was "
		    "unable to get the list of potential IRQs from ACPI.",
		    ddi_get_name(dip), ddi_get_instance(dip)));

		return (ACPI_PSM_FAILURE);
	}

	if ((acpi_get_current_irq_resource(acpipsmlnkp, &cur_irq,
	    dipintr_flagp) == ACPI_PSM_SUCCESS) && (!apic_unconditional_srs) &&
	    (cur_irq > 0)) {
		/*
		 * If an IRQ is set in CRS and that IRQ exists in the set
		 * returned from _PRS, return that IRQ, otherwise print
		 * a warning
		 */

		if (acpi_irqlist_find_irq(irqlistp, cur_irq, NULL)
		    == ACPI_PSM_SUCCESS) {

			acpi_free_irqlist(irqlistp);
			ASSERT(pci_irqp != NULL);
			*pci_irqp = cur_irq;
			return (ACPI_PSM_SUCCESS);
		}

		APIC_VERBOSE_IRQ((CE_WARN, "!pcplusmp: Could not find the "
		    "current irq %d for device %s, instance #%d in ACPI's "
		    "list of possible irqs for this device.  Picking one "
		    "from the latter list.", cur_irq, ddi_get_name(dip),
		    ddi_get_instance(dip)));
	}

	irqlistent = irqlistp;
	min_share = 255;

	while (irqlistent != NULL) {
		irqlistent->intr_flags.bustype = BUS_PCI;

		for (foundnow = 0, i = 0; i < irqlistent->num_irqs; i++) {

			irq = irqlistent->irqs[i];

			if ((irq < 16) && (apic_reserved_irqlist[irq]))
				continue;

			if (irq == 0) {
				/* invalid irq number */
				continue;
			}

			if ((apic_irq_table[irq] == NULL) ||
			    (apic_irq_table[irq]->airq_dip == dip)) {
				chosen_irq = irq;
				foundnow = 1;
				/*
				 * If we do not prefer the current irq from
				 * crs, or if we do and this irq is the same
				 * as the current irq from crs, this is the
				 * one to pick.
				 */
				if (!(apic_prefer_crs) || (irq == cur_irq)) {
					done = 1;
					break;
				}
				continue;
			}

			if (irqlistent->intr_flags.intr_el == INTR_EL_EDGE)
				continue;

			if (!acpi_intr_compatible(irqlistent->intr_flags,
			    apic_irq_table[irq]->airq_iflag))
				continue;

			if ((apic_irq_table[irq]->airq_share < min_share) ||
			    ((apic_irq_table[irq]->airq_share == min_share) &&
			    (cur_irq == irq) && (apic_prefer_crs))) {
				min_share = apic_irq_table[irq]->airq_share;
				share_irq = irq;
				foundnow = 1;
			}
		}

		/*
		 * If we found an IRQ in the inner loop this time, save the
		 * details from the irqlist for later use.
		 */
		if (foundnow && ((chosen_irq != -1) || (share_irq != -1))) {
			/*
			 * Copy the acpi_prs_private_t and flags from this
			 * irq list entry, since we found an irq from this
			 * entry.
			 */
			acpipsmlnkp->acpi_prs_prv = irqlistent->acpi_prs_prv;
			*dipintr_flagp = irqlistent->intr_flags;
		}

		if (done)
			break;

		/* Go to the next irqlist entry */
		irqlistent = irqlistent->next;
	}


	acpi_free_irqlist(irqlistp);
	if (chosen_irq != -1)
		irq = chosen_irq;
	else if (share_irq != -1)
		irq = share_irq;
	else {
		APIC_VERBOSE_IRQ((CE_WARN, "!pcplusmp: Could not find a "
		    "suitable irq from the list of possible irqs for device "
		    "%s, instance #%d in ACPI's list of possible irqs",
		    ddi_get_name(dip), ddi_get_instance(dip)));
		return (ACPI_PSM_FAILURE);
	}

	APIC_VERBOSE_IRQ((CE_CONT, "!pcplusmp: Setting irq %d for device %s "
	    "instance #%d\n", irq, ddi_get_name(dip), ddi_get_instance(dip)));

	if ((acpi_set_irq_resource(acpipsmlnkp, irq)) == ACPI_PSM_SUCCESS) {
		/*
		 * setting irq was successful, check to make sure CRS
		 * reflects that. If CRS does not agree with what we
		 * set, return the irq that was set.
		 */

		if (acpi_get_current_irq_resource(acpipsmlnkp, &cur_irq,
		    dipintr_flagp) == ACPI_PSM_SUCCESS) {

			if (cur_irq != irq)
				APIC_VERBOSE_IRQ((CE_WARN, "!pcplusmp: "
				    "IRQ resource set (irqno %d) for device %s "
				    "instance #%d, differs from current "
				    "setting irqno %d",
				    irq, ddi_get_name(dip),
				    ddi_get_instance(dip), cur_irq));
		}

		/*
		 * return the irq that was set, and not what CRS reports,
		 * since CRS has been seen to be bogus on some systems
		 */
		cur_irq = irq;
	} else {
		APIC_VERBOSE_IRQ((CE_WARN, "!pcplusmp: set resource irq %d "
		    "failed for device %s instance #%d",
		    irq, ddi_get_name(dip), ddi_get_instance(dip)));

		if (cur_irq == -1)
			return (ACPI_PSM_FAILURE);
	}

	ASSERT(pci_irqp != NULL);
	*pci_irqp = cur_irq;
	return (ACPI_PSM_SUCCESS);
}
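/*
 * Rough summary of the selection policy above (hypothetical numbers): if
 * _PRS offers IRQs 9, 10 and 11, an IRQ that is currently unused (or
 * already routed to this device) is preferred outright; otherwise the
 * shareable IRQ with the fewest existing sharers wins, and when
 * apic_prefer_crs is set, ties are broken in favor of the IRQ reported
 * by _CRS.
 */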