/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
 * PSMI 1.5 extensions are supported in Solaris Nevada.
 * PSMI 1.6 extensions are supported in Solaris Nevada.
 * PSMI 1.7 extensions are supported in Solaris Nevada.
 */
#define	PSMI_1_7

#include <sys/processor.h>
#include <sys/time.h>
#include <sys/psm.h>
#include <sys/smp_impldefs.h>
#include <sys/cram.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/psm_common.h>
#include <sys/pit.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pci.h>
#include <sys/promif.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/uadmin.h>
#include <sys/panic.h>
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/trap.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/note.h>
#include <sys/pci_intr_lib.h>
#include <sys/spl.h>
#include <sys/clock.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/x_call.h>
#include <sys/reboot.h>
#include <sys/mach_intr.h>
#include <sys/apix.h>
#include <sys/apix_irm_impl.h>

static int apix_probe();
static void apix_init();
static void apix_picinit(void);
static int apix_intr_enter(int, int *);
static void apix_intr_exit(int, int);
static void apix_setspl(int);
static int apix_disable_intr(processorid_t);
static void apix_enable_intr(processorid_t);
static int apix_get_clkvect(int);
static int apix_get_ipivect(int, int);
static void apix_post_cyclic_setup(void *);
static int apix_post_cpu_start();
static int apix_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);

/*
 * Helper functions for apix_intr_ops()
 */
static void apix_redistribute_compute(void);
static int apix_get_pending(apix_vector_t *);
static apix_vector_t *apix_get_req_vector(ddi_intr_handle_impl_t *, ushort_t);
static int apix_get_intr_info(ddi_intr_handle_impl_t *, apic_get_intr_t *);
static char *apix_get_apic_type(void);
static int apix_intx_get_pending(int);
static void apix_intx_set_mask(int irqno);
static void apix_intx_clear_mask(int irqno);
static int apix_intx_get_shared(int irqno);
static void apix_intx_set_shared(int irqno, int delta);
static apix_vector_t *apix_intx_xlate_vector(dev_info_t *, int,
    struct intrspec *);
static int apix_intx_alloc_vector(dev_info_t *, int, struct intrspec *);

extern int apic_clkinit(int);

/* IRM initialization for APIX PSM module */
extern void apix_irm_init(void);

extern int irm_enable;

/*
 * Local static data
 */
static struct psm_ops apix_ops = {
	apix_probe,

	apix_init,
	apix_picinit,
	apix_intr_enter,
	apix_intr_exit,
	apix_setspl,
	apix_addspl,
	apix_delspl,
	apix_disable_intr,
	apix_enable_intr,
	NULL,			/* psm_softlvl_to_irq */
	NULL,			/* psm_set_softintr */

	apic_set_idlecpu,
	apic_unset_idlecpu,

	apic_clkinit,
	apix_get_clkvect,
	NULL,			/* psm_hrtimeinit */
	apic_gethrtime,

	apic_get_next_processorid,
	apic_cpu_start,
	apix_post_cpu_start,
	apic_shutdown,
	apix_get_ipivect,
	apic_send_ipi,

	NULL,			/* psm_translate_irq */
	NULL,			/* psm_notify_error */
	NULL,			/* psm_notify_func */
	apic_timer_reprogram,
	apic_timer_enable,
	apic_timer_disable,
	apix_post_cyclic_setup,
	apic_preshutdown,
	apix_intr_ops,		/* Advanced DDI Interrupt framework */
	apic_state,		/* save, restore apic state for S3 */
	apic_cpu_ops,		/* CPU control interface. */
};

struct psm_ops *psmops = &apix_ops;

static struct psm_info apix_psm_info = {
	PSM_INFO_VER01_7,	/* version */
	PSM_OWN_EXCLUSIVE,	/* ownership */
	&apix_ops,		/* operation */
	APIX_NAME,		/* machine name */
	"apix MPv1.4 compatible",
};

static void *apix_hdlp;

static int apix_is_enabled = 0;

/*
 * Flag to indicate if APIX is to be enabled only for platforms
 * with specific hw feature(s).
 */
int apix_hw_chk_enable = 1;

/*
 * Hw features that are checked for enabling APIX support.
 */
#define	APIX_SUPPORT_X2APIC	0x00000001
uint_t apix_supported_hw = APIX_SUPPORT_X2APIC;

/*
 * apix_lock is used for cpu selection and vector re-binding
 */
lock_t apix_lock;
apix_impl_t *apixs[NCPU];
/*
 * Mapping between device interrupt and the allocated vector. Indexed
 * by major number.
 */
apix_dev_vector_t **apix_dev_vector;
/*
 * Mapping between device major number and CPU id.  It is used when the
 * round-robin-with-affinity interrupt binding policy is applied.  With
 * that policy, devices with the same major number are bound to the
 * same CPU.
 */
processorid_t *apix_major_to_cpu;	/* major to cpu mapping */
kmutex_t apix_mutex;	/* for apix_dev_vector & apix_major_to_cpu */

int apix_nipis = 16;	/* Maximum number of IPIs */
/*
 * Maximum number of vectors in a CPU that can be used for interrupt
 * allocation (including IPIs and the reserved vectors).
 */
int apix_cpu_nvectors = APIX_NVECTOR;

/* gcpu.h */

extern void apic_do_interrupt(struct regs *rp, trap_trace_rec_t *ttp);
extern void apic_change_eoi();

/*
 * This is the loadable module wrapper
 */

int
_init(void)
{
	if (apic_coarse_hrtime)
		apix_ops.psm_gethrtime = &apic_gettime;
	return (psm_mod_init(&apix_hdlp, &apix_psm_info));
}

int
_fini(void)
{
	return (psm_mod_fini(&apix_hdlp, &apix_psm_info));
}

int
_info(struct modinfo *modinfop)
{
	return (psm_mod_info(&apix_hdlp, &apix_psm_info, modinfop));
}

static int
apix_probe()
{
	int rval;

	if (apix_enable == 0)
		return (PSM_FAILURE);

	/* check for hw features if specified */
	if (apix_hw_chk_enable) {
		/* check if x2APIC mode is supported */
		if ((apix_supported_hw & APIX_SUPPORT_X2APIC) ==
		    APIX_SUPPORT_X2APIC) {
			if (!((apic_local_mode() == LOCAL_X2APIC) ||
			    apic_detect_x2apic())) {
				/* x2APIC mode is not supported in the hw */
				apix_enable = 0;
			}
		}
		if (apix_enable == 0)
			return (PSM_FAILURE);
	}

	rval = apic_probe_common(apix_psm_info.p_mach_idstring);
	if (rval == PSM_SUCCESS)
		apix_is_enabled = 1;
	else
		apix_is_enabled = 0;
	return (rval);
}

/*
 * Initialize the data structures needed by the pcplusmpx module.
 * Specifically, the data structures used by the addspl() and delspl()
 * routines.
 */
static void
apix_softinit()
{
	int i, *iptr;
	apix_impl_t *hdlp;
	int nproc;

	nproc = max(apic_nproc, apic_max_nproc);

	hdlp = kmem_zalloc(nproc * sizeof (apix_impl_t), KM_SLEEP);
	for (i = 0; i < nproc; i++) {
		apixs[i] = &hdlp[i];
		apixs[i]->x_cpuid = i;
		LOCK_INIT_CLEAR(&apixs[i]->x_lock);
	}

	/* cpu 0 is always up (for now) */
	apic_cpus[0].aci_status = APIC_CPU_ONLINE | APIC_CPU_INTR_ENABLE;

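	/*
	 * Start with an empty IRQ table and no level-triggered
	 * interrupts recorded.
	 */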
	iptr = (int *)&apic_irq_table[0];
	for (i = 0; i <= APIC_MAX_VECTOR; i++) {
		apic_level_intr[i] = 0;
		*iptr++ = NULL;
	}
	mutex_init(&airq_mutex, NULL, MUTEX_DEFAULT, NULL);

	apix_dev_vector = kmem_zalloc(sizeof (apix_dev_vector_t *) * devcnt,
	    KM_SLEEP);

	if (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) {
		apix_major_to_cpu = kmem_zalloc(sizeof (int) * devcnt,
		    KM_SLEEP);
		for (i = 0; i < devcnt; i++)
			apix_major_to_cpu[i] = IRQ_UNINIT;
	}

	mutex_init(&apix_mutex, NULL, MUTEX_DEFAULT, NULL);
}

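/*
 * Return the IPL of the highest-priority pending interrupt on the
 * current CPU.  bsrw_insn() finds the most significant bit set in the
 * per-CPU pending mask.
 */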
static int
apix_get_pending_spl(void)
{
	int cpuid = CPU->cpu_id;

	return (bsrw_insn(apixs[cpuid]->x_intr_pending));
}

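/*
 * Return the autovector list registered for the given CPU/vector pair.
 */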
static uintptr_t
apix_get_intr_handler(int cpu, short vec)
{
	apix_vector_t *apix_vector;

	ASSERT(cpu < apic_nproc && vec < APIX_NVECTOR);
	if (cpu >= apic_nproc)
		return (NULL);

	apix_vector = apixs[cpu]->x_vectbl[vec];

	return ((uintptr_t)(apix_vector->v_autovect));
}

#if defined(__amd64)
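/*
 * Dummy per-IPL priority table used to initialize cpu_pri_data in
 * apix_init().  apix sets the task priority through CR8 directly (see
 * apix_setspl()), so the entries are simply left at zero.
 */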
static unsigned char dummy_cpu_pri[MAXIPL + 1] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0
};
#endif

static void
apix_init()
{
	extern void (*do_interrupt_common)(struct regs *, trap_trace_rec_t *);

	APIC_VERBOSE(INIT, (CE_CONT, "apix: psm_softinit\n"));

	do_interrupt_common = apix_do_interrupt;
	addintr = apix_add_avintr;
	remintr = apix_rem_avintr;
	get_pending_spl = apix_get_pending_spl;
	get_intr_handler = apix_get_intr_handler;
	psm_get_localapicid = apic_get_localapicid;
	psm_get_ioapicid = apic_get_ioapicid;

	apix_softinit();
#if defined(__amd64)
	/*
	 * Make cpu-specific interrupt info point to cr8pri vector
	 */
	CPU->cpu_pri_data = dummy_cpu_pri;
#else
	if (cpuid_have_cr8access(CPU))
		apic_have_32bit_cr8 = 1;
#endif	/* __amd64 */

	/*
	 * Initialize IRM pool parameters
	 */
	if (irm_enable) {
		int i;
		int lowest_irq;
		int highest_irq;

		/* number of CPUs present */
		apix_irminfo.apix_ncpus = apic_nproc;
		/* total number of entries in all of the IOAPICs present */
		lowest_irq = apic_io_vectbase[0];
		highest_irq = apic_io_vectend[0];
		for (i = 1; i < apic_io_max; i++) {
			if (apic_io_vectbase[i] < lowest_irq)
				lowest_irq = apic_io_vectbase[i];
			if (apic_io_vectend[i] > highest_irq)
				highest_irq = apic_io_vectend[i];
		}
		apix_irminfo.apix_ioapic_max_vectors =
		    highest_irq - lowest_irq + 1;
		/*
		 * Number of available per-CPU vectors excluding
		 * reserved vectors for Dtrace, int80, system-call,
		 * fast-trap, etc.
		 */
		apix_irminfo.apix_per_cpu_vectors = APIX_NAVINTR -
		    APIX_SW_RESERVED_VECTORS;

		/* Number of vectors (pre) allocated (SCI and HPET) */
		apix_irminfo.apix_vectors_allocated = 0;
		if (apic_hpet_vect != -1)
			apix_irminfo.apix_vectors_allocated++;
		if (apic_sci_vect != -1)
			apix_irminfo.apix_vectors_allocated++;
	}
}

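/*
 * Per-CPU local APIC setup: program the spurious interrupt vector and
 * the LVT entries (timer, LINT0/1, performance counter, thermal, error
 * and CMCI) for the current CPU.
 */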
static void
apix_init_intr()
{
	processorid_t cpun = psm_get_cpu_id();
	uint_t nlvt;
	uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
	extern void cmi_cmci_trap(void);

	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);

	if (apic_mode == LOCAL_APIC) {
		/*
		 * We are running APIC in MMIO mode.
		 */
		if (apic_flat_model) {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_FLAT_MODEL);
		} else {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_CLUSTER_MODEL);
		}

		apic_reg_ops->apic_write(APIC_DEST_REG,
		    AV_HIGH_ORDER >> cpun);
	}

	if (apic_directed_EOI_supported()) {
		/*
		 * Setting the 12th bit in the Spurious Interrupt Vector
		 * Register suppresses broadcast EOIs generated by the local
		 * APIC. The suppression of broadcast EOIs happens only when
		 * interrupts are level-triggered.
		 */
		svr |= APIC_SVR_SUPPRESS_BROADCAST_EOI;
	}

	/* need to enable APIC before unmasking NMI */
	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);

	/*
	 * Presence of an invalid vector with delivery mode AV_FIXED can
	 * cause an error interrupt, even if the entry is masked...so
	 * write a valid vector to LVT entries along with the mask bit
	 */

	/* All APICs have timer and LINT0/1 */
	apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT1, AV_NMI);	/* enable NMI */

	/*
	 * On integrated APICs, the number of LVT entries is
	 * 'Max LVT entry' + 1; on 82489DX's (non-integrated
	 * APICs), nlvt is "3" (LINT0, LINT1, and timer)
	 */

	if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
		nlvt = 3;
	} else {
		nlvt = ((apic_reg_ops->apic_read(APIC_VERS_REG) >> 16) &
		    0xFF) + 1;
	}

	if (nlvt >= 5) {
		/* Enable performance counter overflow interrupt */

		if (!is_x86_feature(x86_featureset, X86FSET_MSR))
			apic_enable_cpcovf_intr = 0;
		if (apic_enable_cpcovf_intr) {
			if (apic_cpcovf_vect == 0) {
				int ipl = APIC_PCINT_IPL;

				apic_cpcovf_vect = apix_get_ipivect(ipl, -1);
				ASSERT(apic_cpcovf_vect);

				(void) add_avintr(NULL, ipl,
				    (avfunc)kcpc_hw_overflow_intr,
				    "apic pcint", apic_cpcovf_vect,
				    NULL, NULL, NULL, NULL);
				kcpc_hw_overflow_intr_installed = 1;
				kcpc_hw_enable_cpc_intr =
				    apic_cpcovf_mask_clear;
			}
			apic_reg_ops->apic_write(APIC_PCINT_VECT,
			    apic_cpcovf_vect);
		}
	}

	if (nlvt >= 6) {
		/* Only mask TM intr if the BIOS apparently doesn't use it */

		uint32_t lvtval;

		lvtval = apic_reg_ops->apic_read(APIC_THERM_VECT);
		if (((lvtval & AV_MASK) == AV_MASK) ||
		    ((lvtval & AV_DELIV_MODE) != AV_SMI)) {
			apic_reg_ops->apic_write(APIC_THERM_VECT,
			    AV_MASK|APIC_RESV_IRQ);
		}
	}

	/* Enable error interrupt */

	if (nlvt >= 4 && apic_enable_error_intr) {
		if (apic_errvect == 0) {
			int ipl = 0xf;	/* get highest priority intr */
			apic_errvect = apix_get_ipivect(ipl, -1);
			ASSERT(apic_errvect);
			/*
			 * Not PSMI compliant, but we are going to merge
			 * with ON anyway
			 */
			(void) add_avintr(NULL, ipl,
			    (avfunc)apic_error_intr, "apic error intr",
			    apic_errvect, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_ERR_VECT, apic_errvect);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
	}

	/* Enable CMCI interrupt */
	if (cmi_enable_cmci) {
		mutex_enter(&cmci_cpu_setup_lock);
		if (cmci_cpu_setup_registered == 0) {
			mutex_enter(&cpu_lock);
			register_cpu_setup_func(cmci_cpu_setup, NULL);
			mutex_exit(&cpu_lock);
			cmci_cpu_setup_registered = 1;
		}
		mutex_exit(&cmci_cpu_setup_lock);

		if (apic_cmci_vect == 0) {
			int ipl = 0x2;
			apic_cmci_vect = apix_get_ipivect(ipl, -1);
			ASSERT(apic_cmci_vect);

			(void) add_avintr(NULL, ipl,
			    (avfunc)cmi_cmci_trap, "apic cmci intr",
			    apic_cmci_vect, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect);
	}

	apic_reg_ops->apic_write_task_reg(0);
}

static void
apix_picinit(void)
{
	int i, j;
	uint_t isr;

	APIC_VERBOSE(INIT, (CE_CONT, "apix: psm_picinit\n"));

	/*
	 * initialize interrupt remapping before apic
	 * hardware initialization
	 */
	apic_intrmap_init(apic_mode);
	if (apic_vt_ops == psm_vt_ops)
		apix_mul_ioapic_method = APIC_MUL_IOAPIC_IIR;

	/*
	 * On the UniSys Model 6520, the BIOS leaves the vector 0x20 ISR
	 * bit on without clearing it with an EOI.  Since softint uses
	 * vector 0x20 to interrupt itself, softint will not work on this
	 * machine.  To fix this problem, a check is made to verify that
	 * all the ISR bits are clear.  If not, EOIs are issued to clear
	 * the bits.
	 */
	for (i = 7; i >= 1; i--) {
		isr = apic_reg_ops->apic_read(APIC_ISR_REG + (i * 4));
		if (isr != 0)
			for (j = 0; ((j < 32) && (isr != 0)); j++)
				if (isr & (1 << j)) {
					apic_reg_ops->apic_write(
					    APIC_EOI_REG, 0);
					isr &= ~(1 << j);
					apic_error |= APIC_ERR_BOOT_EOI;
				}
	}

	/* set a flag so we know we have run apic_picinit() */
	apic_picinit_called = 1;
	LOCK_INIT_CLEAR(&apic_gethrtime_lock);
	LOCK_INIT_CLEAR(&apic_ioapic_lock);
	LOCK_INIT_CLEAR(&apic_error_lock);
	LOCK_INIT_CLEAR(&apic_mode_switch_lock);

	picsetup();	/* initialise the 8259 */

	/* add nmi handler - least priority nmi handler */
	LOCK_INIT_CLEAR(&apic_nmi_lock);

	if (!psm_add_nmintr(0, (avfunc) apic_nmi_intr,
	    "apix NMI handler", (caddr_t)NULL))
		cmn_err(CE_WARN, "apix: Unable to add nmi handler");

	apix_init_intr();

	/* enable apic mode if imcr present */
	if (apic_imcrp) {
		outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
		outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC);
	}

	ioapix_init_intr(IOAPIC_MASK);

	/* setup global IRM pool if applicable */
	if (irm_enable)
		apix_irm_init();
}

static __inline__ void
apix_send_eoi(void)
{
	if (apic_mode == LOCAL_APIC)
		LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
	else
		X2APIC_WRITE(APIC_EOI_REG, 0);
}

/*
 * platform_intr_enter
 *
 *	Called at the beginning of the interrupt service routine to
 *	mask all levels equal to and below the interrupt priority
 *	of the interrupting vector.  An EOI should be given to
 *	the interrupt controller to enable other HW interrupts.
 *
 *	Return -1 for spurious interrupts
 *
 */
static int
apix_intr_enter(int ipl, int *vectorp)
{
	struct cpu *cpu = CPU;
	uint32_t cpuid = CPU->cpu_id;
	apic_cpus_info_t *cpu_infop;
	uchar_t vector;
	apix_vector_t *vecp;
	int nipl = -1;

	/*
	 * The real vector delivered is (*vectorp + 0x20), but our caller
	 * subtracts 0x20 from the vector before passing it to us.
	 * (That's why APIC_BASE_VECT is 0x20.)
	 */
	vector = *vectorp = (uchar_t)*vectorp + APIC_BASE_VECT;

	cpu_infop = &apic_cpus[cpuid];
	if (vector == APIC_SPUR_INTR) {
		cpu_infop->aci_spur_cnt++;
		return (APIC_INT_SPURIOUS);
	}

	vecp = xv_vector(cpuid, vector);
	if (vecp == NULL) {
		if (APIX_IS_FAKE_INTR(vector))
			nipl = apix_rebindinfo.i_pri;
		apix_send_eoi();
		return (nipl);
	}
	nipl = vecp->v_pri;

	/* if interrupted by the clock, increment apic_nsec_since_boot */
	if (vector == (apic_clkvect + APIC_BASE_VECT)) {
		if (!apic_oneshot) {
			/* NOTE: this is not MT aware */
			apic_hrtime_stamp++;
			apic_nsec_since_boot += apic_nsec_per_intr;
			apic_hrtime_stamp++;
			last_count_read = apic_hertz_count;
			apix_redistribute_compute();
		}

		apix_send_eoi();

		return (nipl);
	}

	ASSERT(vecp->v_state != APIX_STATE_OBSOLETED);

	/* pre-EOI handling for level-triggered interrupts */
	if (!APIX_IS_DIRECTED_EOI(apix_mul_ioapic_method) &&
	    (vecp->v_type & APIX_TYPE_FIXED) && apic_level_intr[vecp->v_inum])
		apix_level_intr_pre_eoi(vecp->v_inum);

	/* send back EOI */
	apix_send_eoi();

	cpu_infop->aci_current[nipl] = vector;
	if ((nipl > ipl) && (nipl > cpu->cpu_base_spl)) {
		cpu_infop->aci_curipl = (uchar_t)nipl;
		cpu_infop->aci_ISR_in_progress |= 1 << nipl;
	}

#ifdef	DEBUG
	if (vector >= APIX_IPI_MIN)
		return (nipl);	/* skip IPI */

	APIC_DEBUG_BUF_PUT(vector);
	APIC_DEBUG_BUF_PUT(vecp->v_inum);
	APIC_DEBUG_BUF_PUT(nipl);
	APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
	if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
		drv_usecwait(apic_stretch_interrupts);
#endif /* DEBUG */

	return (nipl);
}

/*
 * Any changes made to this function must also change X2APIC
 * version of intr_exit.
 */
static void
apix_intr_exit(int prev_ipl, int arg2)
{
	int cpuid = psm_get_cpu_id();
	apic_cpus_info_t *cpu_infop = &apic_cpus[cpuid];
	apix_impl_t *apixp = apixs[cpuid];

	UNREFERENCED_1PARAMETER(arg2);

	cpu_infop->aci_curipl = (uchar_t)prev_ipl;
	/* ISR above current pri could not be in progress */
	cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1;

	if (apixp->x_obsoletes != NULL) {
		if (APIX_CPU_LOCK_HELD(cpuid))
			return;

		APIX_ENTER_CPU_LOCK(cpuid);
		(void) apix_obsolete_vector(apixp->x_obsoletes);
		APIX_LEAVE_CPU_LOCK(cpuid);
	}
}

/*
 * Mask all interrupts below or equal to the given IPL.
 * Any changes made to this function must also change X2APIC
 * version of setspl.
 */
static void
apix_setspl(int ipl)
{
	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;

	/*
	 * Mask all interrupts for XC_HI_PIL (i.e set TPR to 0xf).
	 * Otherwise, enable all interrupts (i.e. set TPR to 0).
	 */
	if (ipl != XC_HI_PIL)
		ipl = 0;

#if defined(__amd64)
	setcr8((ulong_t)ipl);
#else
	if (apic_have_32bit_cr8)
		setcr8((ulong_t)ipl);
	else
		apicadr[APIC_TASK_REG] = ipl << APIC_IPL_SHIFT;
#endif

	/*
	 * this is a patch fix for the ALR QSMP P5 machine, so that interrupts
	 * have enough time to come in before the priority is raised again
	 * during the idle() loop.
	 */
	if (apic_setspl_delay)
		(void) apic_reg_ops->apic_get_pri();
}

/*
 * X2APIC version of setspl.
 */
static void
x2apix_setspl(int ipl)
{
	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;

	/*
	 * Mask all interrupts for XC_HI_PIL (i.e set TPR to 0xf).
	 * Otherwise, enable all interrupts (i.e. set TPR to 0).
	 */
	if (ipl != XC_HI_PIL)
		ipl = 0;

	X2APIC_WRITE(APIC_TASK_REG, ipl << APIC_IPL_SHIFT);
}

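/*
 * Called when a handler is added for the given virtual vector.  For
 * fixed (IOAPIC) interrupts the share count on the IRQ is bumped; the
 * hardware is only programmed when the first handler shows up, and IPI
 * vectors or additions made before apix_picinit() need no setup here.
 */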
int
apix_addspl(int virtvec, int ipl, int min_ipl, int max_ipl)
{
	uint32_t cpuid = APIX_VIRTVEC_CPU(virtvec);
	uchar_t vector = (uchar_t)APIX_VIRTVEC_VECTOR(virtvec);
	apix_vector_t *vecp = xv_vector(cpuid, vector);

	UNREFERENCED_3PARAMETER(ipl, min_ipl, max_ipl);
	ASSERT(vecp != NULL && LOCK_HELD(&apix_lock));

	if (vecp->v_type == APIX_TYPE_FIXED)
		apix_intx_set_shared(vecp->v_inum, 1);

	/* There are more interrupts, so it's already been enabled */
	if (vecp->v_share > 1)
		return (PSM_SUCCESS);

	/* return if it is not hardware interrupt */
	if (vecp->v_type == APIX_TYPE_IPI)
		return (PSM_SUCCESS);

	/*
	 * if apix_picinit() has not been called yet, just return.
	 * At the end of apic_picinit(), we will call setup_io_intr().
	 */
	if (!apic_picinit_called)
		return (PSM_SUCCESS);

	(void) apix_setup_io_intr(vecp);

	return (PSM_SUCCESS);
}

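/*
 * Undo apix_addspl(): drop the share count for fixed interrupts and
 * disable the vector once the last handler has been removed.
 */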
int
apix_delspl(int virtvec, int ipl, int min_ipl, int max_ipl)
{
	uint32_t cpuid = APIX_VIRTVEC_CPU(virtvec);
	uchar_t vector = (uchar_t)APIX_VIRTVEC_VECTOR(virtvec);
	apix_vector_t *vecp = xv_vector(cpuid, vector);

	UNREFERENCED_3PARAMETER(ipl, min_ipl, max_ipl);
	ASSERT(vecp != NULL && LOCK_HELD(&apix_lock));

	if (vecp->v_type == APIX_TYPE_FIXED)
		apix_intx_set_shared(vecp->v_inum, -1);

	/* There are more interrupts */
	if (vecp->v_share > 1)
		return (PSM_SUCCESS);

	/* return if it is not hardware interrupt */
	if (vecp->v_type == APIX_TYPE_IPI)
		return (PSM_SUCCESS);

	if (!apic_picinit_called) {
		cmn_err(CE_WARN, "apix: delete 0x%x before apic init",
		    virtvec);
		return (PSM_SUCCESS);
	}

	apix_disable_vector(vecp);

	return (PSM_SUCCESS);
}

/*
 * Try and disable all interrupts. We just assign interrupts to other
 * processors based on policy. If any were bound by user request, we
 * let them continue and return failure. We do not bother to check
 * for cache affinity while rebinding.
 */
static int
apix_disable_intr(processorid_t cpun)
{
	apix_impl_t *apixp = apixs[cpun];
	apix_vector_t *vecp, *newp;
	int bindcpu, i, hardbound = 0, errbound = 0, ret, loop, type;

	lock_set(&apix_lock);

	apic_cpus[cpun].aci_status &= ~APIC_CPU_INTR_ENABLE;
	apic_cpus[cpun].aci_curipl = 0;

	/* if this is for SUSPEND operation, skip rebinding */
	if (apic_cpus[cpun].aci_status & APIC_CPU_SUSPEND) {
		for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
			vecp = apixp->x_vectbl[i];
			if (!IS_VECT_ENABLED(vecp))
				continue;

			apix_disable_vector(vecp);
		}
		lock_clear(&apix_lock);
		return (PSM_SUCCESS);
	}

	for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
		vecp = apixp->x_vectbl[i];
		if (!IS_VECT_ENABLED(vecp))
			continue;

		if (vecp->v_flags & APIX_VECT_USER_BOUND) {
			hardbound++;
			continue;
		}
		type = vecp->v_type;

		/*
		 * If there are bound interrupts on this cpu, then
		 * rebind them to other processors.
		 */
		loop = 0;
		do {
			bindcpu = apic_find_cpu(APIC_CPU_INTR_ENABLE);

			if (type != APIX_TYPE_MSI)
				newp = apix_set_cpu(vecp, bindcpu, &ret);
			else
				newp = apix_grp_set_cpu(vecp, bindcpu, &ret);
		} while ((newp == NULL) && (loop++ < apic_nproc));

		if (loop >= apic_nproc) {
			errbound++;
			cmn_err(CE_WARN, "apix: failed to rebind vector %x/%x",
			    vecp->v_cpuid, vecp->v_vector);
		}
	}

	lock_clear(&apix_lock);

93712683SJimmy.Vetayases@oracle.com if (hardbound || errbound) {
		cmn_err(CE_WARN, "Could not disable interrupts on CPU %d "
		    "due to user bound interrupts or failed operation",
		    cpun);
		return (PSM_FAILURE);
	}

	return (PSM_SUCCESS);
}

/*
 * Bind interrupts to specified CPU
 */
static void
apix_enable_intr(processorid_t cpun)
{
	apix_vector_t *vecp;
	int i, ret;
	processorid_t n;

	lock_set(&apix_lock);

	apic_cpus[cpun].aci_status |= APIC_CPU_INTR_ENABLE;

	/* interrupt enabling for system resume */
	if (apic_cpus[cpun].aci_status & APIC_CPU_SUSPEND) {
		for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
			vecp = xv_vector(cpun, i);
			if (!IS_VECT_ENABLED(vecp))
				continue;

			apix_enable_vector(vecp);
		}
		apic_cpus[cpun].aci_status &= ~APIC_CPU_SUSPEND;
	}

	for (n = 0; n < apic_nproc; n++) {
		if (!apic_cpu_in_range(n) || n == cpun ||
		    (apic_cpus[n].aci_status & APIC_CPU_INTR_ENABLE) == 0)
			continue;

		for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
			vecp = xv_vector(n, i);
			if (!IS_VECT_ENABLED(vecp) ||
			    vecp->v_bound_cpuid != cpun)
				continue;

			if (vecp->v_type != APIX_TYPE_MSI)
				(void) apix_set_cpu(vecp, cpun, &ret);
			else
				(void) apix_grp_set_cpu(vecp, cpun, &ret);
		}
	}

	lock_clear(&apix_lock);
}

/*
 * Allocate vector for IPI
 * type == -1 indicates it is an internal request. Do not change
 * resv_vector for these requests.
 */
static int
apix_get_ipivect(int ipl, int type)
{
	uchar_t vector;

	if ((vector = apix_alloc_ipi(ipl)) > 0) {
		if (type != -1)
			apic_resv_vector[ipl] = vector;
		return (vector);
	}
	apic_error |= APIC_ERR_GET_IPIVECT_FAIL;
	return (-1);	/* shouldn't happen */
}

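/*
 * Allocate a vector for the clock interrupt at the given ipl and record
 * it in apic_clkvect (relative to APIC_BASE_VECT).
 */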
static int
apix_get_clkvect(int ipl)
{
	int vector;

	if ((vector = apix_get_ipivect(ipl, -1)) == -1)
		return (-1);

	apic_clkvect = vector - APIC_BASE_VECT;
	APIC_VERBOSE(IPI, (CE_CONT, "apix: clock vector = %x\n",
	    apic_clkvect));
	return (vector);
}

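/*
 * Per-CPU setup run on an AP after it has started: switch the local APIC
 * to x2APIC mode if appropriate, initialize interrupt handling, and mark
 * the CPU online.
 */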
static int
apix_post_cpu_start()
{
	int cpun;
	static int cpus_started = 1;

	/* We know this CPU + BSP started successfully. */
	cpus_started++;

	/*
	 * On BSP we would have enabled X2APIC, if supported by processor,
	 * in acpi_probe(), but on AP we do it here.
	 *
	 * We enable X2APIC mode only if BSP is running in X2APIC & the
	 * local APIC mode of the current CPU is MMIO (xAPIC).
	 */
	if (apic_mode == LOCAL_X2APIC && apic_detect_x2apic() &&
	    apic_local_mode() == LOCAL_APIC) {
		apic_enable_x2apic();
	}

	/*
	 * Switch back to x2apic IPI sending method for performance when target
	 * CPU has entered x2apic mode.
	 */
	if (apic_mode == LOCAL_X2APIC) {
		apic_switch_ipi_callback(B_FALSE);
	}

	splx(ipltospl(LOCK_LEVEL));
	apix_init_intr();

	/*
	 * Some systems don't enable the internal cache on the non-boot
	 * cpus, so we have to enable it here.
	 */
	setcr0(getcr0() & ~(CR0_CD | CR0_NW));

#ifdef DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC)
		APIC_AV_PENDING_SET();
#endif /* DEBUG */

	/*
	 * We may be booting, or resuming from suspend; aci_status will
	 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
	 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
	 */
	cpun = psm_get_cpu_id();
	apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;

	apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);

	return (PSM_SUCCESS);
}

/*
 * If this module needs a periodic handler for the interrupt distribution, it
 * can be added here. The argument to the periodic handler is not currently
 * used, but is reserved for future use.
 */
static void
apix_post_cyclic_setup(void *arg)
{
	UNREFERENCED_1PARAMETER(arg);

	/* cpu_lock is held */
	/* set up a periodic handler for intr redistribution */

	/*
	 * In periodic mode, intr redistribution processing is done in
	 * apic_intr_enter during clock intr processing.
	 */
	if (!apic_oneshot)
		return;

	/*
	 * Register a periodic handler for the redistribution processing.
	 * On X86, CY_LOW_LEVEL is mapped to the level 2 interrupt, so
	 * DDI_IPL_2 should be passed to ddi_periodic_add() here.
	 */
	apic_periodic_id = ddi_periodic_add(
	    (void (*)(void *))apix_redistribute_compute, NULL,
	    apic_redistribute_sample_interval, DDI_IPL_2);
}

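/*
 * Update the psm_ops entry points (setspl and IPI send routine) to their
 * x2APIC variants and switch apic_mode to LOCAL_X2APIC.
 */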
void
x2apic_update_psm()
{
	struct psm_ops *pops = &apix_ops;

	ASSERT(pops != NULL);

	/*
	 * The xxx_intr_exit() sets TPR and sends back EOI. The
	 * xxx_setspl() sets TPR. These two routines are not
	 * needed in new design.
	 *
	 * pops->psm_intr_exit = x2apic_intr_exit;
	 * pops->psm_setspl = x2apic_setspl;
	 */
	pops->psm_setspl = x2apix_setspl;
	pops->psm_send_ipi = x2apic_send_ipi;

	send_dirintf = pops->psm_send_ipi;

	apic_mode = LOCAL_X2APIC;
	apic_change_ops();
}

/*
 * This function provides an external interface to the nexus for all
 * functionality related to the new DDI interrupt framework.
 *
 * Input:
 * dip - pointer to the dev_info structure of the requested device
 * hdlp - pointer to the internal interrupt handle structure for the
 *	requested interrupt
 * intr_op - opcode for this call
 * result - pointer to the integer that will hold the result to be
 *	passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
static int
apix_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	int cap;
	apix_vector_t *vecp, *newvecp;
	struct intrspec *ispec, intr_spec;
	processorid_t target;

	ispec = &intr_spec;
	ispec->intrspec_pri = hdlp->ih_pri;
	ispec->intrspec_vec = hdlp->ih_inum;
	ispec->intrspec_func = hdlp->ih_cb_func;

	switch (intr_op) {
	case PSM_INTR_OP_ALLOC_VECTORS:
		switch (hdlp->ih_type) {
		case DDI_INTR_TYPE_MSI:
			/* allocate MSI vectors */
			*result = apix_alloc_msi(dip, hdlp->ih_inum,
			    hdlp->ih_scratch1,
			    (int)(uintptr_t)hdlp->ih_scratch2);
			break;
		case DDI_INTR_TYPE_MSIX:
			/* allocate MSI-X vectors */
			*result = apix_alloc_msix(dip, hdlp->ih_inum,
			    hdlp->ih_scratch1,
			    (int)(uintptr_t)hdlp->ih_scratch2);
			break;
		case DDI_INTR_TYPE_FIXED:
			/* allocate or share vector for fixed */
			if ((ihdl_plat_t *)hdlp->ih_private == NULL) {
				return (PSM_FAILURE);
			}
			ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
			*result = apix_intx_alloc_vector(dip, hdlp->ih_inum,
			    ispec);
			break;
		default:
			return (PSM_FAILURE);
		}
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		apix_free_vectors(dip, hdlp->ih_inum, hdlp->ih_scratch1,
		    hdlp->ih_type);
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		/*
		 * Vectors are allocated by ALLOC and freed by FREE.
		 * XLATE finds and returns APIX_VIRTVEC_VECTOR(cpu, vector).
		 */
		*result = APIX_INVALID_VECT;
		vecp = apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type);
		if (vecp != NULL) {
			*result = APIX_VIRTVECTOR(vecp->v_cpuid,
			    vecp->v_vector);
			break;
		}

		/*
		 * No vector-to-device mapping exists. If this is a FIXED
		 * type, check whether this IRQ is already mapped for another
		 * device and, if so, return its vector number (i.e. the
		 * shared IRQ case). Otherwise, return PSM_FAILURE.
		 */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			vecp = apix_intx_xlate_vector(dip, hdlp->ih_inum,
			    ispec);
			*result = (vecp == NULL) ? APIX_INVALID_VECT :
			    APIX_VIRTVECTOR(vecp->v_cpuid, vecp->v_vector);
		}
		if (*result == APIX_INVALID_VECT)
			return (PSM_FAILURE);
		break;
	case PSM_INTR_OP_GET_PENDING:
		vecp = apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type);
		if (vecp == NULL)
			return (PSM_FAILURE);

		*result = apix_get_pending(vecp);
		break;
	case PSM_INTR_OP_CLEAR_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);

		vecp = apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type);
		if (vecp == NULL)
			return (PSM_FAILURE);

		apix_intx_clear_mask(vecp->v_inum);
		break;
	case PSM_INTR_OP_SET_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);

		vecp = apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type);
		if (vecp == NULL)
			return (PSM_FAILURE);

		apix_intx_set_mask(vecp->v_inum);
		break;
	case PSM_INTR_OP_GET_SHARED:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);

		vecp = apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type);
		if (vecp == NULL)
			return (PSM_FAILURE);

		*result = apix_intx_get_shared(vecp->v_inum);
		break;
	case PSM_INTR_OP_SET_PRI:
		/*
		 * Called prior to adding the interrupt handler or when
		 * an interrupt handler is unassigned.
		 */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			return (PSM_SUCCESS);

		if (apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type) == NULL)
			return (PSM_FAILURE);

		break;
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GRP_SET_CPU:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a CPU value.
		 */
		*result = EINVAL;
		target = (int)(intptr_t)hdlp->ih_private;
		if (!apic_cpu_in_range(target)) {
			DDI_INTR_IMPLDBG((CE_WARN,
			    "[grp_]set_cpu: cpu out of range: %d\n", target));
			return (PSM_FAILURE);
		}

		lock_set(&apix_lock);

		vecp = apix_get_req_vector(hdlp, hdlp->ih_flags);
		if (!IS_VECT_ENABLED(vecp)) {
			DDI_INTR_IMPLDBG((CE_WARN,
			    "[grp]_set_cpu: invalid vector 0x%x\n",
			    hdlp->ih_vector));
			lock_clear(&apix_lock);
			return (PSM_FAILURE);
		}

		*result = 0;

		if (intr_op == PSM_INTR_OP_SET_CPU)
			newvecp = apix_set_cpu(vecp, target, result);
		else
			newvecp = apix_grp_set_cpu(vecp, target, result);

		lock_clear(&apix_lock);

		if (newvecp == NULL) {
			*result = EIO;
			return (PSM_FAILURE);
		}
		newvecp->v_bound_cpuid = target;
		hdlp->ih_vector = APIX_VIRTVECTOR(newvecp->v_cpuid,
		    newvecp->v_vector);
		break;

	case PSM_INTR_OP_GET_INTR:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a pointer to an apic_get_intr_t.
		 */
		if (apix_get_intr_info(hdlp, hdlp->ih_private) != PSM_SUCCESS)
			return (PSM_FAILURE);
		break;

	case PSM_INTR_OP_CHECK_MSI:
		/*
		 * Check whether MSI/X is supported at the APIC level and,
		 * if not, mask off the MSI/X bits in hdlp->ih_type before
		 * returning. If MSI/X is supported, leave ih_type unchanged.
		 *
		 * hdlp->ih_type passed in from the nexus has all the
		 * interrupt types supported by the device.
		 */
		if (apic_support_msi == 0) {	/* uninitialized */
			/*
			 * if apic_support_msi is not set, call
			 * apic_check_msi_support() to check whether msi
			 * is supported first
			 */
			if (apic_check_msi_support() == PSM_SUCCESS)
				apic_support_msi = 1;	/* supported */
			else
				apic_support_msi = -1;	/* not-supported */
		}
		if (apic_support_msi == 1) {
			if (apic_msix_enable)
				*result = hdlp->ih_type;
			else
				*result = hdlp->ih_type & ~DDI_INTR_TYPE_MSIX;
		} else
			*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
			    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_GET_CAP:
		cap = DDI_INTR_FLAG_PENDING;
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			cap |= DDI_INTR_FLAG_MASKABLE;
		*result = cap;
		break;
	case PSM_INTR_OP_APIC_TYPE:
		((apic_get_type_t *)(hdlp->ih_private))->avgi_type =
		    apix_get_apic_type();
		((apic_get_type_t *)(hdlp->ih_private))->avgi_num_intr =
		    APIX_IPI_MIN;
		((apic_get_type_t *)(hdlp->ih_private))->avgi_num_cpu =
		    apic_nproc;
		hdlp->ih_ver = apic_get_apic_version();
		break;
	case PSM_INTR_OP_SET_CAP:
	default:
		return (PSM_FAILURE);
	}

	return (PSM_SUCCESS);
}

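/*
 * Clear the busy counters used for interrupt load accounting, both
 * per-CPU (aci_busy) and per-vector (v_busy).
 */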
static void
apix_cleanup_busy(void)
{
	int i, j;
	apix_vector_t *vecp;

	for (i = 0; i < apic_nproc; i++) {
		if (!apic_cpu_in_range(i))
			continue;
		apic_cpus[i].aci_busy = 0;
		for (j = APIX_AVINTR_MIN; j < APIX_AVINTR_MAX; j++) {
			if ((vecp = xv_vector(i, j)) != NULL)
				vecp->v_busy = 0;
		}
	}
}

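/*
 * Called periodically (from the handler registered in
 * apix_post_cyclic_setup) to sample interrupt activity per CPU and per
 * vector for dynamic interrupt redistribution.
 */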
static void
apix_redistribute_compute(void)
{
	int i, j, max_busy;

	if (!apic_enable_dynamic_migration)
		return;

	if (++apic_nticks == apic_sample_factor_redistribution) {
		/*
		 * Time to call apic_intr_redistribute().
		 * Reset apic_nticks. This causes max_busy
		 * to be calculated below, and if it is more than
		 * apic_int_busy_mark, we do the whole thing.
		 */
		apic_nticks = 0;
	}
	max_busy = 0;
	for (i = 0; i < apic_nproc; i++) {
		if (!apic_cpu_in_range(i))
			continue;
		/*
		 * Check if curipl is non zero & if ISR is in
		 * progress
		 */
		if (((j = apic_cpus[i].aci_curipl) != 0) &&
		    (apic_cpus[i].aci_ISR_in_progress & (1 << j))) {

			int vect;
			apic_cpus[i].aci_busy++;
			vect = apic_cpus[i].aci_current[j];
			apixs[i]->x_vectbl[vect]->v_busy++;
		}

		if (!apic_nticks &&
		    (apic_cpus[i].aci_busy > max_busy))
			max_busy = apic_cpus[i].aci_busy;
	}
	if (!apic_nticks) {
		if (max_busy > apic_int_busy_mark) {
			/*
			 * We could make the following check be
			 * skipped > 1 in which case, we get a
			 * redistribution at half the busy mark (due to
			 * double interval). Need to be able to collect
			 * more empirical data to decide if that is a
			 * good strategy. Punt for now.
			 */
			apix_cleanup_busy();
			apic_skipped_redistribute = 0;
		} else
			apic_skipped_redistribute++;
	}
}

/*
 * intr_ops() service routines
 */

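/*
 * Return non-zero if the given vector has an interrupt pending, checking
 * the local APIC IRR on the bound CPU and, for fixed interrupts, the
 * IO-APIC as well.
 */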
static int
apix_get_pending(apix_vector_t *vecp)
{
	int bit, index, irr, pending;

	/* need to get on the bound cpu */
	mutex_enter(&cpu_lock);
	affinity_set(vecp->v_cpuid);

	index = vecp->v_vector / 32;
	bit = vecp->v_vector % 32;
	irr = apic_reg_ops->apic_read(APIC_IRR_REG + index);

	affinity_clear();
	mutex_exit(&cpu_lock);

	pending = (irr & (1 << bit)) ? 1 : 0;
	if (!pending && vecp->v_type == APIX_TYPE_FIXED)
		pending = apix_intx_get_pending(vecp->v_inum);

	return (pending);
}

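/*
 * Look up the apix vector referred to by an interrupt handle, selecting by
 * IRQ number, by virtual vector (CPU, vector), or by the device mapping,
 * depending on the PSMGI_INTRBY_* flag.
 */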
static apix_vector_t *
apix_get_req_vector(ddi_intr_handle_impl_t *hdlp, ushort_t flags)
{
	apix_vector_t *vecp;
	processorid_t cpuid;
	int32_t virt_vec = 0;

	switch (flags & PSMGI_INTRBY_FLAGS) {
	case PSMGI_INTRBY_IRQ:
		return (apix_intx_get_vector(hdlp->ih_vector));
	case PSMGI_INTRBY_VEC:
		virt_vec = (virt_vec == 0) ? hdlp->ih_vector : virt_vec;

		cpuid = APIX_VIRTVEC_CPU(virt_vec);
		if (!apic_cpu_in_range(cpuid))
			return (NULL);

		vecp = xv_vector(cpuid, APIX_VIRTVEC_VECTOR(virt_vec));
		break;
	case PSMGI_INTRBY_DEFAULT:
		vecp = apix_get_dev_map(hdlp->ih_dip, hdlp->ih_inum,
		    hdlp->ih_type);
		break;
	default:
		return (NULL);
	}

	return (vecp);
}

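/*
 * Fill in the apic_get_intr_t structure for the PSM_INTR_OP_GET_INTR
 * request: bound CPU, vector, share count and, if requested, the list of
 * dips sharing the vector.
 */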
static int
apix_get_intr_info(ddi_intr_handle_impl_t *hdlp,
    apic_get_intr_t *intr_params_p)
{
	apix_vector_t *vecp;
	struct autovec *av_dev;
	int i;

	vecp = apix_get_req_vector(hdlp, intr_params_p->avgi_req_flags);
	if (IS_VECT_FREE(vecp)) {
		intr_params_p->avgi_num_devs = 0;
		intr_params_p->avgi_cpu_id = 0;
		intr_params_p->avgi_req_flags = 0;
		return (PSM_SUCCESS);
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_CPUID) {
		intr_params_p->avgi_cpu_id = vecp->v_cpuid;

		/* Return user bound info for intrd. */
		if (intr_params_p->avgi_cpu_id & IRQ_USER_BOUND) {
			intr_params_p->avgi_cpu_id &= ~IRQ_USER_BOUND;
			intr_params_p->avgi_cpu_id |= PSMGI_CPU_USER_BOUND;
		}
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_VECTOR)
		intr_params_p->avgi_vector = vecp->v_vector;

	if (intr_params_p->avgi_req_flags &
	    (PSMGI_REQ_NUM_DEVS | PSMGI_REQ_GET_DEVS))
		/* Get the number of devices from the vector's share count. */
		intr_params_p->avgi_num_devs = vecp->v_share;

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_GET_DEVS) {

		intr_params_p->avgi_req_flags |= PSMGI_REQ_NUM_DEVS;

		/* Some devices have NULL dip. Don't count these. */
		if (intr_params_p->avgi_num_devs > 0) {
			for (i = 0, av_dev = vecp->v_autovect; av_dev;
			    av_dev = av_dev->av_link) {
				if (av_dev->av_vector && av_dev->av_dip)
					i++;
			}
			intr_params_p->avgi_num_devs =
			    (uint8_t)MIN(intr_params_p->avgi_num_devs, i);
		}

		/* There are no viable dips to return. */
		if (intr_params_p->avgi_num_devs == 0) {
			intr_params_p->avgi_dip_list = NULL;

		} else {	/* Return list of dips */

			/* Allocate space in array for that number of devs. */
			intr_params_p->avgi_dip_list = kmem_zalloc(
			    intr_params_p->avgi_num_devs *
			    sizeof (dev_info_t *),
			    KM_NOSLEEP);
			if (intr_params_p->avgi_dip_list == NULL) {
				DDI_INTR_IMPLDBG((CE_WARN,
				    "apix_get_vector_intr_info: no memory"));
				return (PSM_FAILURE);
			}

			/*
			 * Loop through the device list of the autovec table
			 * filling in the dip array.
			 *
			 * Note that the autovect table may have some special
			 * entries which contain NULL dips. These will be
			 * ignored.
			 */
			for (i = 0, av_dev = vecp->v_autovect; av_dev;
			    av_dev = av_dev->av_link) {
				if (av_dev->av_vector && av_dev->av_dip)
					intr_params_p->avgi_dip_list[i++] =
					    av_dev->av_dip;
			}
		}
	}

	return (PSM_SUCCESS);
}

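/*
 * Return the identification string of this PSM implementation.
 */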
static char *
apix_get_apic_type(void)
{
	return (apix_psm_info.p_mach_idstring);
}

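/*
 * Rebind a single interrupt vector to a new target CPU. For an enabled
 * MSI-X vector its table entry is masked around the rebind (unless the
 * whole function is masked); MSI interrupts that are part of a group are
 * handed off to apix_grp_set_cpu().
 */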
apix_vector_t *
apix_set_cpu(apix_vector_t *vecp, int new_cpu, int *result)
{
	apix_vector_t *newp = NULL;
	dev_info_t *dip;
	int inum, cap_ptr;
	ddi_acc_handle_t handle;
	ddi_intr_msix_t *msix_p = NULL;
	ushort_t msix_ctrl;
	uintptr_t off;
	uint32_t mask;

	ASSERT(LOCK_HELD(&apix_lock));
	*result = ENXIO;

	/* Fail if this is an MSI intr and is part of a group. */
	if (vecp->v_type == APIX_TYPE_MSI) {
		if (i_ddi_intr_get_current_nintrs(APIX_GET_DIP(vecp)) > 1)
			return (NULL);
		else
			return (apix_grp_set_cpu(vecp, new_cpu, result));
	}

	/*
	 * Mask MSI-X. It's unmasked when MSI-X gets enabled.
	 */
	if (vecp->v_type == APIX_TYPE_MSIX && IS_VECT_ENABLED(vecp)) {
		if ((dip = APIX_GET_DIP(vecp)) == NULL)
			return (NULL);
		inum = vecp->v_devp->dv_inum;

		handle = i_ddi_get_pci_config_handle(dip);
		cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
		msix_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);
		if ((msix_ctrl & PCI_MSIX_FUNCTION_MASK) == 0) {
			/*
			 * The function is not masked, so mask the "inum"th
			 * entry in the MSI-X table.
			 */
			msix_p = i_ddi_get_msix(dip);
			off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
			    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;
			mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);
			ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off,
			    mask | 1);
		}
	}

	*result = 0;
	if ((newp = apix_rebind(vecp, new_cpu, 1)) == NULL)
		*result = EIO;

	/* Restore mask bit */
	if (msix_p != NULL)
		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, mask);

	return (newp);
}

/*
 * Set cpu for MSIs
 */
apix_vector_t *
apix_grp_set_cpu(apix_vector_t *vecp, int new_cpu, int *result)
{
	apix_vector_t *newp, *vp;
	uint32_t orig_cpu = vecp->v_cpuid;
	int orig_vect = vecp->v_vector;
	int i, num_vectors, cap_ptr, msi_mask_off;
	uint32_t msi_pvm;
	ushort_t msi_ctrl;
	ddi_acc_handle_t handle;
	dev_info_t *dip;

	APIC_VERBOSE(INTR, (CE_CONT, "apix_grp_set_cpu: oldcpu: %x, vector: %x,"
	    " newcpu:%x\n", vecp->v_cpuid, vecp->v_vector, new_cpu));

	ASSERT(LOCK_HELD(&apix_lock));

	*result = ENXIO;

	if (vecp->v_type != APIX_TYPE_MSI) {
		DDI_INTR_IMPLDBG((CE_WARN, "set_grp: intr not MSI\n"));
		return (NULL);
	}

	if ((dip = APIX_GET_DIP(vecp)) == NULL)
		return (NULL);

	num_vectors = i_ddi_intr_get_current_nintrs(dip);
	if ((num_vectors < 1) || ((num_vectors - 1) & orig_vect)) {
		APIC_VERBOSE(INTR, (CE_WARN,
		    "set_grp: base vec not part of a grp or not aligned: "
		    "vec:0x%x, num_vec:0x%x\n", orig_vect, num_vectors));
		return (NULL);
	}

	if (vecp->v_inum != apix_get_min_dev_inum(dip, vecp->v_type))
		return (NULL);

	*result = EIO;
	for (i = 1; i < num_vectors; i++) {
		if ((vp = xv_vector(orig_cpu, orig_vect + i)) == NULL)
			return (NULL);
#ifdef DEBUG
		/*
		 * Sanity check: the CPU and dip should be the same for all
		 * entries. We may be called when the first MSI is being
		 * enabled, at which point add_avintr() has not yet been
		 * called for the other MSIs.
		 */
		if ((vp->v_share != 0) &&
		    ((APIX_GET_DIP(vp) != dip) ||
		    (vp->v_cpuid != vecp->v_cpuid))) {
			APIC_VERBOSE(INTR, (CE_WARN,
			    "set_grp: cpu or dip for vec 0x%x difft than for "
			    "vec 0x%x\n", orig_vect, orig_vect + i));
			APIC_VERBOSE(INTR, (CE_WARN,
			    " cpu: %d vs %d, dip: 0x%p vs 0x%p\n", orig_cpu,
			    vp->v_cpuid, (void *)dip,
			    (void *)APIX_GET_DIP(vp)));
			return (NULL);
		}
#endif /* DEBUG */
	}

	cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
	handle = i_ddi_get_pci_config_handle(dip);
	msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

	/* MSI Per vector masking is supported. */
	if (msi_ctrl & PCI_MSI_PVM_MASK) {
		if (msi_ctrl & PCI_MSI_64BIT_MASK)
			msi_mask_off = cap_ptr + PCI_MSI_64BIT_MASKBITS;
		else
			msi_mask_off = cap_ptr + PCI_MSI_32BIT_MASK;
		msi_pvm = pci_config_get32(handle, msi_mask_off);
		pci_config_put32(handle, msi_mask_off, (uint32_t)-1);
		APIC_VERBOSE(INTR, (CE_CONT,
		    "set_grp: pvm supported. Mask set to 0x%x\n",
		    pci_config_get32(handle, msi_mask_off)));
	}

	if ((newp = apix_rebind(vecp, new_cpu, num_vectors)) != NULL)
		*result = 0;

	/* Reenable vectors if per vector masking is supported. */
	if (msi_ctrl & PCI_MSI_PVM_MASK) {
		pci_config_put32(handle, msi_mask_off, msi_pvm);
		APIC_VERBOSE(INTR, (CE_CONT,
		    "set_grp: pvm supported. Mask restored to 0x%x\n",
		    pci_config_get32(handle, msi_mask_off)));
	}

	return (newp);
}

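/*
 * Record the CPU and vector assigned to a fixed interrupt in its
 * apic_irq_table entry and refresh the recorded RDT entry.
 */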
void
apix_intx_set_vector(int irqno, uint32_t cpuid, uchar_t vector)
{
	apic_irq_t *irqp;

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno];
	irqp->airq_cpu = cpuid;
	irqp->airq_vector = vector;
	apic_record_rdt_entry(irqp, irqno);
	mutex_exit(&airq_mutex);
}

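/*
 * Return the apix vector currently associated with the given IRQ, or NULL
 * if the IRQ is free or not yet bound to a CPU.
 */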
apix_vector_t *
apix_intx_get_vector(int irqno)
{
	apic_irq_t *irqp;
	uint32_t cpuid;
	uchar_t vector;

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno & 0xff];
	if (IS_IRQ_FREE(irqp) || (irqp->airq_cpu == IRQ_UNINIT)) {
		mutex_exit(&airq_mutex);
		return (NULL);
	}
	cpuid = irqp->airq_cpu;
	vector = irqp->airq_vector;
	mutex_exit(&airq_mutex);

	return (xv_vector(cpuid, vector));
}

/*
 * Must be called with interrupts disabled and apic_ioapic_lock held
 */
void
apix_intx_enable(int irqno)
{
	uchar_t ioapicindex, intin;
	apic_irq_t *irqp = apic_irq_table[irqno];
	ioapic_rdt_t irdt;
	apic_cpus_info_t *cpu_infop;
	apix_vector_t *vecp = xv_vector(irqp->airq_cpu, irqp->airq_vector);

	ASSERT(LOCK_HELD(&apic_ioapic_lock) && !IS_IRQ_FREE(irqp));

	ioapicindex = irqp->airq_ioapicindex;
	intin = irqp->airq_intin_no;
	cpu_infop = &apic_cpus[irqp->airq_cpu];

	irdt.ir_lo = AV_PDEST | AV_FIXED | irqp->airq_rdt_entry;
	irdt.ir_hi = cpu_infop->aci_local_id;

	apic_vt_ops->apic_intrmap_alloc_entry(&vecp->v_intrmap_private, NULL,
	    vecp->v_type, 1, ioapicindex);
	apic_vt_ops->apic_intrmap_map_entry(vecp->v_intrmap_private,
	    (void *)&irdt, vecp->v_type, 1);
	apic_vt_ops->apic_intrmap_record_rdt(vecp->v_intrmap_private, &irdt);

	/* write RDT entry high dword - destination */
	WRITE_IOAPIC_RDT_ENTRY_HIGH_DWORD(ioapicindex, intin,
	    irdt.ir_hi);

	/* Write the vector, trigger, and polarity portion of the RDT */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapicindex, intin, irdt.ir_lo);

	vecp->v_state = APIX_STATE_ENABLED;

	APIC_VERBOSE_IOAPIC((CE_CONT, "apix_intx_enable: ioapic 0x%x"
	    " intin 0x%x rdt_low 0x%x rdt_high 0x%x\n",
	    ioapicindex, intin, irdt.ir_lo, irdt.ir_hi));
}

/*
 * Must be called with interrupts disabled and apic_ioapic_lock held
 */
183812683SJimmy.Vetayases@oracle.com void
apix_intx_disable(int irqno)183912683SJimmy.Vetayases@oracle.com apix_intx_disable(int irqno)
184012683SJimmy.Vetayases@oracle.com {
184112683SJimmy.Vetayases@oracle.com apic_irq_t *irqp = apic_irq_table[irqno];
184212683SJimmy.Vetayases@oracle.com int ioapicindex, intin;
184312683SJimmy.Vetayases@oracle.com
184412683SJimmy.Vetayases@oracle.com ASSERT(LOCK_HELD(&apic_ioapic_lock) && !IS_IRQ_FREE(irqp));
184512683SJimmy.Vetayases@oracle.com /*
184612683SJimmy.Vetayases@oracle.com * The assumption here is that this is safe, even for
184712683SJimmy.Vetayases@oracle.com * systems with IOAPICs that suffer from the hardware
184812683SJimmy.Vetayases@oracle.com * erratum because all devices have been quiesced before
184912683SJimmy.Vetayases@oracle.com * they unregister their interrupt handlers. If that
185012683SJimmy.Vetayases@oracle.com * assumption turns out to be false, this mask operation
185112683SJimmy.Vetayases@oracle.com * can induce the same erratum result we're trying to
185212683SJimmy.Vetayases@oracle.com * avoid.
185312683SJimmy.Vetayases@oracle.com */
185412683SJimmy.Vetayases@oracle.com ioapicindex = irqp->airq_ioapicindex;
185512683SJimmy.Vetayases@oracle.com intin = irqp->airq_intin_no;
185612683SJimmy.Vetayases@oracle.com ioapic_write(ioapicindex, APIC_RDT_CMD + 2 * intin, AV_MASK);
185712683SJimmy.Vetayases@oracle.com
185812683SJimmy.Vetayases@oracle.com APIC_VERBOSE_IOAPIC((CE_CONT, "apix_intx_disable: ioapic 0x%x"
185912683SJimmy.Vetayases@oracle.com " intin 0x%x\n", ioapicindex, intin));
186012683SJimmy.Vetayases@oracle.com }
186112683SJimmy.Vetayases@oracle.com
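/*
 * Mark the IRQ table entry for irqno as free. No-op if the entry is
 * already free.
 */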
186212683SJimmy.Vetayases@oracle.com void
186312683SJimmy.Vetayases@oracle.com apix_intx_free(int irqno)
186412683SJimmy.Vetayases@oracle.com {
186512683SJimmy.Vetayases@oracle.com apic_irq_t *irqp;
186612683SJimmy.Vetayases@oracle.com
186712683SJimmy.Vetayases@oracle.com mutex_enter(&airq_mutex);
186812683SJimmy.Vetayases@oracle.com irqp = apic_irq_table[irqno];
186912683SJimmy.Vetayases@oracle.com
187012683SJimmy.Vetayases@oracle.com if (IS_IRQ_FREE(irqp)) {
187112683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
187212683SJimmy.Vetayases@oracle.com return;
187312683SJimmy.Vetayases@oracle.com }
187412683SJimmy.Vetayases@oracle.com
187512683SJimmy.Vetayases@oracle.com irqp->airq_mps_intr_index = FREE_INDEX;
187612683SJimmy.Vetayases@oracle.com irqp->airq_cpu = IRQ_UNINIT;
187712683SJimmy.Vetayases@oracle.com irqp->airq_vector = APIX_INVALID_VECT;
187812683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
187912683SJimmy.Vetayases@oracle.com }
188012683SJimmy.Vetayases@oracle.com
188112683SJimmy.Vetayases@oracle.com #ifdef DEBUG
188212683SJimmy.Vetayases@oracle.com int apix_intr_deliver_timeouts = 0;
188312683SJimmy.Vetayases@oracle.com int apix_intr_rirr_timeouts = 0;
188412683SJimmy.Vetayases@oracle.com int apix_intr_rirr_reset_failure = 0;
188512683SJimmy.Vetayases@oracle.com #endif
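/*
 * Maximum number of times apix_intx_rebind() re-checks the Remote IRR bit
 * (with a short delay in between) before applying the last-ditch
 * trigger-mode workaround.
 */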
188612683SJimmy.Vetayases@oracle.com int apix_max_reps_irr_pending = 10;
188712683SJimmy.Vetayases@oracle.com
188812683SJimmy.Vetayases@oracle.com #define GET_RDT_BITS(ioapic, intin, bits) \
188912683SJimmy.Vetayases@oracle.com (READ_IOAPIC_RDT_ENTRY_LOW_DWORD((ioapic), (intin)) & (bits))
189012683SJimmy.Vetayases@oracle.com #define APIX_CHECK_IRR_DELAY drv_usectohz(5000)
189112683SJimmy.Vetayases@oracle.com
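/*
 * Rebind a fixed (INTx) interrupt to a new CPU/vector: wait for the I/O
 * APIC delivery status to clear, mask level-triggered entries while any
 * pending Remote IRR drains, update apic_irq_table via
 * apix_intx_set_vector(), then reprogram the RDT entry.
 * Returns 0 on success, 1 if the Remote IRR bit could not be cleared.
 */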
189212683SJimmy.Vetayases@oracle.com int
189312683SJimmy.Vetayases@oracle.com apix_intx_rebind(int irqno, processorid_t cpuid, uchar_t vector)
189412683SJimmy.Vetayases@oracle.com {
189512683SJimmy.Vetayases@oracle.com apic_irq_t *irqp = apic_irq_table[irqno];
189612683SJimmy.Vetayases@oracle.com ulong_t iflag;
189712683SJimmy.Vetayases@oracle.com int waited, ioapic_ix, intin_no, level, repeats, rdt_entry, masked;
189812683SJimmy.Vetayases@oracle.com
189912683SJimmy.Vetayases@oracle.com ASSERT(irqp != NULL);
190012683SJimmy.Vetayases@oracle.com
190112683SJimmy.Vetayases@oracle.com iflag = intr_clear();
190212683SJimmy.Vetayases@oracle.com lock_set(&apic_ioapic_lock);
190312683SJimmy.Vetayases@oracle.com
190412683SJimmy.Vetayases@oracle.com ioapic_ix = irqp->airq_ioapicindex;
190512683SJimmy.Vetayases@oracle.com intin_no = irqp->airq_intin_no;
190612683SJimmy.Vetayases@oracle.com level = apic_level_intr[irqno];
190712683SJimmy.Vetayases@oracle.com
190812683SJimmy.Vetayases@oracle.com /*
190912683SJimmy.Vetayases@oracle.com * Wait for the delivery status bit to be cleared. This should
191012683SJimmy.Vetayases@oracle.com * be a very small amount of time.
191112683SJimmy.Vetayases@oracle.com */
191212683SJimmy.Vetayases@oracle.com repeats = 0;
191312683SJimmy.Vetayases@oracle.com do {
191412683SJimmy.Vetayases@oracle.com repeats++;
191512683SJimmy.Vetayases@oracle.com
191612683SJimmy.Vetayases@oracle.com for (waited = 0; waited < apic_max_reps_clear_pending;
191712683SJimmy.Vetayases@oracle.com waited++) {
191812683SJimmy.Vetayases@oracle.com if (GET_RDT_BITS(ioapic_ix, intin_no, AV_PENDING) == 0)
191912683SJimmy.Vetayases@oracle.com break;
192012683SJimmy.Vetayases@oracle.com }
192112683SJimmy.Vetayases@oracle.com if (!level)
192212683SJimmy.Vetayases@oracle.com break;
192312683SJimmy.Vetayases@oracle.com
192412683SJimmy.Vetayases@oracle.com /*
192512683SJimmy.Vetayases@oracle.com * Mask the RDT entry for level-triggered interrupts.
192612683SJimmy.Vetayases@oracle.com */
192712683SJimmy.Vetayases@oracle.com irqp->airq_rdt_entry |= AV_MASK;
192812683SJimmy.Vetayases@oracle.com rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix,
192912683SJimmy.Vetayases@oracle.com intin_no);
193012683SJimmy.Vetayases@oracle.com if ((masked = (rdt_entry & AV_MASK)) == 0) {
193112683SJimmy.Vetayases@oracle.com /* Mask it */
193212683SJimmy.Vetayases@oracle.com WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix, intin_no,
193312683SJimmy.Vetayases@oracle.com AV_MASK | rdt_entry);
193412683SJimmy.Vetayases@oracle.com }
193512683SJimmy.Vetayases@oracle.com
193612683SJimmy.Vetayases@oracle.com /*
193712683SJimmy.Vetayases@oracle.com * If there was a race and an interrupt was injected
193812683SJimmy.Vetayases@oracle.com * just before we masked, check for that case here.
193912683SJimmy.Vetayases@oracle.com * Then, unmask the RDT entry and try again. If we're
194012683SJimmy.Vetayases@oracle.com * on our last try, don't unmask (because we want the
194112683SJimmy.Vetayases@oracle.com * RDT entry to remain masked for the rest of the
194212683SJimmy.Vetayases@oracle.com * function).
194312683SJimmy.Vetayases@oracle.com */
194412683SJimmy.Vetayases@oracle.com rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix,
194512683SJimmy.Vetayases@oracle.com intin_no);
194612683SJimmy.Vetayases@oracle.com if ((masked == 0) && ((rdt_entry & AV_PENDING) != 0) &&
194712683SJimmy.Vetayases@oracle.com (repeats < apic_max_reps_clear_pending)) {
194812683SJimmy.Vetayases@oracle.com /* Unmask it */
194912683SJimmy.Vetayases@oracle.com WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix,
195012683SJimmy.Vetayases@oracle.com intin_no, rdt_entry & ~AV_MASK);
195112683SJimmy.Vetayases@oracle.com irqp->airq_rdt_entry &= ~AV_MASK;
195212683SJimmy.Vetayases@oracle.com }
195312683SJimmy.Vetayases@oracle.com } while ((rdt_entry & AV_PENDING) &&
195412683SJimmy.Vetayases@oracle.com (repeats < apic_max_reps_clear_pending));
195512683SJimmy.Vetayases@oracle.com
195612683SJimmy.Vetayases@oracle.com #ifdef DEBUG
195712683SJimmy.Vetayases@oracle.com if (GET_RDT_BITS(ioapic_ix, intin_no, AV_PENDING) != 0)
195812683SJimmy.Vetayases@oracle.com apix_intr_deliver_timeouts++;
195912683SJimmy.Vetayases@oracle.com #endif
196012683SJimmy.Vetayases@oracle.com
196112683SJimmy.Vetayases@oracle.com if (!level || !APIX_IS_MASK_RDT(apix_mul_ioapic_method))
196212683SJimmy.Vetayases@oracle.com goto done;
196312683SJimmy.Vetayases@oracle.com
196412683SJimmy.Vetayases@oracle.com /*
196512683SJimmy.Vetayases@oracle.com * wait for remote IRR to be cleared for level-triggered
196612683SJimmy.Vetayases@oracle.com * interrupts
196712683SJimmy.Vetayases@oracle.com */
196812683SJimmy.Vetayases@oracle.com repeats = 0;
196912683SJimmy.Vetayases@oracle.com do {
197012683SJimmy.Vetayases@oracle.com repeats++;
197112683SJimmy.Vetayases@oracle.com
197212683SJimmy.Vetayases@oracle.com for (waited = 0; waited < apic_max_reps_clear_pending;
197312683SJimmy.Vetayases@oracle.com waited++) {
197412683SJimmy.Vetayases@oracle.com if (GET_RDT_BITS(ioapic_ix, intin_no, AV_REMOTE_IRR)
197512683SJimmy.Vetayases@oracle.com == 0)
197612683SJimmy.Vetayases@oracle.com break;
197712683SJimmy.Vetayases@oracle.com }
197812683SJimmy.Vetayases@oracle.com
197912683SJimmy.Vetayases@oracle.com if (GET_RDT_BITS(ioapic_ix, intin_no, AV_REMOTE_IRR) != 0) {
198012683SJimmy.Vetayases@oracle.com lock_clear(&apic_ioapic_lock);
198112683SJimmy.Vetayases@oracle.com intr_restore(iflag);
198212683SJimmy.Vetayases@oracle.com
198312683SJimmy.Vetayases@oracle.com delay(APIX_CHECK_IRR_DELAY);
198412683SJimmy.Vetayases@oracle.com
198512683SJimmy.Vetayases@oracle.com iflag = intr_clear();
198612683SJimmy.Vetayases@oracle.com lock_set(&apic_ioapic_lock);
198712683SJimmy.Vetayases@oracle.com }
198812683SJimmy.Vetayases@oracle.com } while (repeats < apix_max_reps_irr_pending);
198912683SJimmy.Vetayases@oracle.com
199012683SJimmy.Vetayases@oracle.com if (repeats >= apix_max_reps_irr_pending) {
199112683SJimmy.Vetayases@oracle.com #ifdef DEBUG
199212683SJimmy.Vetayases@oracle.com apix_intr_rirr_timeouts++;
199312683SJimmy.Vetayases@oracle.com #endif
199412683SJimmy.Vetayases@oracle.com
199512683SJimmy.Vetayases@oracle.com /*
199612683SJimmy.Vetayases@oracle.com 		 * If we have waited apix_max_reps_irr_pending times and the
199712683SJimmy.Vetayases@oracle.com 		 * Remote IRR bit is still not cleared, try the last-ditch
199812683SJimmy.Vetayases@oracle.com 		 * workaround:
199912683SJimmy.Vetayases@oracle.com */
200012683SJimmy.Vetayases@oracle.com if (GET_RDT_BITS(ioapic_ix, intin_no, AV_REMOTE_IRR) != 0) {
200112683SJimmy.Vetayases@oracle.com /*
200212683SJimmy.Vetayases@oracle.com * Trying to clear the bit through normal
200312683SJimmy.Vetayases@oracle.com * channels has failed. So as a last-ditch
200412683SJimmy.Vetayases@oracle.com * effort, try to set the trigger mode to
200512683SJimmy.Vetayases@oracle.com * edge, then to level. This has been
200612683SJimmy.Vetayases@oracle.com * observed to work on many systems.
200712683SJimmy.Vetayases@oracle.com */
200812683SJimmy.Vetayases@oracle.com WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix,
200912683SJimmy.Vetayases@oracle.com intin_no,
201012683SJimmy.Vetayases@oracle.com READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix,
201112683SJimmy.Vetayases@oracle.com intin_no) & ~AV_LEVEL);
201212683SJimmy.Vetayases@oracle.com WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix,
201312683SJimmy.Vetayases@oracle.com intin_no,
201412683SJimmy.Vetayases@oracle.com READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix,
201512683SJimmy.Vetayases@oracle.com intin_no) | AV_LEVEL);
201612683SJimmy.Vetayases@oracle.com }
201712683SJimmy.Vetayases@oracle.com
201812683SJimmy.Vetayases@oracle.com if (GET_RDT_BITS(ioapic_ix, intin_no, AV_REMOTE_IRR) != 0) {
201912683SJimmy.Vetayases@oracle.com #ifdef DEBUG
202012683SJimmy.Vetayases@oracle.com apix_intr_rirr_reset_failure++;
202112683SJimmy.Vetayases@oracle.com #endif
202212683SJimmy.Vetayases@oracle.com lock_clear(&apic_ioapic_lock);
202312683SJimmy.Vetayases@oracle.com intr_restore(iflag);
202412683SJimmy.Vetayases@oracle.com prom_printf("apix: Remote IRR still "
202512683SJimmy.Vetayases@oracle.com "not clear for IOAPIC %d intin %d.\n"
202612683SJimmy.Vetayases@oracle.com "\tInterrupts to this pin may cease "
202712683SJimmy.Vetayases@oracle.com "functioning.\n", ioapic_ix, intin_no);
202812683SJimmy.Vetayases@oracle.com return (1); /* return failure */
202912683SJimmy.Vetayases@oracle.com }
203012683SJimmy.Vetayases@oracle.com }
203112683SJimmy.Vetayases@oracle.com
203212683SJimmy.Vetayases@oracle.com done:
203312683SJimmy.Vetayases@oracle.com /* change apic_irq_table */
203412683SJimmy.Vetayases@oracle.com lock_clear(&apic_ioapic_lock);
203512683SJimmy.Vetayases@oracle.com intr_restore(iflag);
203612683SJimmy.Vetayases@oracle.com apix_intx_set_vector(irqno, cpuid, vector);
203712683SJimmy.Vetayases@oracle.com iflag = intr_clear();
203812683SJimmy.Vetayases@oracle.com lock_set(&apic_ioapic_lock);
203912683SJimmy.Vetayases@oracle.com
204012683SJimmy.Vetayases@oracle.com 	/* reprogram the IO-APIC RDT entry */
204112683SJimmy.Vetayases@oracle.com apix_intx_enable(irqno);
204212683SJimmy.Vetayases@oracle.com
204312683SJimmy.Vetayases@oracle.com lock_clear(&apic_ioapic_lock);
204412683SJimmy.Vetayases@oracle.com intr_restore(iflag);
204512683SJimmy.Vetayases@oracle.com
204612683SJimmy.Vetayases@oracle.com return (0);
204712683SJimmy.Vetayases@oracle.com }
204812683SJimmy.Vetayases@oracle.com
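/*
 * Return 1 if the interrupt is pending at the I/O APIC (delivery status
 * bit set in the RDT entry), 0 otherwise.
 */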
204912683SJimmy.Vetayases@oracle.com static int
205012683SJimmy.Vetayases@oracle.com apix_intx_get_pending(int irqno)
205112683SJimmy.Vetayases@oracle.com {
205212683SJimmy.Vetayases@oracle.com apic_irq_t *irqp;
205312683SJimmy.Vetayases@oracle.com int intin, ioapicindex, pending;
205412683SJimmy.Vetayases@oracle.com ulong_t iflag;
205512683SJimmy.Vetayases@oracle.com
205612683SJimmy.Vetayases@oracle.com mutex_enter(&airq_mutex);
205712683SJimmy.Vetayases@oracle.com irqp = apic_irq_table[irqno];
205812683SJimmy.Vetayases@oracle.com if (IS_IRQ_FREE(irqp)) {
205912683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
206012683SJimmy.Vetayases@oracle.com return (0);
206112683SJimmy.Vetayases@oracle.com }
206212683SJimmy.Vetayases@oracle.com
206312683SJimmy.Vetayases@oracle.com /* check IO-APIC delivery status */
206412683SJimmy.Vetayases@oracle.com intin = irqp->airq_intin_no;
206512683SJimmy.Vetayases@oracle.com ioapicindex = irqp->airq_ioapicindex;
206612683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
206712683SJimmy.Vetayases@oracle.com
206812683SJimmy.Vetayases@oracle.com iflag = intr_clear();
206912683SJimmy.Vetayases@oracle.com lock_set(&apic_ioapic_lock);
207012683SJimmy.Vetayases@oracle.com
207112683SJimmy.Vetayases@oracle.com pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapicindex, intin) &
207212683SJimmy.Vetayases@oracle.com AV_PENDING) ? 1 : 0;
207312683SJimmy.Vetayases@oracle.com
207412683SJimmy.Vetayases@oracle.com lock_clear(&apic_ioapic_lock);
207512683SJimmy.Vetayases@oracle.com intr_restore(iflag);
207612683SJimmy.Vetayases@oracle.com
207712683SJimmy.Vetayases@oracle.com return (pending);
207812683SJimmy.Vetayases@oracle.com }
207912683SJimmy.Vetayases@oracle.com
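/*
 * Mask the RDT entry for irqno (set AV_MASK in the low dword).
 */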
208012683SJimmy.Vetayases@oracle.com static void
208112683SJimmy.Vetayases@oracle.com apix_intx_set_mask(int irqno)
208212683SJimmy.Vetayases@oracle.com {
208312683SJimmy.Vetayases@oracle.com int intin, ioapixindex, rdt_entry;
208412683SJimmy.Vetayases@oracle.com ulong_t iflag;
208512683SJimmy.Vetayases@oracle.com apic_irq_t *irqp;
208612683SJimmy.Vetayases@oracle.com
208712683SJimmy.Vetayases@oracle.com mutex_enter(&airq_mutex);
208812683SJimmy.Vetayases@oracle.com irqp = apic_irq_table[irqno];
208912683SJimmy.Vetayases@oracle.com
209012683SJimmy.Vetayases@oracle.com ASSERT(irqp->airq_mps_intr_index != FREE_INDEX);
209112683SJimmy.Vetayases@oracle.com
209212683SJimmy.Vetayases@oracle.com intin = irqp->airq_intin_no;
209312683SJimmy.Vetayases@oracle.com ioapixindex = irqp->airq_ioapicindex;
209412683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
209512683SJimmy.Vetayases@oracle.com
209612683SJimmy.Vetayases@oracle.com iflag = intr_clear();
209712683SJimmy.Vetayases@oracle.com lock_set(&apic_ioapic_lock);
209812683SJimmy.Vetayases@oracle.com
209912683SJimmy.Vetayases@oracle.com rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapixindex, intin);
210012683SJimmy.Vetayases@oracle.com
210112683SJimmy.Vetayases@oracle.com 	/* set mask */
210212683SJimmy.Vetayases@oracle.com WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapixindex, intin,
210312683SJimmy.Vetayases@oracle.com (AV_MASK | rdt_entry));
210412683SJimmy.Vetayases@oracle.com
210512683SJimmy.Vetayases@oracle.com lock_clear(&apic_ioapic_lock);
210612683SJimmy.Vetayases@oracle.com intr_restore(iflag);
210712683SJimmy.Vetayases@oracle.com }
210812683SJimmy.Vetayases@oracle.com
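/*
 * Unmask the RDT entry for irqno (clear AV_MASK in the low dword).
 */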
210912683SJimmy.Vetayases@oracle.com static void
211012683SJimmy.Vetayases@oracle.com apix_intx_clear_mask(int irqno)
211112683SJimmy.Vetayases@oracle.com {
211212683SJimmy.Vetayases@oracle.com int intin, ioapixindex, rdt_entry;
211312683SJimmy.Vetayases@oracle.com ulong_t iflag;
211412683SJimmy.Vetayases@oracle.com apic_irq_t *irqp;
211512683SJimmy.Vetayases@oracle.com
211612683SJimmy.Vetayases@oracle.com mutex_enter(&airq_mutex);
211712683SJimmy.Vetayases@oracle.com irqp = apic_irq_table[irqno];
211812683SJimmy.Vetayases@oracle.com
211912683SJimmy.Vetayases@oracle.com ASSERT(irqp->airq_mps_intr_index != FREE_INDEX);
212012683SJimmy.Vetayases@oracle.com
212112683SJimmy.Vetayases@oracle.com intin = irqp->airq_intin_no;
212212683SJimmy.Vetayases@oracle.com ioapixindex = irqp->airq_ioapicindex;
212312683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
212412683SJimmy.Vetayases@oracle.com
212512683SJimmy.Vetayases@oracle.com iflag = intr_clear();
212612683SJimmy.Vetayases@oracle.com lock_set(&apic_ioapic_lock);
212712683SJimmy.Vetayases@oracle.com
212812683SJimmy.Vetayases@oracle.com rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapixindex, intin);
212912683SJimmy.Vetayases@oracle.com
213012683SJimmy.Vetayases@oracle.com /* clear mask */
213112683SJimmy.Vetayases@oracle.com WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapixindex, intin,
213212683SJimmy.Vetayases@oracle.com ((~AV_MASK) & rdt_entry));
213312683SJimmy.Vetayases@oracle.com
213412683SJimmy.Vetayases@oracle.com lock_clear(&apic_ioapic_lock);
213512683SJimmy.Vetayases@oracle.com intr_restore(iflag);
213612683SJimmy.Vetayases@oracle.com }
213712683SJimmy.Vetayases@oracle.com
213812683SJimmy.Vetayases@oracle.com /*
213912683SJimmy.Vetayases@oracle.com  * For a level-triggered interrupt, mask the IRQ line. Masking means
214012683SJimmy.Vetayases@oracle.com  * new interrupts will not be delivered. An interrupt already
214112683SJimmy.Vetayases@oracle.com  * accepted by a local APIC is not affected.
214212683SJimmy.Vetayases@oracle.com */
214312683SJimmy.Vetayases@oracle.com void
214412683SJimmy.Vetayases@oracle.com apix_level_intr_pre_eoi(int irq)
214512683SJimmy.Vetayases@oracle.com {
214612683SJimmy.Vetayases@oracle.com apic_irq_t *irqp = apic_irq_table[irq];
214712683SJimmy.Vetayases@oracle.com int apic_ix, intin_ix;
214812683SJimmy.Vetayases@oracle.com
214912683SJimmy.Vetayases@oracle.com if (irqp == NULL)
215012683SJimmy.Vetayases@oracle.com return;
215112683SJimmy.Vetayases@oracle.com
215212683SJimmy.Vetayases@oracle.com ASSERT(apic_level_intr[irq] == TRIGGER_MODE_LEVEL);
215312683SJimmy.Vetayases@oracle.com
215412683SJimmy.Vetayases@oracle.com lock_set(&apic_ioapic_lock);
215512683SJimmy.Vetayases@oracle.com
215612683SJimmy.Vetayases@oracle.com intin_ix = irqp->airq_intin_no;
215712683SJimmy.Vetayases@oracle.com apic_ix = irqp->airq_ioapicindex;
215812683SJimmy.Vetayases@oracle.com
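	/*
	 * The interrupt is not bound to this CPU (it may have been rebound
	 * since it was taken). Don't touch the RDT entry here; just EOI the
	 * IO-APIC directly unless the mask-RDT method is in use.
	 */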
215912683SJimmy.Vetayases@oracle.com if (irqp->airq_cpu != CPU->cpu_id) {
216012683SJimmy.Vetayases@oracle.com if (!APIX_IS_MASK_RDT(apix_mul_ioapic_method))
216112683SJimmy.Vetayases@oracle.com ioapic_write_eoi(apic_ix, irqp->airq_vector);
216212683SJimmy.Vetayases@oracle.com lock_clear(&apic_ioapic_lock);
216312683SJimmy.Vetayases@oracle.com return;
216412683SJimmy.Vetayases@oracle.com }
216512683SJimmy.Vetayases@oracle.com
216612683SJimmy.Vetayases@oracle.com if (apix_mul_ioapic_method == APIC_MUL_IOAPIC_IOXAPIC) {
216712683SJimmy.Vetayases@oracle.com /*
216812683SJimmy.Vetayases@oracle.com 		 * This is an IOxAPIC and there is an EOI register:
216912683SJimmy.Vetayases@oracle.com 		 * change the vector to a reserved, unused vector, so that
217012683SJimmy.Vetayases@oracle.com 		 * the EOI from the Local APIC won't clear the Remote IRR for
217112683SJimmy.Vetayases@oracle.com 		 * this level-triggered interrupt. Instead, we'll manually
217212683SJimmy.Vetayases@oracle.com * clear it in apix_post_hardint() after ISR handling.
217312683SJimmy.Vetayases@oracle.com */
217412683SJimmy.Vetayases@oracle.com WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_ix,
217512683SJimmy.Vetayases@oracle.com (irqp->airq_rdt_entry & (~0xff)) | APIX_RESV_VECTOR);
217612683SJimmy.Vetayases@oracle.com } else {
217712683SJimmy.Vetayases@oracle.com WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_ix,
217812683SJimmy.Vetayases@oracle.com AV_MASK | irqp->airq_rdt_entry);
217912683SJimmy.Vetayases@oracle.com }
218012683SJimmy.Vetayases@oracle.com
218112683SJimmy.Vetayases@oracle.com lock_clear(&apic_ioapic_lock);
218212683SJimmy.Vetayases@oracle.com }
218312683SJimmy.Vetayases@oracle.com
218412683SJimmy.Vetayases@oracle.com /*
218512683SJimmy.Vetayases@oracle.com  * For a level-triggered interrupt, unmask the IRQ line
218612683SJimmy.Vetayases@oracle.com * or restore the original vector number.
218712683SJimmy.Vetayases@oracle.com */
218812683SJimmy.Vetayases@oracle.com void
218912683SJimmy.Vetayases@oracle.com apix_level_intr_post_dispatch(int irq)
219012683SJimmy.Vetayases@oracle.com {
219112683SJimmy.Vetayases@oracle.com apic_irq_t *irqp = apic_irq_table[irq];
219212683SJimmy.Vetayases@oracle.com int apic_ix, intin_ix;
219312683SJimmy.Vetayases@oracle.com
219412683SJimmy.Vetayases@oracle.com if (irqp == NULL)
219512683SJimmy.Vetayases@oracle.com return;
219612683SJimmy.Vetayases@oracle.com
219712683SJimmy.Vetayases@oracle.com lock_set(&apic_ioapic_lock);
219812683SJimmy.Vetayases@oracle.com
219912683SJimmy.Vetayases@oracle.com intin_ix = irqp->airq_intin_no;
220012683SJimmy.Vetayases@oracle.com apic_ix = irqp->airq_ioapicindex;
220112683SJimmy.Vetayases@oracle.com
220212683SJimmy.Vetayases@oracle.com if (APIX_IS_DIRECTED_EOI(apix_mul_ioapic_method)) {
220312683SJimmy.Vetayases@oracle.com /*
220412683SJimmy.Vetayases@oracle.com * Already sent EOI back to Local APIC.
220512683SJimmy.Vetayases@oracle.com * Send EOI to IO-APIC
220612683SJimmy.Vetayases@oracle.com */
220712683SJimmy.Vetayases@oracle.com ioapic_write_eoi(apic_ix, irqp->airq_vector);
220812683SJimmy.Vetayases@oracle.com } else {
220912683SJimmy.Vetayases@oracle.com /* clear the mask or restore the vector */
221012683SJimmy.Vetayases@oracle.com WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_ix,
221112683SJimmy.Vetayases@oracle.com irqp->airq_rdt_entry);
221212683SJimmy.Vetayases@oracle.com
221312683SJimmy.Vetayases@oracle.com /* send EOI to IOxAPIC */
221412683SJimmy.Vetayases@oracle.com if (apix_mul_ioapic_method == APIC_MUL_IOAPIC_IOXAPIC)
221512683SJimmy.Vetayases@oracle.com ioapic_write_eoi(apic_ix, irqp->airq_vector);
221612683SJimmy.Vetayases@oracle.com }
221712683SJimmy.Vetayases@oracle.com
221812683SJimmy.Vetayases@oracle.com lock_clear(&apic_ioapic_lock);
221912683SJimmy.Vetayases@oracle.com }
222012683SJimmy.Vetayases@oracle.com
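/*
 * Return the share count for irqno, or 0 if the IRQ is free or not yet
 * bound to a CPU.
 */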
222112683SJimmy.Vetayases@oracle.com static int
222212683SJimmy.Vetayases@oracle.com apix_intx_get_shared(int irqno)
222312683SJimmy.Vetayases@oracle.com {
222412683SJimmy.Vetayases@oracle.com apic_irq_t *irqp;
222512683SJimmy.Vetayases@oracle.com int share;
222612683SJimmy.Vetayases@oracle.com
222712683SJimmy.Vetayases@oracle.com mutex_enter(&airq_mutex);
222812683SJimmy.Vetayases@oracle.com irqp = apic_irq_table[irqno];
222912683SJimmy.Vetayases@oracle.com if (IS_IRQ_FREE(irqp) || (irqp->airq_cpu == IRQ_UNINIT)) {
223012683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
223112683SJimmy.Vetayases@oracle.com return (0);
223212683SJimmy.Vetayases@oracle.com }
223312683SJimmy.Vetayases@oracle.com share = irqp->airq_share;
223412683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
223512683SJimmy.Vetayases@oracle.com
223612683SJimmy.Vetayases@oracle.com return (share);
223712683SJimmy.Vetayases@oracle.com }
223812683SJimmy.Vetayases@oracle.com
223912683SJimmy.Vetayases@oracle.com static void
224012683SJimmy.Vetayases@oracle.com apix_intx_set_shared(int irqno, int delta)
224112683SJimmy.Vetayases@oracle.com {
224212683SJimmy.Vetayases@oracle.com apic_irq_t *irqp;
224312683SJimmy.Vetayases@oracle.com
224412683SJimmy.Vetayases@oracle.com mutex_enter(&airq_mutex);
224512683SJimmy.Vetayases@oracle.com irqp = apic_irq_table[irqno];
224612683SJimmy.Vetayases@oracle.com if (IS_IRQ_FREE(irqp)) {
224712683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
224812683SJimmy.Vetayases@oracle.com return;
224912683SJimmy.Vetayases@oracle.com }
225012683SJimmy.Vetayases@oracle.com irqp->airq_share += delta;
225112683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
225212683SJimmy.Vetayases@oracle.com }
225312683SJimmy.Vetayases@oracle.com
225412683SJimmy.Vetayases@oracle.com /*
225512683SJimmy.Vetayases@oracle.com * Setup IRQ table. Return IRQ no or -1 on failure
225612683SJimmy.Vetayases@oracle.com */
225712683SJimmy.Vetayases@oracle.com static int
225812683SJimmy.Vetayases@oracle.com apix_intx_setup(dev_info_t *dip, int inum, int irqno,
225912683SJimmy.Vetayases@oracle.com struct apic_io_intr *intrp, struct intrspec *ispec, iflag_t *iflagp)
226012683SJimmy.Vetayases@oracle.com {
226112683SJimmy.Vetayases@oracle.com int origirq = ispec->intrspec_vec;
226212683SJimmy.Vetayases@oracle.com int newirq;
226312683SJimmy.Vetayases@oracle.com short intr_index;
226412683SJimmy.Vetayases@oracle.com uchar_t ipin, ioapic, ioapicindex;
226512683SJimmy.Vetayases@oracle.com apic_irq_t *irqp;
226612683SJimmy.Vetayases@oracle.com
226712683SJimmy.Vetayases@oracle.com UNREFERENCED_1PARAMETER(inum);
226812683SJimmy.Vetayases@oracle.com
226912683SJimmy.Vetayases@oracle.com if (intrp != NULL) {
227012683SJimmy.Vetayases@oracle.com intr_index = (short)(intrp - apic_io_intrp);
227112683SJimmy.Vetayases@oracle.com ioapic = intrp->intr_destid;
227212683SJimmy.Vetayases@oracle.com ipin = intrp->intr_destintin;
227312683SJimmy.Vetayases@oracle.com
227412683SJimmy.Vetayases@oracle.com /* Find ioapicindex. If destid was ALL, we will exit with 0. */
227512683SJimmy.Vetayases@oracle.com for (ioapicindex = apic_io_max - 1; ioapicindex; ioapicindex--)
227612683SJimmy.Vetayases@oracle.com if (apic_io_id[ioapicindex] == ioapic)
227712683SJimmy.Vetayases@oracle.com break;
227812683SJimmy.Vetayases@oracle.com ASSERT((ioapic == apic_io_id[ioapicindex]) ||
227912683SJimmy.Vetayases@oracle.com (ioapic == INTR_ALL_APIC));
228012683SJimmy.Vetayases@oracle.com
228112683SJimmy.Vetayases@oracle.com /* check whether this intin# has been used by another irqno */
228212683SJimmy.Vetayases@oracle.com if ((newirq = apic_find_intin(ioapicindex, ipin)) != -1)
228312683SJimmy.Vetayases@oracle.com return (newirq);
228412683SJimmy.Vetayases@oracle.com
228512683SJimmy.Vetayases@oracle.com } else if (iflagp != NULL) { /* ACPI */
228612683SJimmy.Vetayases@oracle.com intr_index = ACPI_INDEX;
228712683SJimmy.Vetayases@oracle.com ioapicindex = acpi_find_ioapic(irqno);
228812683SJimmy.Vetayases@oracle.com ASSERT(ioapicindex != 0xFF);
228912683SJimmy.Vetayases@oracle.com ioapic = apic_io_id[ioapicindex];
229012683SJimmy.Vetayases@oracle.com ipin = irqno - apic_io_vectbase[ioapicindex];
229112683SJimmy.Vetayases@oracle.com
229212683SJimmy.Vetayases@oracle.com if (apic_irq_table[irqno] &&
229312683SJimmy.Vetayases@oracle.com apic_irq_table[irqno]->airq_mps_intr_index == ACPI_INDEX) {
229412683SJimmy.Vetayases@oracle.com ASSERT(apic_irq_table[irqno]->airq_intin_no == ipin &&
229512683SJimmy.Vetayases@oracle.com apic_irq_table[irqno]->airq_ioapicindex ==
229612683SJimmy.Vetayases@oracle.com ioapicindex);
229712683SJimmy.Vetayases@oracle.com return (irqno);
229812683SJimmy.Vetayases@oracle.com }
229912683SJimmy.Vetayases@oracle.com
230012683SJimmy.Vetayases@oracle.com } else { /* default configuration */
230112683SJimmy.Vetayases@oracle.com intr_index = DEFAULT_INDEX;
230212683SJimmy.Vetayases@oracle.com ioapicindex = 0;
230312683SJimmy.Vetayases@oracle.com ioapic = apic_io_id[ioapicindex];
230412683SJimmy.Vetayases@oracle.com ipin = (uchar_t)irqno;
230512683SJimmy.Vetayases@oracle.com }
230612683SJimmy.Vetayases@oracle.com
230712683SJimmy.Vetayases@oracle.com /* allocate a new IRQ no */
230812683SJimmy.Vetayases@oracle.com if ((irqp = apic_irq_table[irqno]) == NULL) {
230912683SJimmy.Vetayases@oracle.com irqp = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP);
231012683SJimmy.Vetayases@oracle.com apic_irq_table[irqno] = irqp;
231112683SJimmy.Vetayases@oracle.com } else {
231212683SJimmy.Vetayases@oracle.com if (irqp->airq_mps_intr_index != FREE_INDEX) {
231312683SJimmy.Vetayases@oracle.com newirq = apic_allocate_irq(apic_first_avail_irq);
231412683SJimmy.Vetayases@oracle.com if (newirq == -1) {
231512683SJimmy.Vetayases@oracle.com return (-1);
231612683SJimmy.Vetayases@oracle.com }
231712683SJimmy.Vetayases@oracle.com irqno = newirq;
231812683SJimmy.Vetayases@oracle.com irqp = apic_irq_table[irqno];
231912683SJimmy.Vetayases@oracle.com ASSERT(irqp != NULL);
232012683SJimmy.Vetayases@oracle.com }
232112683SJimmy.Vetayases@oracle.com }
232212683SJimmy.Vetayases@oracle.com apic_max_device_irq = max(irqno, apic_max_device_irq);
232312683SJimmy.Vetayases@oracle.com apic_min_device_irq = min(irqno, apic_min_device_irq);
232412683SJimmy.Vetayases@oracle.com
232512683SJimmy.Vetayases@oracle.com irqp->airq_mps_intr_index = intr_index;
232612683SJimmy.Vetayases@oracle.com irqp->airq_ioapicindex = ioapicindex;
232712683SJimmy.Vetayases@oracle.com irqp->airq_intin_no = ipin;
232812683SJimmy.Vetayases@oracle.com irqp->airq_dip = dip;
232912683SJimmy.Vetayases@oracle.com irqp->airq_origirq = (uchar_t)origirq;
233012683SJimmy.Vetayases@oracle.com if (iflagp != NULL)
233112683SJimmy.Vetayases@oracle.com irqp->airq_iflag = *iflagp;
233212683SJimmy.Vetayases@oracle.com irqp->airq_cpu = IRQ_UNINIT;
233312683SJimmy.Vetayases@oracle.com irqp->airq_vector = 0;
233412683SJimmy.Vetayases@oracle.com
233512683SJimmy.Vetayases@oracle.com return (irqno);
233612683SJimmy.Vetayases@oracle.com }
233712683SJimmy.Vetayases@oracle.com
233812683SJimmy.Vetayases@oracle.com /*
233912683SJimmy.Vetayases@oracle.com * Setup IRQ table for non-pci devices. Return IRQ no or -1 on error
234012683SJimmy.Vetayases@oracle.com */
234112683SJimmy.Vetayases@oracle.com static int
234212683SJimmy.Vetayases@oracle.com apix_intx_setup_nonpci(dev_info_t *dip, int inum, int bustype,
234312683SJimmy.Vetayases@oracle.com struct intrspec *ispec)
234412683SJimmy.Vetayases@oracle.com {
234512683SJimmy.Vetayases@oracle.com int irqno = ispec->intrspec_vec;
234612683SJimmy.Vetayases@oracle.com int newirq, i;
234712683SJimmy.Vetayases@oracle.com iflag_t intr_flag;
234812683SJimmy.Vetayases@oracle.com ACPI_SUBTABLE_HEADER *hp;
234912683SJimmy.Vetayases@oracle.com ACPI_MADT_INTERRUPT_OVERRIDE *isop;
235012683SJimmy.Vetayases@oracle.com struct apic_io_intr *intrp;
235112683SJimmy.Vetayases@oracle.com
235212683SJimmy.Vetayases@oracle.com if (!apic_enable_acpi || apic_use_acpi_madt_only) {
235312683SJimmy.Vetayases@oracle.com int busid;
235412683SJimmy.Vetayases@oracle.com
235512683SJimmy.Vetayases@oracle.com if (bustype == 0)
235612683SJimmy.Vetayases@oracle.com bustype = eisa_level_intr_mask ? BUS_EISA : BUS_ISA;
235712683SJimmy.Vetayases@oracle.com
235812683SJimmy.Vetayases@oracle.com /* loop checking BUS_ISA/BUS_EISA */
235912683SJimmy.Vetayases@oracle.com for (i = 0; i < 2; i++) {
236012683SJimmy.Vetayases@oracle.com if (((busid = apic_find_bus_id(bustype)) != -1) &&
236112683SJimmy.Vetayases@oracle.com ((intrp = apic_find_io_intr_w_busid(irqno, busid))
236212683SJimmy.Vetayases@oracle.com != NULL)) {
236312683SJimmy.Vetayases@oracle.com return (apix_intx_setup(dip, inum, irqno,
236412683SJimmy.Vetayases@oracle.com intrp, ispec, NULL));
236512683SJimmy.Vetayases@oracle.com }
236612683SJimmy.Vetayases@oracle.com bustype = (bustype == BUS_EISA) ? BUS_ISA : BUS_EISA;
236712683SJimmy.Vetayases@oracle.com }
236812683SJimmy.Vetayases@oracle.com
236912683SJimmy.Vetayases@oracle.com /* fall back to default configuration */
237012683SJimmy.Vetayases@oracle.com return (-1);
237112683SJimmy.Vetayases@oracle.com }
237212683SJimmy.Vetayases@oracle.com
237312683SJimmy.Vetayases@oracle.com /* search iso entries first */
237412683SJimmy.Vetayases@oracle.com if (acpi_iso_cnt != 0) {
237512683SJimmy.Vetayases@oracle.com hp = (ACPI_SUBTABLE_HEADER *)acpi_isop;
237612683SJimmy.Vetayases@oracle.com i = 0;
237712683SJimmy.Vetayases@oracle.com while (i < acpi_iso_cnt) {
237812683SJimmy.Vetayases@oracle.com if (hp->Type == ACPI_MADT_TYPE_INTERRUPT_OVERRIDE) {
237912683SJimmy.Vetayases@oracle.com isop = (ACPI_MADT_INTERRUPT_OVERRIDE *) hp;
238012683SJimmy.Vetayases@oracle.com if (isop->Bus == 0 &&
238112683SJimmy.Vetayases@oracle.com isop->SourceIrq == irqno) {
238212683SJimmy.Vetayases@oracle.com newirq = isop->GlobalIrq;
238312683SJimmy.Vetayases@oracle.com intr_flag.intr_po = isop->IntiFlags &
238412683SJimmy.Vetayases@oracle.com ACPI_MADT_POLARITY_MASK;
238512683SJimmy.Vetayases@oracle.com intr_flag.intr_el = (isop->IntiFlags &
238612683SJimmy.Vetayases@oracle.com ACPI_MADT_TRIGGER_MASK) >> 2;
238712683SJimmy.Vetayases@oracle.com intr_flag.bustype = BUS_ISA;
238812683SJimmy.Vetayases@oracle.com
238912683SJimmy.Vetayases@oracle.com return (apix_intx_setup(dip, inum,
239012683SJimmy.Vetayases@oracle.com newirq, NULL, ispec, &intr_flag));
239112683SJimmy.Vetayases@oracle.com }
239212683SJimmy.Vetayases@oracle.com i++;
239312683SJimmy.Vetayases@oracle.com }
239412683SJimmy.Vetayases@oracle.com hp = (ACPI_SUBTABLE_HEADER *)(((char *)hp) +
239512683SJimmy.Vetayases@oracle.com hp->Length);
239612683SJimmy.Vetayases@oracle.com }
239712683SJimmy.Vetayases@oracle.com }
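	/*
	 * No Interrupt Source Override entry matched; assume the ISA
	 * defaults of edge-triggered, active-high.
	 */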
239812683SJimmy.Vetayases@oracle.com intr_flag.intr_po = INTR_PO_ACTIVE_HIGH;
239912683SJimmy.Vetayases@oracle.com intr_flag.intr_el = INTR_EL_EDGE;
240012683SJimmy.Vetayases@oracle.com intr_flag.bustype = BUS_ISA;
240112683SJimmy.Vetayases@oracle.com return (apix_intx_setup(dip, inum, irqno, NULL, ispec, &intr_flag));
240212683SJimmy.Vetayases@oracle.com }
240312683SJimmy.Vetayases@oracle.com
240412683SJimmy.Vetayases@oracle.com
240512683SJimmy.Vetayases@oracle.com /*
240612683SJimmy.Vetayases@oracle.com * Setup IRQ table for pci devices. Return IRQ no or -1 on error
240712683SJimmy.Vetayases@oracle.com */
240812683SJimmy.Vetayases@oracle.com static int
240912683SJimmy.Vetayases@oracle.com apix_intx_setup_pci(dev_info_t *dip, int inum, int bustype,
241012683SJimmy.Vetayases@oracle.com struct intrspec *ispec)
241112683SJimmy.Vetayases@oracle.com {
241212683SJimmy.Vetayases@oracle.com int busid, devid, pci_irq;
241312683SJimmy.Vetayases@oracle.com ddi_acc_handle_t cfg_handle;
241412683SJimmy.Vetayases@oracle.com uchar_t ipin;
241512683SJimmy.Vetayases@oracle.com iflag_t intr_flag;
241612683SJimmy.Vetayases@oracle.com struct apic_io_intr *intrp;
241712683SJimmy.Vetayases@oracle.com
241812683SJimmy.Vetayases@oracle.com if (acpica_get_bdf(dip, &busid, &devid, NULL) != 0)
241912683SJimmy.Vetayases@oracle.com return (-1);
242012683SJimmy.Vetayases@oracle.com
242112683SJimmy.Vetayases@oracle.com if (busid == 0 && apic_pci_bus_total == 1)
242212683SJimmy.Vetayases@oracle.com busid = (int)apic_single_pci_busid;
242312683SJimmy.Vetayases@oracle.com
242412683SJimmy.Vetayases@oracle.com if (pci_config_setup(dip, &cfg_handle) != DDI_SUCCESS)
242512683SJimmy.Vetayases@oracle.com return (-1);
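	/*
	 * Convert the config-space Interrupt Pin register value to a pin
	 * index relative to INTA.
	 */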
242612683SJimmy.Vetayases@oracle.com ipin = pci_config_get8(cfg_handle, PCI_CONF_IPIN) - PCI_INTA;
242712683SJimmy.Vetayases@oracle.com pci_config_teardown(&cfg_handle);
242812683SJimmy.Vetayases@oracle.com
242912683SJimmy.Vetayases@oracle.com if (apic_enable_acpi && !apic_use_acpi_madt_only) { /* ACPI */
243012683SJimmy.Vetayases@oracle.com if (apic_acpi_translate_pci_irq(dip, busid, devid,
243112683SJimmy.Vetayases@oracle.com ipin, &pci_irq, &intr_flag) != ACPI_PSM_SUCCESS)
243212683SJimmy.Vetayases@oracle.com return (-1);
243312683SJimmy.Vetayases@oracle.com
243412683SJimmy.Vetayases@oracle.com intr_flag.bustype = (uchar_t)bustype;
243512683SJimmy.Vetayases@oracle.com return (apix_intx_setup(dip, inum, pci_irq, NULL, ispec,
243612683SJimmy.Vetayases@oracle.com &intr_flag));
243712683SJimmy.Vetayases@oracle.com }
243812683SJimmy.Vetayases@oracle.com
243912683SJimmy.Vetayases@oracle.com /* MP configuration table */
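	/*
	 * Build the MP-spec style source-bus IRQ for a PCI device:
	 * device number in bits 2-6, interrupt pin in bits 0-1.
	 */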
244012683SJimmy.Vetayases@oracle.com pci_irq = ((devid & 0x1f) << 2) | (ipin & 0x3);
244112683SJimmy.Vetayases@oracle.com if ((intrp = apic_find_io_intr_w_busid(pci_irq, busid)) == NULL) {
244212683SJimmy.Vetayases@oracle.com pci_irq = apic_handle_pci_pci_bridge(dip, devid, ipin, &intrp);
244312683SJimmy.Vetayases@oracle.com if (pci_irq == -1)
244412683SJimmy.Vetayases@oracle.com return (-1);
244512683SJimmy.Vetayases@oracle.com }
244612683SJimmy.Vetayases@oracle.com
244712683SJimmy.Vetayases@oracle.com return (apix_intx_setup(dip, inum, pci_irq, intrp, ispec, NULL));
244812683SJimmy.Vetayases@oracle.com }
244912683SJimmy.Vetayases@oracle.com
245012683SJimmy.Vetayases@oracle.com /*
245112683SJimmy.Vetayases@oracle.com * Translate and return IRQ no
245212683SJimmy.Vetayases@oracle.com */
245312683SJimmy.Vetayases@oracle.com static int
245412683SJimmy.Vetayases@oracle.com apix_intx_xlate_irq(dev_info_t *dip, int inum, struct intrspec *ispec)
245512683SJimmy.Vetayases@oracle.com {
245612683SJimmy.Vetayases@oracle.com int newirq, irqno = ispec->intrspec_vec;
245712683SJimmy.Vetayases@oracle.com int parent_is_pci_or_pciex = 0, child_is_pciex = 0;
245812683SJimmy.Vetayases@oracle.com int bustype = 0, dev_len;
245912683SJimmy.Vetayases@oracle.com char dev_type[16];
246012683SJimmy.Vetayases@oracle.com
246112683SJimmy.Vetayases@oracle.com if (apic_defconf) {
246212683SJimmy.Vetayases@oracle.com mutex_enter(&airq_mutex);
246312683SJimmy.Vetayases@oracle.com goto defconf;
246412683SJimmy.Vetayases@oracle.com }
246512683SJimmy.Vetayases@oracle.com
246612683SJimmy.Vetayases@oracle.com if ((dip == NULL) || (!apic_irq_translate && !apic_enable_acpi)) {
246712683SJimmy.Vetayases@oracle.com mutex_enter(&airq_mutex);
246812683SJimmy.Vetayases@oracle.com goto nonpci;
246912683SJimmy.Vetayases@oracle.com }
247012683SJimmy.Vetayases@oracle.com
247112683SJimmy.Vetayases@oracle.com /*
247212683SJimmy.Vetayases@oracle.com * use ddi_getlongprop_buf() instead of ddi_prop_lookup_string()
247312683SJimmy.Vetayases@oracle.com * to avoid extra buffer allocation.
247412683SJimmy.Vetayases@oracle.com */
247512683SJimmy.Vetayases@oracle.com dev_len = sizeof (dev_type);
247612683SJimmy.Vetayases@oracle.com if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ddi_get_parent(dip),
247712683SJimmy.Vetayases@oracle.com DDI_PROP_DONTPASS, "device_type", (caddr_t)dev_type,
247812683SJimmy.Vetayases@oracle.com &dev_len) == DDI_PROP_SUCCESS) {
247912683SJimmy.Vetayases@oracle.com if ((strcmp(dev_type, "pci") == 0) ||
248012683SJimmy.Vetayases@oracle.com (strcmp(dev_type, "pciex") == 0))
248112683SJimmy.Vetayases@oracle.com parent_is_pci_or_pciex = 1;
248212683SJimmy.Vetayases@oracle.com }
248312683SJimmy.Vetayases@oracle.com
248412683SJimmy.Vetayases@oracle.com if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
248512683SJimmy.Vetayases@oracle.com DDI_PROP_DONTPASS, "compatible", (caddr_t)dev_type,
248612683SJimmy.Vetayases@oracle.com &dev_len) == DDI_PROP_SUCCESS) {
248712683SJimmy.Vetayases@oracle.com if (strstr(dev_type, "pciex"))
248812683SJimmy.Vetayases@oracle.com child_is_pciex = 1;
248912683SJimmy.Vetayases@oracle.com }
249012683SJimmy.Vetayases@oracle.com
249112683SJimmy.Vetayases@oracle.com mutex_enter(&airq_mutex);
249212683SJimmy.Vetayases@oracle.com
249312683SJimmy.Vetayases@oracle.com if (parent_is_pci_or_pciex) {
249412683SJimmy.Vetayases@oracle.com bustype = child_is_pciex ? BUS_PCIE : BUS_PCI;
249512683SJimmy.Vetayases@oracle.com newirq = apix_intx_setup_pci(dip, inum, bustype, ispec);
249612683SJimmy.Vetayases@oracle.com if (newirq != -1)
249712683SJimmy.Vetayases@oracle.com goto done;
249812683SJimmy.Vetayases@oracle.com bustype = 0;
249912683SJimmy.Vetayases@oracle.com } else if (strcmp(dev_type, "isa") == 0)
250012683SJimmy.Vetayases@oracle.com bustype = BUS_ISA;
250112683SJimmy.Vetayases@oracle.com else if (strcmp(dev_type, "eisa") == 0)
250212683SJimmy.Vetayases@oracle.com bustype = BUS_EISA;
250312683SJimmy.Vetayases@oracle.com
250412683SJimmy.Vetayases@oracle.com nonpci:
250512683SJimmy.Vetayases@oracle.com newirq = apix_intx_setup_nonpci(dip, inum, bustype, ispec);
250612683SJimmy.Vetayases@oracle.com if (newirq != -1)
250712683SJimmy.Vetayases@oracle.com goto done;
250812683SJimmy.Vetayases@oracle.com
250912683SJimmy.Vetayases@oracle.com defconf:
251012683SJimmy.Vetayases@oracle.com newirq = apix_intx_setup(dip, inum, irqno, NULL, ispec, NULL);
251112683SJimmy.Vetayases@oracle.com if (newirq == -1) {
251212683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
251312683SJimmy.Vetayases@oracle.com return (-1);
251412683SJimmy.Vetayases@oracle.com }
251512683SJimmy.Vetayases@oracle.com done:
251612683SJimmy.Vetayases@oracle.com ASSERT(apic_irq_table[newirq]);
251712683SJimmy.Vetayases@oracle.com mutex_exit(&airq_mutex);
251812683SJimmy.Vetayases@oracle.com return (newirq);
251912683SJimmy.Vetayases@oracle.com }
252012683SJimmy.Vetayases@oracle.com
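/*
 * Translate the interrupt spec to an IRQ and allocate an INTx vector for
 * it. Returns 1 on success, 0 on failure.
 */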
252112683SJimmy.Vetayases@oracle.com static int
252212683SJimmy.Vetayases@oracle.com apix_intx_alloc_vector(dev_info_t *dip, int inum, struct intrspec *ispec)
252312683SJimmy.Vetayases@oracle.com {
252412683SJimmy.Vetayases@oracle.com int irqno;
252512683SJimmy.Vetayases@oracle.com apix_vector_t *vecp;
252612683SJimmy.Vetayases@oracle.com
252712683SJimmy.Vetayases@oracle.com if ((irqno = apix_intx_xlate_irq(dip, inum, ispec)) == -1)
252812683SJimmy.Vetayases@oracle.com return (0);
252912683SJimmy.Vetayases@oracle.com
253012683SJimmy.Vetayases@oracle.com if ((vecp = apix_alloc_intx(dip, inum, irqno)) == NULL)
253112683SJimmy.Vetayases@oracle.com return (0);
253212683SJimmy.Vetayases@oracle.com
253312683SJimmy.Vetayases@oracle.com DDI_INTR_IMPLDBG((CE_CONT, "apix_intx_alloc_vector: dip=0x%p name=%s "
253412683SJimmy.Vetayases@oracle.com "irqno=0x%x cpuid=%d vector=0x%x\n",
253512683SJimmy.Vetayases@oracle.com (void *)dip, ddi_driver_name(dip), irqno,
253612683SJimmy.Vetayases@oracle.com vecp->v_cpuid, vecp->v_vector));
253712683SJimmy.Vetayases@oracle.com
253812683SJimmy.Vetayases@oracle.com return (1);
253912683SJimmy.Vetayases@oracle.com }
254012683SJimmy.Vetayases@oracle.com
254112683SJimmy.Vetayases@oracle.com /*
254212683SJimmy.Vetayases@oracle.com  * Return the vector if the translated IRQ for this device has a
254312683SJimmy.Vetayases@oracle.com  * vector mapping set up. If no IRQ setup exists or no vector is
254412683SJimmy.Vetayases@oracle.com  * allocated to it, return NULL.
254512683SJimmy.Vetayases@oracle.com */
254612683SJimmy.Vetayases@oracle.com static apix_vector_t *
254712683SJimmy.Vetayases@oracle.com apix_intx_xlate_vector(dev_info_t *dip, int inum, struct intrspec *ispec)
254812683SJimmy.Vetayases@oracle.com {
254912683SJimmy.Vetayases@oracle.com int irqno;
255012683SJimmy.Vetayases@oracle.com apix_vector_t *vecp;
255112683SJimmy.Vetayases@oracle.com
255212683SJimmy.Vetayases@oracle.com /* get the IRQ number */
255312683SJimmy.Vetayases@oracle.com if ((irqno = apix_intx_xlate_irq(dip, inum, ispec)) == -1)
255412683SJimmy.Vetayases@oracle.com return (NULL);
255512683SJimmy.Vetayases@oracle.com
255612683SJimmy.Vetayases@oracle.com /* get the vector number if a vector is allocated to this irqno */
255712683SJimmy.Vetayases@oracle.com vecp = apix_intx_get_vector(irqno);
255812683SJimmy.Vetayases@oracle.com
255912683SJimmy.Vetayases@oracle.com return (vecp);
256012683SJimmy.Vetayases@oracle.com }
256112683SJimmy.Vetayases@oracle.com
256212683SJimmy.Vetayases@oracle.com /* stub function */
256312683SJimmy.Vetayases@oracle.com int
256412683SJimmy.Vetayases@oracle.com apix_loaded(void)
256512683SJimmy.Vetayases@oracle.com {
256612683SJimmy.Vetayases@oracle.com return (apix_is_enabled);
256712683SJimmy.Vetayases@oracle.com }