/*	$OpenBSD: intr.h,v 1.17 2009/04/19 17:50:18 oga Exp $	*/
/*	$NetBSD: intr.h,v 1.2 2003/05/04 22:01:56 fvdl Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_INTR_H_
#define _X86_INTR_H_

#include <machine/intrdefs.h>

#ifndef _LOCORE
#include <machine/cpu.h>

#include <sys/evcount.h>

/*
 * Struct describing an interrupt source for a CPU. struct cpu_info
 * has an array of MAX_INTR_SOURCES of these. The index in the array
 * is equal to the stub number of the stubcode as present in vector.s
 *
 * The primary CPU's array of interrupt sources has its first 16
 * entries reserved for legacy ISA irq handlers. This means that
 * they have a 1:1 mapping for arrayindex:irq_num. This is not
 * true for interrupts that come in through IO APICs, to find
 * their source, go through ci->ci_isources[index].is_pic
 *
 * It's possible to always maintain a 1:1 mapping, but that means
 * limiting the total number of interrupt sources to MAX_INTR_SOURCES
 * (32), instead of 32 per CPU. It also would mean that having multiple
 * IO APICs which deliver interrupts from an equal pin number would
 * overlap if they were to be sent to the same CPU.
 */

/*
 * Entry points into the assembly interrupt stub code (vector.s);
 * see i8259_stubs/ioapic_*_stubs arrays declared below.
 */
struct intrstub {
	void *ist_entry;		/* normal entry point */
	void *ist_recurse;		/* re-entry point, used via spllower */
	void *ist_resume;		/* resume point, used via doreti */
};

/* One interrupt source; cpu_info keeps MAX_INTR_SOURCES of these. */
struct intrsource {
	int is_maxlevel;		/* max. IPL for this source */
	int is_pin;			/* IRQ for legacy; pin for IO APIC */
	struct intrhand *is_handlers;	/* handler chain */
	struct pic *is_pic;		/* originating PIC */
	void *is_recurse;		/* entry for spllower */
	void *is_resume;		/* entry for doreti */
	char is_evname[32];		/* event counter name */
	int is_flags;			/* see below */
	int is_type;			/* level, edge */
	int is_idtvec;			/* IDT vector number — presumably; see
					 * idt_allocmap below, verify */
	int is_minlevel;		/* min. IPL for this source —
					 * NOTE(review): assumed counterpart of
					 * is_maxlevel, confirm in MD code */
};

#define IS_LEGACY	0x0001		/* legacy ISA irq source */
#define IS_IPI		0x0002		/* inter-processor interrupt source */
#define IS_LOG		0x0004


/*
 * Interrupt handler chains. *_intr_establish() insert a handler into
 * the list. The handler is called with its (single) argument.
89 */ 90 91 struct intrhand { 92 int (*ih_fun)(void *); 93 void *ih_arg; 94 int ih_level; 95 struct intrhand *ih_next; 96 int ih_pin; 97 int ih_slot; 98 struct cpu_info *ih_cpu; 99 int ih_irq; 100 struct evcount ih_count; 101 }; 102 103 #define IMASK(ci,level) (ci)->ci_imask[(level)] 104 #define IUNMASK(ci,level) (ci)->ci_iunmask[(level)] 105 106 extern void Xspllower(int); 107 108 int splraise(int); 109 int spllower(int); 110 void softintr(int); 111 112 /* 113 * Convert spl level to local APIC level 114 */ 115 #define APIC_LEVEL(l) ((l) << 4) 116 117 /* 118 * compiler barrier: prevent reordering of instructions. 119 * XXX something similar will move to <sys/cdefs.h> 120 * or thereabouts. 121 * This prevents the compiler from reordering code around 122 * this "instruction", acting as a sequence point for code generation. 123 */ 124 125 #define __splbarrier() __asm __volatile("":::"memory") 126 127 /* 128 * Hardware interrupt masks 129 */ 130 #define splbio() splraise(IPL_BIO) 131 #define splnet() splraise(IPL_NET) 132 #define spltty() splraise(IPL_TTY) 133 #define splaudio() splraise(IPL_AUDIO) 134 #define splclock() splraise(IPL_CLOCK) 135 #define splstatclock() splclock() 136 #define splipi() splraise(IPL_IPI) 137 138 #define spllpt() spltty() 139 140 #define spllpt() spltty() 141 142 /* 143 * Software interrupt masks 144 */ 145 #define splsoftclock() splraise(IPL_SOFTCLOCK) 146 #define splsoftnet() splraise(IPL_SOFTNET) 147 #define splsofttty() splraise(IPL_SOFTTTY) 148 149 /* 150 * Miscellaneous 151 */ 152 #define splvm() splraise(IPL_VM) 153 #define splhigh() splraise(IPL_HIGH) 154 #define spl0() spllower(IPL_NONE) 155 #define splsched() splraise(IPL_SCHED) 156 #define spllock() splhigh() 157 #define splx(x) spllower(x) 158 159 /* SPL asserts */ 160 #ifdef DIAGNOSTIC 161 /* 162 * Although this function is implemented in MI code, it must be in this MD 163 * header because we don't want this header to include MI includes. 
164 */ 165 void splassert_fail(int, int, const char *); 166 extern int splassert_ctl; 167 void splassert_check(int, const char *); 168 #define splassert(__wantipl) do { \ 169 if (splassert_ctl > 0) { \ 170 splassert_check(__wantipl, __func__); \ 171 } \ 172 } while (0) 173 #define splsoftassert(wantipl) splassert(wantipl) 174 #else 175 #define splassert(wantipl) do { /* nada */ } while (0) 176 #define splsoftassert(wantipl) do { /* nada */ } while (0) 177 #endif 178 179 /* 180 * XXX 181 */ 182 #define setsoftnet() softintr(SIR_NET) 183 184 #define IPLSHIFT 4 /* The upper nibble of vectors is the IPL. */ 185 #define IPL(level) ((level) >> IPLSHIFT) /* Extract the IPL. */ 186 187 #include <machine/pic.h> 188 189 /* 190 * Stub declarations. 191 */ 192 193 extern void Xsoftclock(void); 194 extern void Xsoftnet(void); 195 extern void Xsofttty(void); 196 197 extern struct intrstub i8259_stubs[]; 198 extern struct intrstub ioapic_edge_stubs[]; 199 extern struct intrstub ioapic_level_stubs[]; 200 201 struct cpu_info; 202 203 extern char idt_allocmap[]; 204 205 void intr_default_setup(void); 206 int x86_nmi(void); 207 void intr_calculatemasks(struct cpu_info *); 208 int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *); 209 int intr_allocate_slot(struct pic *, int, int, int, struct cpu_info **, int *, 210 int *); 211 void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), 212 void *, char *); 213 void intr_disestablish(struct intrhand *); 214 void cpu_intr_init(struct cpu_info *); 215 int intr_find_mpmapping(int bus, int pin, int *handle); 216 void intr_printconfig(void); 217 218 #ifdef MULTIPROCESSOR 219 int x86_send_ipi(struct cpu_info *, int); 220 int x86_fast_ipi(struct cpu_info *, int); 221 void x86_broadcast_ipi(int); 222 void x86_multicast_ipi(int, int); 223 void x86_ipi_handler(void); 224 void x86_intlock(struct intrframe); 225 void x86_intunlock(struct intrframe); 226 void x86_softintlock(void); 227 void x86_softintunlock(void); 
228 void x86_setperf_ipi(struct cpu_info *); 229 230 extern void (*ipifunc[X86_NIPI])(struct cpu_info *); 231 #endif 232 233 #endif /* !_LOCORE */ 234 235 /* 236 * Generic software interrupt support. 237 */ 238 239 #define X86_SOFTINTR_SOFTCLOCK 0 240 #define X86_SOFTINTR_SOFTNET 1 241 #define X86_SOFTINTR_SOFTTTY 2 242 #define X86_NSOFTINTR 3 243 244 #ifndef _LOCORE 245 #include <sys/queue.h> 246 247 struct x86_soft_intrhand { 248 TAILQ_ENTRY(x86_soft_intrhand) 249 sih_q; 250 struct x86_soft_intr *sih_intrhead; 251 void (*sih_fn)(void *); 252 void *sih_arg; 253 int sih_pending; 254 }; 255 256 struct x86_soft_intr { 257 TAILQ_HEAD(, x86_soft_intrhand) 258 softintr_q; 259 int softintr_ssir; 260 struct mutex softintr_lock; 261 }; 262 263 void *softintr_establish(int, void (*)(void *), void *); 264 void softintr_disestablish(void *); 265 void softintr_init(void); 266 void softintr_dispatch(int); 267 268 #define softintr_schedule(arg) \ 269 do { \ 270 struct x86_soft_intrhand *__sih = (arg); \ 271 struct x86_soft_intr *__si = __sih->sih_intrhead; \ 272 \ 273 mtx_enter(&__si->softintr_lock); \ 274 if (__sih->sih_pending == 0) { \ 275 TAILQ_INSERT_TAIL(&__si->softintr_q, __sih, sih_q); \ 276 __sih->sih_pending = 1; \ 277 softintr(__si->softintr_ssir); \ 278 } \ 279 mtx_leave(&__si->softintr_lock); \ 280 } while (/*CONSTCOND*/ 0) 281 #endif /* _LOCORE */ 282 283 #endif /* !_X86_INTR_H_ */ 284