/*	$OpenBSD: intr.c,v 1.61 2024/06/25 12:02:48 kettenis Exp $	*/
/*	$NetBSD: intr.c,v 1.3 2003/03/03 22:16:20 fvdl Exp $	*/

/*
 * Copyright 2002 (c) Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* #define INTRDEBUG */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <machine/atomic.h>
#include <machine/i8259.h>
#include <machine/cpu.h>
#include <machine/pio.h>
#include <machine/cpufunc.h>

#include "lapic.h"
#include "xen.h"
#include "hyperv.h"

#if NLAPIC > 0
#include <machine/i82489var.h>
#endif

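/*
 * Placeholder PIC used by the software interrupt sources
 * (SIR_CLOCK, SIR_NET, SIR_TTY) set up in cpu_intr_init().
 */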
struct pic softintr_pic = {
	{0, {NULL}, NULL, 0, "softintr_pic0", NULL, 0, 0},
	PIC_SOFT,
#ifdef MULTIPROCESSOR
	{},
#endif
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};

int intr_suspended;
struct intrhand *intr_nowake;

/*
 * Fill in default interrupt table (in case of spurious interrupt
 * during configuration of kernel), setup interrupt control unit
 */
void
intr_default_setup(void)
{
	int i;

	/* icu vectors */
	for (i = 0; i < NUM_LEGACY_IRQS; i++) {
		idt_allocmap[ICU_OFFSET + i] = 1;
		setgate(&idt[ICU_OFFSET + i],
		    i8259_stubs[i].ist_entry, 0, SDT_SYS386IGT,
		    SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	}

	/*
	 * Eventually might want to check if it's actually there.
	 */
	i8259_default_setup();
}

/*
 * Handle an NMI, possibly a machine check.
 * Return true to panic the system, false to ignore.
 */
int
x86_nmi(void)
{
	log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70));
	return(0);
}

/*
 * Recalculate the interrupt masks from scratch.
 */
void
intr_calculatemasks(struct cpu_info *ci)
{
	int irq, level;
	u_int64_t unusedirqs, intrlevel[MAX_INTR_SOURCES];
	struct intrhand *q;

	/* First, figure out which levels each IRQ uses. */
	unusedirqs = 0xffffffffffffffffUL;
	for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
		int levels = 0;

		if (ci->ci_isources[irq] == NULL) {
			intrlevel[irq] = 0;
			continue;
		}
		for (q = ci->ci_isources[irq]->is_handlers; q; q = q->ih_next)
			levels |= (1 << q->ih_level);
		intrlevel[irq] = levels;
		if (levels)
			unusedirqs &= ~(1UL << irq);
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		u_int64_t irqs = 0;
		for (irq = 0; irq < MAX_INTR_SOURCES; irq++)
			if (intrlevel[irq] & (1 << level))
				irqs |= (1UL << irq);
		ci->ci_imask[level] = irqs | unusedirqs;
	}

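	/*
	 * Make the masks cumulative: raising the IPL must never unmask
	 * an interrupt that a lower IPL already blocks.
	 */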
	for (level = 0; level < (NIPL - 1); level++)
		ci->ci_imask[level + 1] |= ci->ci_imask[level];

	for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
		int maxlevel = IPL_NONE;
		int minlevel = IPL_HIGH;

		if (ci->ci_isources[irq] == NULL)
			continue;
		for (q = ci->ci_isources[irq]->is_handlers; q;
		    q = q->ih_next) {
			if (q->ih_level < minlevel)
				minlevel = q->ih_level;
			if (q->ih_level > maxlevel)
				maxlevel = q->ih_level;
		}
		ci->ci_isources[irq]->is_maxlevel = maxlevel;
		ci->ci_isources[irq]->is_minlevel = minlevel;
	}

	for (level = 0; level < NIPL; level++)
		ci->ci_iunmask[level] = ~ci->ci_imask[level];
}

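/*
 * Find or allocate an interrupt source slot for the given pic/pin on
 * cpu "ci".  On the primary CPU the first NUM_LEGACY_IRQS slots are
 * reserved for legacy IRQs, so new allocations start above them there.
 */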
int
intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin,
    int *index)
{
	int start, slot, i;
	struct intrsource *isp;

	start = CPU_IS_PRIMARY(ci) ? NUM_LEGACY_IRQS : 0;
	slot = -1;

	for (i = 0; i < start; i++) {
		isp = ci->ci_isources[i];
		if (isp != NULL && isp->is_pic == pic && isp->is_pin == pin) {
			slot = i;
			start = MAX_INTR_SOURCES;
			break;
		}
	}
	for (i = start; i < MAX_INTR_SOURCES; i++) {
		isp = ci->ci_isources[i];
		if (isp != NULL && isp->is_pic == pic && isp->is_pin == pin) {
			slot = i;
			break;
		}
		if (isp == NULL && slot == -1) {
			slot = i;
			continue;
		}
	}
	if (slot == -1) {
		return EBUSY;
	}

	isp = ci->ci_isources[slot];
	if (isp == NULL) {
		isp = malloc(sizeof (struct intrsource), M_DEVBUF,
		    M_NOWAIT|M_ZERO);
		if (isp == NULL) {
			return ENOMEM;
		}
		snprintf(isp->is_evname, sizeof (isp->is_evname),
		    "pin %d", pin);
		ci->ci_isources[slot] = isp;
	}

	*index = slot;
	return 0;
}

/*
 * A simple round-robin allocator to assign interrupts to CPUs.
 */
int
intr_allocate_slot(struct pic *pic, int legacy_irq, int pin, int level,
    struct cpu_info **cip, int *index, int *idt_slot)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct intrsource *isp;
	int slot, idtvec, error;

	/*
	 * If a legacy IRQ is wanted, try to use a fixed slot pointing
	 * at the primary CPU. In the case of IO APICs, multiple pins
	 * may map to one legacy IRQ, but they should not be shared
	 * in that case, so the first one gets the legacy slot, but
	 * a subsequent allocation with a different pin will get
	 * a different slot.
	 */
	if (legacy_irq != -1) {
		ci = &cpu_info_primary;
		/* must check for duplicate pic + pin first */
		for (slot = 0 ; slot < MAX_INTR_SOURCES ; slot++) {
			isp = ci->ci_isources[slot];
			if (isp != NULL && isp->is_pic == pic &&
			    isp->is_pin == pin) {
				goto duplicate;
			}
		}
		slot = legacy_irq;
		isp = ci->ci_isources[slot];
		if (isp == NULL) {
			isp = malloc(sizeof (struct intrsource), M_DEVBUF,
			    M_NOWAIT|M_ZERO);
			if (isp == NULL)
				return ENOMEM;
			snprintf(isp->is_evname, sizeof (isp->is_evname),
			    "pin %d", pin);

			ci->ci_isources[slot] = isp;
		} else {
			if (isp->is_pic != pic || isp->is_pin != pin) {
				if (pic == &i8259_pic)
					return EINVAL;
				goto other;
			}
		}
duplicate:
		if (pic == &i8259_pic)
			idtvec = ICU_OFFSET + legacy_irq;
		else {
#ifdef IOAPIC_HWMASK
			if (level > isp->is_maxlevel) {
#else
			if (isp->is_minlevel == 0 || level < isp->is_minlevel) {
#endif
				idtvec = idt_vec_alloc(APIC_LEVEL(level),
				    IDT_INTR_HIGH);
				if (idtvec == 0)
					return EBUSY;
			} else
				idtvec = isp->is_idtvec;
		}
	} else {
other:
		/*
		 * Otherwise, look for a free slot elsewhere. If cip is null, it
		 * means try primary cpu but accept secondary, otherwise we need
		 * a slot on the requested cpu.
		 */
		if (*cip == NULL)
			ci = &cpu_info_primary;
		else
			ci = *cip;

		error = intr_allocate_slot_cpu(ci, pic, pin, &slot);
		if (error == 0)
			goto found;
		/* Can't alloc on the requested cpu, fail. */
		if (*cip != NULL)
			return EBUSY;

		/*
		 * ..now try the others.
		 */
		CPU_INFO_FOREACH(cii, ci) {
			if (CPU_IS_PRIMARY(ci))
				continue;
			error = intr_allocate_slot_cpu(ci, pic, pin, &slot);
			if (error == 0)
				goto found;
		}
		return EBUSY;
found:
		if (pic->pic_allocidtvec) {
			idtvec = pic->pic_allocidtvec(pic, pin,
			    APIC_LEVEL(level), IDT_INTR_HIGH);
		} else {
			idtvec = idt_vec_alloc(APIC_LEVEL(level),
			    IDT_INTR_HIGH);
		}
		if (idtvec == 0) {
			free(ci->ci_isources[slot], M_DEVBUF,
			    sizeof (struct intrsource));
			ci->ci_isources[slot] = NULL;
			return EBUSY;
		}
	}
	*idt_slot = idtvec;
	*index = slot;
	*cip = ci;
	return 0;
}

/*
 * True if the system has any non-level interrupts which are shared
 * on the same pin.
 */
int intr_shared_edge;

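/*
 * Establish an interrupt handler for the given pic/pin.  "level" is the
 * IPL, possibly or'ed with IPL_MPSAFE and/or IPL_WAKEUP.  Returns an
 * opaque cookie for intr_disestablish(), or NULL on failure.
 */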
void *
intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level,
    struct cpu_info *ci, int (*handler)(void *), void *arg, const char *what)
{
	struct intrhand **p, *q, *ih;
	int slot, error, idt_vec;
	struct intrsource *source;
	struct intrstub *stubp;
	int flags;

#ifdef DIAGNOSTIC
	if (legacy_irq != -1 && (legacy_irq < 0 || legacy_irq > 15))
		panic("intr_establish: bad legacy IRQ value");

	if (legacy_irq == -1 && pic == &i8259_pic)
		panic("intr_establish: non-legacy IRQ on i8259");
#endif

	flags = level & (IPL_MPSAFE | IPL_WAKEUP);
	level &= ~(IPL_MPSAFE | IPL_WAKEUP);

	KASSERT(level <= IPL_TTY || level >= IPL_CLOCK || flags & IPL_MPSAFE);

	error = intr_allocate_slot(pic, legacy_irq, pin, level, &ci, &slot,
	    &idt_vec);
	if (error != 0) {
		printf("failed to allocate interrupt slot for PIC %s pin %d\n",
		    pic->pic_dev.dv_xname, pin);
		return NULL;
	}

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL) {
		printf("intr_establish: can't allocate handler info\n");
		return NULL;
	}

	source = ci->ci_isources[slot];

	if (source->is_handlers != NULL &&
	    source->is_pic->pic_type != pic->pic_type) {
		free(ih, M_DEVBUF, sizeof(*ih));
		printf("intr_establish: can't share intr source between "
		    "different PIC types (legacy_irq %d pin %d slot %d)\n",
		    legacy_irq, pin, slot);
		return NULL;
	}

	source->is_pin = pin;
	source->is_pic = pic;

	switch (source->is_type) {
	case IST_NONE:
		source->is_type = type;
		break;
	case IST_EDGE:
		intr_shared_edge = 1;
		/* FALLTHROUGH */
	case IST_LEVEL:
		if (source->is_type == type)
			break;
	case IST_PULSE:
		if (type != IST_NONE) {
			printf("intr_establish: pic %s pin %d: can't share "
			    "type %d with %d\n", pic->pic_name, pin,
			    source->is_type, type);
			free(ih, M_DEVBUF, sizeof(*ih));
			return NULL;
		}
		break;
	default:
		panic("intr_establish: bad intr type %d for pic %s pin %d",
		    source->is_type, pic->pic_dev.dv_xname, pin);
	}

	if (!cold)
		pic->pic_hwmask(pic, pin);

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
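	 * Handlers are kept sorted by descending ih_level.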
	 */
	for (p = &ci->ci_isources[slot]->is_handlers;
	    (q = *p) != NULL && q->ih_level > level;
	    p = &q->ih_next)
		;

	ih->ih_fun = handler;
	ih->ih_arg = arg;
	ih->ih_next = *p;
	ih->ih_level = level;
	ih->ih_flags = flags;
	ih->ih_pin = pin;
	ih->ih_cpu = ci;
	ih->ih_slot = slot;
	evcount_attach(&ih->ih_count, what, &source->is_idtvec);

	*p = ih;

	intr_calculatemasks(ci);

	if (ci->ci_isources[slot]->is_resume == NULL ||
	    source->is_idtvec != idt_vec) {
		if (source->is_idtvec != 0 && source->is_idtvec != idt_vec)
			idt_vec_free(source->is_idtvec);
		source->is_idtvec = idt_vec;
		stubp = type == IST_LEVEL ?
		    &pic->pic_level_stubs[slot] : &pic->pic_edge_stubs[slot];
		ci->ci_isources[slot]->is_resume = stubp->ist_resume;
		ci->ci_isources[slot]->is_recurse = stubp->ist_recurse;
		setgate(&idt[idt_vec], stubp->ist_entry, 0, SDT_SYS386IGT,
		    SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	}

	pic->pic_addroute(pic, ci, pin, idt_vec, type);

	if (!cold)
		pic->pic_hwunmask(pic, pin);

#ifdef INTRDEBUG
	printf("allocated pic %s type %s pin %d level %d to cpu%u slot %d idt entry %d\n",
	    pic->pic_name, type == IST_EDGE ? "edge" : "level", pin, level,
	    ci->ci_apicid, slot, idt_vec);
#endif

	return (ih);
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(struct intrhand *ih)
{
	struct intrhand **p, *q;
	struct cpu_info *ci;
	struct pic *pic;
	struct intrsource *source;
	int idtvec;

	ci = ih->ih_cpu;
	pic = ci->ci_isources[ih->ih_slot]->is_pic;
	source = ci->ci_isources[ih->ih_slot];
	idtvec = source->is_idtvec;

	pic->pic_hwmask(pic, ih->ih_pin);
	x86_atomic_clearbits_u64(&ci->ci_ipending, (1UL << ih->ih_slot));

	/*
	 * Remove the handler from the chain.
	 */
	for (p = &source->is_handlers; (q = *p) != NULL && q != ih;
	    p = &q->ih_next)
		;
	if (q == NULL) {
		panic("intr_disestablish: handler not registered");
	}

	*p = q->ih_next;

	intr_calculatemasks(ci);
	if (source->is_handlers == NULL)
		pic->pic_delroute(pic, ci, ih->ih_pin, idtvec, source->is_type);
	else
		pic->pic_hwunmask(pic, ih->ih_pin);

#ifdef INTRDEBUG
	printf("cpu%u: remove slot %d (pic %s pin %d vec %d)\n",
	    ci->ci_apicid, ih->ih_slot, pic->pic_dev.dv_xname, ih->ih_pin,
	    idtvec);
#endif

	if (source->is_handlers == NULL) {
		free(source, M_DEVBUF, sizeof (struct intrsource));
		ci->ci_isources[ih->ih_slot] = NULL;
		if (pic != &i8259_pic)
			idt_vec_free(idtvec);
	}

	evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof(*ih));
}

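/*
 * Run a single established handler.  Called from the interrupt stubs;
 * takes the kernel lock for handlers not marked IPL_MPSAFE and records
 * the IPL being handled in ci_handled_intr_level.
 */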
int
intr_handler(struct intrframe *frame, struct intrhand *ih)
{
	struct cpu_info *ci = curcpu();
	int floor;
	int rc;
#ifdef MULTIPROCESSOR
	int need_lock;
#endif

	/*
	 * We may not be able to mask MSIs, so block non-wakeup
	 * interrupts while we're suspended.
	 */
	if (intr_suspended && (ih->ih_flags & IPL_WAKEUP) == 0) {
		intr_nowake = ih;
		return 0;
	}

#ifdef MULTIPROCESSOR
	if (ih->ih_flags & IPL_MPSAFE)
		need_lock = 0;
	else
		need_lock = 1;

	if (need_lock)
		__mp_lock(&kernel_lock);
#endif
	floor = ci->ci_handled_intr_level;
	ci->ci_handled_intr_level = ih->ih_level;
	rc = (*ih->ih_fun)(ih->ih_arg ? ih->ih_arg : frame);
	ci->ci_handled_intr_level = floor;
#ifdef MULTIPROCESSOR
	if (need_lock)
		__mp_unlock(&kernel_lock);
#endif
	return rc;
}

/*
 * Fake interrupt handler structures for the benefit of symmetry with
 * other interrupt sources, and the benefit of intr_calculatemasks()
 */
struct intrhand fake_softclock_intrhand;
struct intrhand fake_softnet_intrhand;
struct intrhand fake_softtty_intrhand;
struct intrhand fake_timer_intrhand;
struct intrhand fake_ipi_intrhand;
#if NXEN > 0
struct intrhand fake_xen_intrhand;
#endif
#if NHYPERV > 0
struct intrhand fake_hyperv_intrhand;
#endif

/*
 * Initialize all handlers that aren't dynamically allocated, and exist
 * for each CPU.
 */
void
cpu_intr_init(struct cpu_info *ci)
{
	struct intrsource *isp;
#if NLAPIC > 0 && defined(MULTIPROCESSOR) && 0
	int i;
#endif

	isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (isp == NULL)
		panic("can't allocate fixed interrupt source");
	isp->is_recurse = Xsoftclock;
	isp->is_resume = Xsoftclock;
	fake_softclock_intrhand.ih_level = IPL_SOFTCLOCK;
	isp->is_handlers = &fake_softclock_intrhand;
	isp->is_pic = &softintr_pic;
	ci->ci_isources[SIR_CLOCK] = isp;
	isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (isp == NULL)
		panic("can't allocate fixed interrupt source");
	isp->is_recurse = Xsoftnet;
	isp->is_resume = Xsoftnet;
	fake_softnet_intrhand.ih_level = IPL_SOFTNET;
	isp->is_handlers = &fake_softnet_intrhand;
	isp->is_pic = &softintr_pic;
	ci->ci_isources[SIR_NET] = isp;
	isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (isp == NULL)
		panic("can't allocate fixed interrupt source");
	isp->is_recurse = Xsofttty;
	isp->is_resume = Xsofttty;
	fake_softtty_intrhand.ih_level = IPL_SOFTTTY;
	isp->is_handlers = &fake_softtty_intrhand;
	isp->is_pic = &softintr_pic;
	ci->ci_isources[SIR_TTY] = isp;
#if NLAPIC > 0
	isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (isp == NULL)
		panic("can't allocate fixed interrupt source");
	isp->is_recurse = Xrecurse_lapic_ltimer;
	isp->is_resume = Xresume_lapic_ltimer;
	fake_timer_intrhand.ih_level = IPL_CLOCK;
	isp->is_handlers = &fake_timer_intrhand;
	isp->is_pic = &local_pic;
	ci->ci_isources[LIR_TIMER] = isp;
#ifdef MULTIPROCESSOR
	isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (isp == NULL)
		panic("can't allocate fixed interrupt source");
	isp->is_recurse = Xrecurse_lapic_ipi;
	isp->is_resume = Xresume_lapic_ipi;
	fake_ipi_intrhand.ih_level = IPL_IPI;
	isp->is_handlers = &fake_ipi_intrhand;
	isp->is_pic = &local_pic;
	ci->ci_isources[LIR_IPI] = isp;
#endif
#if NXEN > 0
	isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (isp == NULL)
		panic("can't allocate fixed interrupt source");
	isp->is_recurse = Xrecurse_xen_upcall;
	isp->is_resume = Xresume_xen_upcall;
	fake_xen_intrhand.ih_level = IPL_NET;
	isp->is_handlers = &fake_xen_intrhand;
	isp->is_pic = &local_pic;
	ci->ci_isources[LIR_XEN] = isp;
#endif
#if NHYPERV > 0
	isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (isp == NULL)
		panic("can't allocate fixed interrupt source");
	isp->is_recurse = Xrecurse_hyperv_upcall;
	isp->is_resume = Xresume_hyperv_upcall;
	fake_hyperv_intrhand.ih_level = IPL_NET;
	isp->is_handlers = &fake_hyperv_intrhand;
	isp->is_pic = &local_pic;
	ci->ci_isources[LIR_HYPERV] = isp;
#endif
#endif /* NLAPIC */

	intr_calculatemasks(ci);

}

void
intr_printconfig(void)
{
#ifdef INTRDEBUG
	int i;
	struct intrhand *ih;
	struct intrsource *isp;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	CPU_INFO_FOREACH(cii, ci) {
		printf("cpu%d: interrupt masks:\n", ci->ci_apicid);
		for (i = 0; i < NIPL; i++)
			printf("IPL %d mask %lx unmask %lx\n", i,
			    (u_long)ci->ci_imask[i], (u_long)ci->ci_iunmask[i]);
		for (i = 0; i < MAX_INTR_SOURCES; i++) {
			isp = ci->ci_isources[i];
			if (isp == NULL)
				continue;
			printf("cpu%u source %d is pin %d from pic %s maxlevel %d\n",
			    ci->ci_apicid, i, isp->is_pin,
			    isp->is_pic->pic_name, isp->is_maxlevel);
			for (ih = isp->is_handlers; ih != NULL;
			    ih = ih->ih_next)
				printf("\thandler %p level %d\n",
				    ih->ih_fun, ih->ih_level);

		}
	}
#endif
}

void
intr_barrier(void *cookie)
{
	struct intrhand *ih = cookie;
	sched_barrier(ih->ih_cpu);
}

#ifdef SUSPEND

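/*
 * Before suspending, mask every interrupt source whose (first) handler
 * is not marked IPL_WAKEUP; intr_disable_wakeup() undoes this on resume.
 */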
void
intr_enable_wakeup(void)
{
	struct cpu_info *ci = curcpu();
	struct pic *pic;
	int irq, pin;

	for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
		if (ci->ci_isources[irq] == NULL)
			continue;

		if (ci->ci_isources[irq]->is_handlers->ih_flags & IPL_WAKEUP)
			continue;

		pic = ci->ci_isources[irq]->is_pic;
		pin = ci->ci_isources[irq]->is_pin;
		if (pic->pic_hwmask)
			pic->pic_hwmask(pic, pin);
	}

	intr_suspended = 1;
}

void
intr_disable_wakeup(void)
{
	struct cpu_info *ci = curcpu();
	struct pic *pic;
	int irq, pin;

	intr_suspended = 0;

	for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
		if (ci->ci_isources[irq] == NULL)
			continue;

		if (ci->ci_isources[irq]->is_handlers->ih_flags & IPL_WAKEUP)
			continue;

		pic = ci->ci_isources[irq]->is_pic;
		pin = ci->ci_isources[irq]->is_pin;
		if (pic->pic_hwunmask)
			pic->pic_hwunmask(pic, pin);
	}

	if (intr_nowake) {
		printf("last non-wakeup interrupt: irq%d/%s\n",
		    *(int *)intr_nowake->ih_count.ec_data,
		    intr_nowake->ih_count.ec_name);
		intr_nowake = NULL;
	}
}

#endif

/*
 * Add a mask to cpl, and return the old value of cpl.
 */
int
splraise(int nlevel)
{
	int olevel;
	struct cpu_info *ci = curcpu();

	KASSERT(nlevel >= IPL_NONE);

	olevel = ci->ci_ilevel;
	ci->ci_ilevel = MAX(ci->ci_ilevel, nlevel);
	return (olevel);
}

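/*
 * Typical use (a sketch; drivers normally call the splfoo()/splx()
 * wrappers rather than splraise()/spllower() directly):
 *
 *	int s = splraise(IPL_BIO);
 *	... touch state shared with an interrupt handler ...
 *	splx(s);
 */
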
/*
 * Restore a value to cpl (unmasking interrupts). If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
int
spllower(int nlevel)
{
	int olevel;
	struct cpu_info *ci = curcpu();
	u_int64_t imask;
	u_long flags;

	imask = IUNMASK(ci, nlevel);
	olevel = ci->ci_ilevel;

	flags = intr_disable();

	if (ci->ci_ipending & imask) {
		Xspllower(nlevel);
	} else {
		ci->ci_ilevel = nlevel;
		intr_restore(flags);
	}
	return (olevel);
}

/*
 * Software interrupt registration
 *
 * We hand-code this to ensure that it's atomic.
 *
 * XXX always scheduled on the current CPU.
 */
void
softintr(int sir)
{
	struct cpu_info *ci = curcpu();

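	/*
	 * Atomically set the pending bit for this soft interrupt; it is
	 * dispatched once the IPL drops below its level.
	 */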
	__asm volatile("lock; orq %1, %0" :
	    "=m"(ci->ci_ipending) : "ir" (1UL << sir));
}