xref: /netbsd-src/sys/arch/xen/x86/xen_intr.c (revision 867d70fc718005c0918b8b8b2f9d7f2d52d0a0db)
1 /*	$NetBSD: xen_intr.c,v 1.30 2022/05/24 14:00:23 bouyer Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum, and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.30 2022/05/24 14:00:23 bouyer Exp $");
34 
35 #include "opt_multiprocessor.h"
36 #include "opt_pci.h"
37 
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/kmem.h>
41 #include <sys/cpu.h>
42 #include <sys/device.h>
43 
44 #include <xen/intr.h>
45 #include <xen/evtchn.h>
46 #include <xen/xenfunc.h>
47 
48 #include <uvm/uvm.h>
49 
50 #include <machine/cpu.h>
51 #include <machine/intr.h>
52 
53 #include "acpica.h"
54 #include "ioapic.h"
55 #include "lapic.h"
56 #include "pci.h"
57 
58 #if NACPICA > 0
59 #include <dev/acpi/acpivar.h>
60 #endif
61 
62 #if NIOAPIC > 0 || NACPICA > 0
63 #include <machine/i82093var.h>
64 #endif
65 
66 #if NLAPIC > 0
67 #include <machine/i82489var.h>
68 #endif
69 
70 #if NPCI > 0
71 #include <dev/pci/ppbreg.h>
72 #ifdef __HAVE_PCI_MSI_MSIX
73 #include <x86/pci/msipic.h>
74 #include <x86/pci/pci_msi_machdep.h>
75 #endif
76 #endif
77 
78 #if defined(MULTIPROCESSOR)
79 static const char *xen_ipi_names[XEN_NIPIS] = XEN_IPI_NAMES;
80 #endif
81 
82 #if !defined(XENPVHVM)
83 void
84 x86_disable_intr(void)
85 {
86 	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
87 	x86_lfence();
88 }
89 
90 void
91 x86_enable_intr(void)
92 {
93 	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;
94 	__insn_barrier();
95 	_vci->evtchn_upcall_mask = 0;
96 	x86_lfence(); /* unmask then check (avoid races) */
97 	if (__predict_false(_vci->evtchn_upcall_pending))
98 		hypervisor_force_callback();
99 }
100 
101 #endif /* !XENPVHVM */
102 
103 u_long
104 xen_read_psl(void)
105 {
106 
107 	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
108 }
109 
110 void
111 xen_write_psl(u_long psl)
112 {
113 	struct cpu_info *ci = curcpu();
114 
115 	ci->ci_vcpu->evtchn_upcall_mask = psl;
116 	xen_rmb();
117 	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
118 	    	hypervisor_force_callback();
119 	}
120 }
121 
122 void *
123 xen_intr_establish(int legacy_irq, struct pic *pic, int pin,
124     int type, int level, int (*handler)(void *), void *arg,
125     bool known_mpsafe)
126 {
127 
128 	return xen_intr_establish_xname(legacy_irq, pic, pin, type, level,
129 	    handler, arg, known_mpsafe, "XEN");
130 }
131 
132 void *
133 xen_intr_establish_xname(int legacy_irq, struct pic *pic, int pin,
134     int type, int level, int (*handler)(void *), void *arg,
135     bool known_mpsafe, const char *xname)
136 {
137 	const char *intrstr;
138 	char intrstr_buf[INTRIDBUF];
139 
140 	if (pic->pic_type == PIC_XEN) {
141 		struct intrhand *rih;
142 
143 		intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
144 		    sizeof(intrstr_buf));
145 
146 		rih = event_set_handler(pin, handler, arg, level,
147 		    intrstr, xname, known_mpsafe, NULL);
148 
149 		if (rih == NULL) {
150 			printf("%s: can't establish interrupt\n", __func__);
151 			return NULL;
152 		}
153 
154 		return rih;
155 	} 	/* Else we assume pintr */
156 
157 #if (NPCI > 0 || NISA > 0) && defined(XENPV) /* XXX: support PVHVM pirq */
158 	struct pintrhand *pih;
159 	int gsi;
160 	int evtchn;
161 	/* the hack below is from x86's intr_establish_xname() */
162 	bool mpsafe = (known_mpsafe || level != IPL_VM);
163 
164 	KASSERTMSG(legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < NUM_XEN_IRQS),
165 	    "bad legacy IRQ value: %d", legacy_irq);
166 	KASSERTMSG(!(legacy_irq == -1 && pic == &i8259_pic),
167 	    "non-legacy IRQon i8259 ");
168 
169 	gsi = xen_pic_to_gsi(pic, pin);
170 	if (gsi < 0)
171 		return NULL;
172 	KASSERTMSG(gsi < NR_EVENT_CHANNELS, "gsi %d >= NR_EVENT_CHANNELS %u",
173 	    gsi, (int)NR_EVENT_CHANNELS);
174 
175 	intrstr = intr_create_intrid(gsi, pic, pin, intrstr_buf,
176 	    sizeof(intrstr_buf));
177 
178 	if (irq2port[gsi] == 0) {
179 		extern struct cpu_info phycpu_info_primary; /* XXX */
180 		struct cpu_info *ci = &phycpu_info_primary;
181 
182 		pic->pic_addroute(pic, ci, pin, gsi, type);
183 
184 		evtchn = bind_pirq_to_evtch(gsi);
185 		KASSERT(evtchn > 0);
186 		KASSERT(evtchn < NR_EVENT_CHANNELS);
187 		irq2port[gsi] = evtchn + 1;
188 		xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);
189 	} else {
190 		/*
191 		 * Shared interrupt - we can't rebind.
192 		 * The port is shared instead.
193 		 */
194 		evtchn = irq2port[gsi] - 1;
195 	}
196 
197 	pih = pirq_establish(gsi, evtchn, handler, arg, level,
198 			     intrstr, xname, mpsafe);
199 	pih->pic = pic;
200 	if (msipic_is_msi_pic(pic))
201 		pic->pic_hwunmask(pic, pin);
202 	return pih;
203 #endif /* NPCI > 0 || NISA > 0 */
204 
205 	/* FALLTHROUGH */
206 	return NULL;
207 }
208 
/*
 * Mask an interrupt source.
 *
 * Not implemented for Xen yet; reaching this is a caller bug, hence
 * the panic rather than a silent no-op.
 */
void
xen_intr_mask(struct intrhand *ih)
{
	/* XXX */
	panic("xen_intr_mask: not yet implemented.");
}
218 
/*
 * Unmask an interrupt source.
 *
 * Not implemented for Xen yet; reaching this is a caller bug, hence
 * the panic rather than a silent no-op.
 */
void
xen_intr_unmask(struct intrhand *ih)
{
	/* XXX */
	panic("xen_intr_unmask: not yet implemented.");
}
228 
229 /*
230  * Deregister an interrupt handler.
231  */
232 void
233 xen_intr_disestablish(struct intrhand *ih)
234 {
235 
236 	if (ih->ih_pic->pic_type == PIC_XEN) {
237 		event_remove_handler(ih->ih_pin, ih->ih_realfun,
238 		    ih->ih_realarg);
239 		/* event_remove_handler frees ih */
240 		return;
241 	}
242 #if defined(DOM0OPS) && defined(XENPV)
243 	/*
244 	 * Cache state, to prevent a use after free situation with
245 	 * ih.
246 	 */
247 
248 	struct pintrhand *pih = (struct pintrhand *)ih;
249 
250 	int pirq = pih->pirq;
251 	int port = pih->evtch;
252 	KASSERT(irq2port[pirq] != 0);
253 
254 	pirq_disestablish(pih);
255 
256 	if (evtsource[port] == NULL) {
257 			/*
258 			 * Last handler was removed by
259 			 * event_remove_handler().
260 			 *
261 			 * We can safely unbind the pirq now.
262 			 */
263 
264 			port = unbind_pirq_from_evtch(pirq);
265 			KASSERT(port == pih->evtch);
266 			irq2port[pirq] = 0;
267 	}
268 #endif
269 	return;
270 }
271 
272 /* MI interface for kern_cpu.c */
273 void xen_cpu_intr_redistribute(void);
274 
275 void
276 xen_cpu_intr_redistribute(void)
277 {
278 	KASSERT(mutex_owned(&cpu_lock));
279 	KASSERT(mp_online);
280 
281 	return;
282 }
283 
284 /* MD - called by x86/cpu.c */
285 #if defined(INTRSTACKSIZE)
/*
 * Evaluate to the argument on DIAGNOSTIC kernels and to a compile-time
 * false otherwise, so the red-zone code folds away when disabled.
 */
static inline bool
redzone_const_or_false(bool x)
{
	bool result;

#ifdef DIAGNOSTIC
	result = x;
#else
	result = false;
#endif /* !DIAGNOSTIC */
	return result;
}
295 
/*
 * Either "x" (DIAGNOSTIC kernels) or 0: sizing helper for the optional
 * red-zone guard pages around the interrupt stack.
 */
static inline int
redzone_const_or_zero(int x)
{
	if (redzone_const_or_false(true))
		return x;
	return 0;
}
301 #endif
302 
void xen_cpu_intr_init(struct cpu_info *);
/*
 * Per-CPU interrupt initialization (called from x86/cpu.c): preemption
 * state, interrupt masks, an interrupt stack optionally guarded by
 * unmapped red-zone pages, and per-CPU IPI event counters.
 */
void
xen_cpu_intr_init(struct cpu_info *ci)
{
#if defined(__HAVE_PREEMPTION)
	x86_init_preempt(ci);
#endif
	x86_intr_calculatemasks(ci);

#if defined(INTRSTACKSIZE)
	vaddr_t istack;

	/*
	 * If the red zone is activated, protect both the top and
	 * the bottom of the stack with an unmapped page.
	 */
	istack = uvm_km_alloc(kernel_map,
	    INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (redzone_const_or_false(true)) {
		/* unmap one guard page at each end of the allocation */
		pmap_kremove(istack, PAGE_SIZE);
		pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
		pmap_update(pmap_kernel());
	}

	/*
	 * 33 used to be 1.  Arbitrarily reserve 32 more register_t's
	 * of space for ddb(4) to examine some subroutine arguments
	 * and to hunt for the next stack frame.
	 */
	ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
	    INTRSTACKSIZE - 33 * sizeof(register_t);
#endif

#ifdef MULTIPROCESSOR
	/* one event counter per IPI type for this CPU */
	for (int i = 0; i < XEN_NIPIS; i++)
		evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(ci->ci_dev), xen_ipi_names[i]);
#endif

	/* -1 == "not currently processing an interrupt" */
	ci->ci_idepth = -1;
}
345 
346 /*
347  * Everything below from here is duplicated from x86/intr.c
348  * When intr.c and xen_intr.c are unified, these will need to be
349  * merged.
350  */
351 
352 u_int xen_cpu_intr_count(struct cpu_info *ci);
353 
354 u_int
355 xen_cpu_intr_count(struct cpu_info *ci)
356 {
357 
358 	KASSERT(ci->ci_nintrhand >= 0);
359 
360 	return ci->ci_nintrhand;
361 }
362 
363 static const char *
364 xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
365 {
366 	KASSERT(pic->pic_type == PIC_XEN);
367 
368 	KASSERT(port >= 0);
369 	KASSERT(port < NR_EVENT_CHANNELS);
370 
371 	snprintf(buf, len, "%s chan %d", pic->pic_name, port);
372 
373 	return buf;
374 }
375 
376 static const char *
377 legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
378 {
379 	int legacy_irq;
380 
381 	KASSERT(pic->pic_type == PIC_I8259);
382 #if NLAPIC > 0
383 	KASSERT(APIC_IRQ_ISLEGACY(ih));
384 
385 	legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
386 #else
387 	legacy_irq = ih;
388 #endif
389 	KASSERT(legacy_irq >= 0 && legacy_irq < 16);
390 
391 	snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);
392 
393 	return buf;
394 }
395 
const char * xintr_string(intr_handle_t ih, char *buf, size_t len);

/*
 * Format a human-readable name for interrupt handle "ih" into "buf"
 * and return "buf".  With I/O APICs the handle encodes APIC id and
 * pin; otherwise it is treated as a plain legacy IRQ number.  A zero
 * handle is invalid and panics.
 */
const char *
xintr_string(intr_handle_t ih, char *buf, size_t len)
{
#if NIOAPIC > 0
	struct ioapic_softc *pic;
#endif

	if (ih == 0)
		panic("%s: bogus handle 0x%" PRIx64, __func__, ih);

#if NIOAPIC > 0
	if (ih & APIC_INT_VIA_APIC) {
		/* routed via an I/O APIC: name it by device if attached */
		pic = ioapic_find(APIC_IRQ_APIC(ih));
		if (pic != NULL) {
			snprintf(buf, len, "%s pin %d",
			    device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
		} else {
			snprintf(buf, len,
			    "apic %d int %d (irq %d)",
			    APIC_IRQ_APIC(ih),
			    APIC_IRQ_PIN(ih),
			    APIC_IRQ_LEGACY_IRQ(ih));
		}
	} else
		snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));

#elif NLAPIC > 0
	snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
#else
	snprintf(buf, len, "irq %d", (int) ih);
#endif
	return buf;

}
432 
/*
 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
 * by MI code and intrctl(8).  Dispatches on the PIC type: MSI/MSI-X,
 * Xen event channel, legacy i8259, or I/O APIC.  Returns a pointer
 * into "buf" (or an MD-owned string for MSI), or NULL if the PIC type
 * is not recognized.
 */
const char * xen_intr_create_intrid(int legacy_irq, struct pic *pic,
    int pin, char *buf, size_t len);

const char *
xen_intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len)
{
	int ih = 0;

#if NPCI > 0 && defined(XENPV)
#if defined(__HAVE_PCI_MSI_MSIX)
	if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
		uint64_t pih;
		int dev, vec;

		/*
		 * Encode device id and vector into an MSI handle, then
		 * tag it MSI or MSI-X so the MD string routine can
		 * tell them apart.
		 */
		dev = msipic_get_devid(pic);
		vec = pin;
		pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
			| __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
			| APIC_INT_VIA_MSI;
		if (pic->pic_type == PIC_MSI)
			MSI_INT_MAKE_MSI(pih);
		else if (pic->pic_type == PIC_MSIX)
			MSI_INT_MAKE_MSIX(pih);

		return x86_pci_msi_string(NULL, pih, buf, len);
	}
#endif /* __HAVE_PCI_MSI_MSIX */
#endif

	if (pic->pic_type == PIC_XEN) {
		ih = pin;	/* Port == pin */
		return xen_intr_string(pin, buf, len, pic);
	}

	/*
	 * If the device is pci, "legacy_irq" is always -1. Least 8 bit of "ih"
	 * is only used in intr_string() to show the irq number.
	 * If the device is "legacy"(such as floppy), it should not use
	 * intr_string().
	 */
	if (pic->pic_type == PIC_I8259) {
		ih = legacy_irq;
		return legacy_intr_string(ih, buf, len, pic);
	}

#if NIOAPIC > 0 || NACPICA > 0
	/* APIC-routed: pack apic id and pin, plus the raw pin in the low bits */
	ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
	    | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
	if (pic->pic_type == PIC_IOAPIC) {
		ih |= APIC_INT_VIA_APIC;
	}
	ih |= pin;
	return intr_string(ih, buf, len);
#endif

	return NULL; /* No pic found! */
}
494 
495 static struct intrsource xen_dummy_intrsource;
496 
497 struct intrsource *
498 xen_intr_allocate_io_intrsource(const char *intrid)
499 {
500 	/* Nothing to do, required by MSI code */
501 	return &xen_dummy_intrsource;
502 }
503 
/*
 * Counterpart of xen_intr_allocate_io_intrsource(); the dummy source
 * is static, so there is nothing to release.
 */
void
xen_intr_free_io_intrsource(const char *intrid)
{
	/* Nothing to do, required by MSI code */
}
509 
#if defined(XENPV)
/*
 * On XENPV kernels the generic x86 interrupt entry points resolve
 * directly to the Xen implementations above via strong aliases.
 */
__strong_alias(x86_read_psl, xen_read_psl);
__strong_alias(x86_write_psl, xen_write_psl);

__strong_alias(intr_string, xintr_string);
__strong_alias(intr_create_intrid, xen_intr_create_intrid);
__strong_alias(intr_establish, xen_intr_establish);
__strong_alias(intr_establish_xname, xen_intr_establish_xname);
__strong_alias(intr_mask, xen_intr_mask);
__strong_alias(intr_unmask, xen_intr_unmask);
__strong_alias(intr_disestablish, xen_intr_disestablish);
__strong_alias(cpu_intr_redistribute, xen_cpu_intr_redistribute);
__strong_alias(cpu_intr_count, xen_cpu_intr_count);
__strong_alias(cpu_intr_init, xen_cpu_intr_init);
__strong_alias(intr_allocate_io_intrsource, xen_intr_allocate_io_intrsource);
__strong_alias(intr_free_io_intrsource, xen_intr_free_io_intrsource);
#endif /* XENPV */
527