xref: /netbsd-src/sys/arch/xen/x86/xen_intr.c (revision d90047b5d07facf36e6c01dcc0bded8997ce9cc2)
1 /*	$NetBSD: xen_intr.c,v 1.27 2020/05/07 19:48:58 bouyer Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum, and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.27 2020/05/07 19:48:58 bouyer Exp $");
34 
35 #include "opt_multiprocessor.h"
36 
37 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/kmem.h>
40 #include <sys/cpu.h>
41 #include <sys/device.h>
42 
43 #include <xen/intr.h>
44 #include <xen/evtchn.h>
45 #include <xen/xenfunc.h>
46 
47 #include <uvm/uvm.h>
48 
49 #include <machine/cpu.h>
50 #include <machine/intr.h>
51 
52 #include "acpica.h"
53 #include "ioapic.h"
54 #include "lapic.h"
55 #include "pci.h"
56 
57 #if NACPICA > 0
58 #include <dev/acpi/acpivar.h>
59 #endif
60 
61 #if NIOAPIC > 0 || NACPICA > 0
62 #include <machine/i82093var.h>
63 #endif
64 
65 #if NLAPIC > 0
66 #include <machine/i82489var.h>
67 #endif
68 
69 #if NPCI > 0
70 #include <dev/pci/ppbreg.h>
71 #ifdef __HAVE_PCI_MSI_MSIX
72 #include <x86/pci/msipic.h>
73 #include <x86/pci/pci_msi_machdep.h>
74 #endif
75 #endif
76 
#if defined(MULTIPROCESSOR)
/*
 * Human-readable names for the per-CPU Xen IPI event counters; attached
 * in xen_cpu_intr_init() via evcnt_attach_dynamic().
 */
static const char *xen_ipi_names[XEN_NIPIS] = XEN_IPI_NAMES;
#endif
80 
81 #if !defined(XENPVHVM)
82 void
83 x86_disable_intr(void)
84 {
85 	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
86 	x86_lfence();
87 }
88 
89 void
90 x86_enable_intr(void)
91 {
92 	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;
93 	__insn_barrier();
94 	_vci->evtchn_upcall_mask = 0;
95 	x86_lfence(); /* unmask then check (avoid races) */
96 	if (__predict_false(_vci->evtchn_upcall_pending))
97 		hypervisor_force_callback();
98 }
99 
100 #endif /* !XENPVHVM */
101 
102 u_long
103 xen_read_psl(void)
104 {
105 
106 	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
107 }
108 
109 void
110 xen_write_psl(u_long psl)
111 {
112 	struct cpu_info *ci = curcpu();
113 
114 	ci->ci_vcpu->evtchn_upcall_mask = psl;
115 	xen_rmb();
116 	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
117 	    	hypervisor_force_callback();
118 	}
119 }
120 
121 void *
122 xen_intr_establish(int legacy_irq, struct pic *pic, int pin,
123     int type, int level, int (*handler)(void *), void *arg,
124     bool known_mpsafe)
125 {
126 
127 	return xen_intr_establish_xname(legacy_irq, pic, pin, type, level,
128 	    handler, arg, known_mpsafe, "XEN");
129 }
130 
131 void *
132 xen_intr_establish_xname(int legacy_irq, struct pic *pic, int pin,
133     int type, int level, int (*handler)(void *), void *arg,
134     bool known_mpsafe, const char *xname)
135 {
136 	const char *intrstr;
137 	char intrstr_buf[INTRIDBUF];
138 
139 	if (pic->pic_type == PIC_XEN) {
140 		struct intrhand *rih;
141 
142 		intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
143 		    sizeof(intrstr_buf));
144 
145 		rih = event_set_handler(pin, handler, arg, level,
146 		    intrstr, xname, known_mpsafe, NULL);
147 
148 		if (rih == NULL) {
149 			printf("%s: can't establish interrupt\n", __func__);
150 			return NULL;
151 		}
152 
153 		return rih;
154 	} 	/* Else we assume pintr */
155 
156 #if (NPCI > 0 || NISA > 0) && defined(XENPV) /* XXX: support PVHVM pirq */
157 	struct pintrhand *pih;
158 	int gsi;
159 	int evtchn;
160 	/* the hack below is from x86's intr_establish_xname() */
161 	bool mpsafe = (known_mpsafe || level != IPL_VM);
162 
163 	KASSERTMSG(legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < NUM_XEN_IRQS),
164 	    "bad legacy IRQ value: %d", legacy_irq);
165 	KASSERTMSG(!(legacy_irq == -1 && pic == &i8259_pic),
166 	    "non-legacy IRQon i8259 ");
167 
168 	gsi = xen_pic_to_gsi(pic, pin);
169 	KASSERTMSG(gsi < NR_EVENT_CHANNELS, "gsi %d >= NR_EVENT_CHANNELS %u",
170 	    gsi, (int)NR_EVENT_CHANNELS);
171 
172 	intrstr = intr_create_intrid(gsi, pic, pin, intrstr_buf,
173 	    sizeof(intrstr_buf));
174 
175 	if (irq2port[gsi] == 0) {
176 		extern struct cpu_info phycpu_info_primary; /* XXX */
177 		struct cpu_info *ci = &phycpu_info_primary;
178 
179 		pic->pic_addroute(pic, ci, pin, gsi, type);
180 
181 		evtchn = bind_pirq_to_evtch(gsi);
182 		KASSERT(evtchn > 0);
183 		KASSERT(evtchn < NR_EVENT_CHANNELS);
184 		irq2port[gsi] = evtchn + 1;
185 		xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);
186 	} else {
187 		/*
188 		 * Shared interrupt - we can't rebind.
189 		 * The port is shared instead.
190 		 */
191 		evtchn = irq2port[gsi] - 1;
192 	}
193 
194 	pih = pirq_establish(gsi, evtchn, handler, arg, level,
195 			     intrstr, xname, mpsafe);
196 	pih->pic = pic;
197 	return pih;
198 #endif /* NPCI > 0 || NISA > 0 */
199 
200 	/* FALLTHROUGH */
201 	return NULL;
202 }
203 
/*
 * Mask an interrupt source.
 *
 * Not implemented for Xen yet: any call is a hard error, so callers
 * must not rely on this entry point (aliased to intr_mask on XENPV).
 */
void
xen_intr_mask(struct intrhand *ih)
{
	/* XXX */
	panic("xen_intr_mask: not yet implemented.");
}
213 
/*
 * Unmask an interrupt source.
 *
 * Not implemented for Xen yet: any call is a hard error, so callers
 * must not rely on this entry point (aliased to intr_unmask on XENPV).
 */
void
xen_intr_unmask(struct intrhand *ih)
{
	/* XXX */
	panic("xen_intr_unmask: not yet implemented.");
}
223 
224 /*
225  * Deregister an interrupt handler.
226  */
227 void
228 xen_intr_disestablish(struct intrhand *ih)
229 {
230 
231 	if (ih->ih_pic->pic_type == PIC_XEN) {
232 		event_remove_handler(ih->ih_pin, ih->ih_realfun,
233 		    ih->ih_realarg);
234 		/* event_remove_handler frees ih */
235 		return;
236 	}
237 #if defined(DOM0OPS) && defined(XENPV)
238 	/*
239 	 * Cache state, to prevent a use after free situation with
240 	 * ih.
241 	 */
242 
243 	struct pintrhand *pih = (struct pintrhand *)ih;
244 
245 	int pirq = pih->pirq;
246 	int port = pih->evtch;
247 	KASSERT(irq2port[pirq] != 0);
248 
249 	pirq_disestablish(pih);
250 
251 	if (evtsource[port] == NULL) {
252 			/*
253 			 * Last handler was removed by
254 			 * event_remove_handler().
255 			 *
256 			 * We can safely unbind the pirq now.
257 			 */
258 
259 			port = unbind_pirq_from_evtch(pirq);
260 			KASSERT(port == pih->evtch);
261 			irq2port[pirq] = 0;
262 	}
263 #endif
264 	return;
265 }
266 
267 /* MI interface for kern_cpu.c */
268 void xen_cpu_intr_redistribute(void);
269 
270 void
271 xen_cpu_intr_redistribute(void)
272 {
273 	KASSERT(mutex_owned(&cpu_lock));
274 	KASSERT(mp_online);
275 
276 	return;
277 }
278 
279 /* MD - called by x86/cpu.c */
280 #if defined(INTRSTACKSIZE)
/*
 * Red-zone helpers: with DIAGNOSTIC kernels these pass their argument
 * through; otherwise they collapse to a compile-time false/zero so the
 * guard-page code optimizes away entirely.
 */
static inline bool
redzone_const_or_false(bool x)
{
#ifdef DIAGNOSTIC
	return x;
#else
	return false;
#endif /* !DIAGNOSTIC */
}

static inline int
redzone_const_or_zero(int x)
{
	if (redzone_const_or_false(true))
		return x;
	return 0;
}
296 #endif
297 
void xen_cpu_intr_init(struct cpu_info *);
/*
 * Per-CPU interrupt initialization, called from x86/cpu.c.
 * Sets up preemption state and interrupt masks, carves out a dedicated
 * interrupt stack (optionally guarded by unmapped red-zone pages), and
 * attaches the Xen IPI event counters.
 */
void
xen_cpu_intr_init(struct cpu_info *ci)
{
#if defined(__HAVE_PREEMPTION)
	x86_init_preempt(ci);
#endif
	x86_intr_calculatemasks(ci);

#if defined(INTRSTACKSIZE)
	vaddr_t istack;

	/*
	 * If the red zone is activated, protect both the top and
	 * the bottom of the stack with an unmapped page.
	 */
	istack = uvm_km_alloc(kernel_map,
	    INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (redzone_const_or_false(true)) {
		/* Unmap the guard pages at both ends of the allocation. */
		pmap_kremove(istack, PAGE_SIZE);
		pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
		pmap_update(pmap_kernel());
	}

	/*
	 * 33 used to be 1.  Arbitrarily reserve 32 more register_t's
	 * of space for ddb(4) to examine some subroutine arguments
	 * and to hunt for the next stack frame.
	 */
	ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
	    INTRSTACKSIZE - 33 * sizeof(register_t);
#endif

#ifdef MULTIPROCESSOR
	/* One event counter per IPI type, named from xen_ipi_names[]. */
	for (int i = 0; i < XEN_NIPIS; i++)
		evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(ci->ci_dev), xen_ipi_names[i]);
#endif

	/* -1 == not currently handling any interrupt. */
	ci->ci_idepth = -1;
}
340 
341 /*
342  * Everything below from here is duplicated from x86/intr.c
343  * When intr.c and xen_intr.c are unified, these will need to be
344  * merged.
345  */
346 
347 u_int xen_cpu_intr_count(struct cpu_info *ci);
348 
349 u_int
350 xen_cpu_intr_count(struct cpu_info *ci)
351 {
352 
353 	KASSERT(ci->ci_nintrhand >= 0);
354 
355 	return ci->ci_nintrhand;
356 }
357 
358 static const char *
359 xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
360 {
361 	KASSERT(pic->pic_type == PIC_XEN);
362 
363 	KASSERT(port >= 0);
364 	KASSERT(port < NR_EVENT_CHANNELS);
365 
366 	snprintf(buf, len, "%s chan %d", pic->pic_name, port);
367 
368 	return buf;
369 }
370 
371 static const char *
372 legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
373 {
374 	int legacy_irq;
375 
376 	KASSERT(pic->pic_type == PIC_I8259);
377 #if NLAPIC > 0
378 	KASSERT(APIC_IRQ_ISLEGACY(ih));
379 
380 	legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
381 #else
382 	legacy_irq = ih;
383 #endif
384 	KASSERT(legacy_irq >= 0 && legacy_irq < 16);
385 
386 	snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);
387 
388 	return buf;
389 }
390 
const char * xintr_string(intr_handle_t ih, char *buf, size_t len);

/*
 * Format an interrupt handle into a human-readable string in buf and
 * return it.  The decoding depends on configuration: with ioapics the
 * handle may name an apic/pin pair, with only a lapic it is decoded as
 * a legacy irq, and otherwise the raw handle value is printed.
 * A zero handle is invalid and panics.
 */
const char *
xintr_string(intr_handle_t ih, char *buf, size_t len)
{
#if NIOAPIC > 0
	struct ioapic_softc *pic;
#endif

	if (ih == 0)
		panic("%s: bogus handle 0x%" PRIx64, __func__, ih);

#if NIOAPIC > 0
	if (ih & APIC_INT_VIA_APIC) {
		pic = ioapic_find(APIC_IRQ_APIC(ih));
		if (pic != NULL) {
			snprintf(buf, len, "%s pin %d",
			    device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
		} else {
			/* ioapic not attached: print the raw decoding. */
			snprintf(buf, len,
			    "apic %d int %d (irq %d)",
			    APIC_IRQ_APIC(ih),
			    APIC_IRQ_PIN(ih),
			    APIC_IRQ_LEGACY_IRQ(ih));
		}
	} else
		snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));

#elif NLAPIC > 0
	snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
#else
	snprintf(buf, len, "irq %d", (int) ih);
#endif
	return buf;

}
427 
/*
 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
 * by MI code and intrctl(8).
 */
const char * xen_intr_create_intrid(int legacy_irq, struct pic *pic,
    int pin, char *buf, size_t len);

const char *
xen_intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len)
{
	int ih = 0;

#if NPCI > 0 && defined(XENPV)
#if defined(__HAVE_PCI_MSI_MSIX)
	/* MSI/MSI-X: encode device id + vector into an MSI-style handle. */
	if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
		uint64_t pih;
		int dev, vec;

		dev = msipic_get_devid(pic);
		vec = pin;
		pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
			| __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
			| APIC_INT_VIA_MSI;
		if (pic->pic_type == PIC_MSI)
			MSI_INT_MAKE_MSI(pih);
		else if (pic->pic_type == PIC_MSIX)
			MSI_INT_MAKE_MSIX(pih);

		return x86_pci_msi_string(NULL, pih, buf, len);
	}
#endif /* __HAVE_PCI_MSI_MSIX */
#endif

	/* Event channel: the pin is the port number. */
	if (pic->pic_type == PIC_XEN) {
		ih = pin;	/* Port == pin */
		return xen_intr_string(pin, buf, len, pic);
	}

	/*
	 * If the device is pci, "legacy_irq" is always -1.  The least
	 * significant 8 bits of "ih" are only used in intr_string() to
	 * show the irq number.
	 * If the device is "legacy"(such as floppy), it should not use
	 * intr_string().
	 */
	if (pic->pic_type == PIC_I8259) {
		ih = legacy_irq;
		return legacy_intr_string(ih, buf, len, pic);
	}

#if NIOAPIC > 0 || NACPICA > 0
	/* ioapic: pack apic id and pin into the handle. */
	ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
	    | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
	if (pic->pic_type == PIC_IOAPIC) {
		ih |= APIC_INT_VIA_APIC;
	}
	/* Low bits carry the pin for intr_string() display. */
	ih |= pin;
	return intr_string(ih, buf, len);
#endif

	return NULL; /* No pic found! */
}
489 
/* Shared placeholder handed out by xen_intr_allocate_io_intrsource(). */
static struct intrsource xen_dummy_intrsource;
491 
492 struct intrsource *
493 xen_intr_allocate_io_intrsource(const char *intrid)
494 {
495 	/* Nothing to do, required by MSI code */
496 	return &xen_dummy_intrsource;
497 }
498 
/*
 * Hook required by the MSI code.  The dummy source returned by
 * xen_intr_allocate_io_intrsource() is static, so there is nothing
 * to free.
 */
void
xen_intr_free_io_intrsource(const char *intrid)
{
}
504 
#if defined(XENPV)
/*
 * On pure PV kernels the generic x86 interrupt entry points resolve
 * directly to the Xen implementations above via strong aliases, so MI
 * code keeps calling the usual x86 names without indirection.
 */
__strong_alias(x86_read_psl, xen_read_psl);
__strong_alias(x86_write_psl, xen_write_psl);

__strong_alias(intr_string, xintr_string);
__strong_alias(intr_create_intrid, xen_intr_create_intrid);
__strong_alias(intr_establish, xen_intr_establish);
__strong_alias(intr_establish_xname, xen_intr_establish_xname);
__strong_alias(intr_mask, xen_intr_mask);
__strong_alias(intr_unmask, xen_intr_unmask);
__strong_alias(intr_disestablish, xen_intr_disestablish);
__strong_alias(cpu_intr_redistribute, xen_cpu_intr_redistribute);
__strong_alias(cpu_intr_count, xen_cpu_intr_count);
__strong_alias(cpu_intr_init, xen_cpu_intr_init);
__strong_alias(intr_allocate_io_intrsource, xen_intr_allocate_io_intrsource);
__strong_alias(intr_free_io_intrsource, xen_intr_free_io_intrsource);
#endif /* XENPV */
522