/*	$NetBSD: xen_intr.c,v 1.16 2019/05/09 17:09:51 bouyer Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.16 2019/05/09 17:09:51 bouyer Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

#include <sys/cpu.h>

#include <xen/evtchn.h>
#include <xen/xenfunc.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/intr.h>

#include "acpica.h"
#include "ioapic.h"
#include "lapic.h"
#include "pci.h"

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#endif

#if NIOAPIC > 0 || NACPICA > 0
#include <machine/i82093var.h>
#endif

#if NLAPIC > 0
#include <machine/i82489var.h>
#endif

#if NPCI > 0
#include <dev/pci/ppbreg.h>
#endif

/*
 * Restore a value to cpl (unmasking interrupts).  If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
void xen_spllower(int nlevel);

void
xen_spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	uint32_t xmask;
	u_long psl;

	if (ci->ci_ilevel <= nlevel)
		return;

	__insn_barrier();

	xmask = XUNMASK(ci, nlevel);
	psl = xen_read_psl();
	x86_disable_intr();
	if (ci->ci_xpending & xmask) {
		KASSERT(psl == 0);
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		xen_write_psl(psl);
	}
}

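/*
 * Xen PV equivalent of cli: mask event delivery to this vCPU by
 * setting evtchn_upcall_mask in the shared vcpu_info.
 */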
void
x86_disable_intr(void)
{
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
	x86_lfence();
}

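/*
 * Xen PV equivalent of sti: unmask event delivery, then force an
 * upcall if events arrived while we were masked.
 */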
void
x86_enable_intr(void)
{
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;
	__insn_barrier();
	_vci->evtchn_upcall_mask = 0;
	x86_lfence(); /* unmask then check (avoid races) */
	if (__predict_false(_vci->evtchn_upcall_pending))
		hypervisor_force_callback();
}

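/*
 * The "PSL" on Xen is the vCPU's event-channel upcall mask, standing
 * in for the EFLAGS interrupt-enable bit.
 */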
u_long
xen_read_psl(void)
{

	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
}

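/*
 * Restore a previously read upcall mask.  When unmasking, force an
 * upcall if events became pending in the meantime, as in
 * x86_enable_intr().
 */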
void
xen_write_psl(u_long psl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_vcpu->evtchn_upcall_mask = psl;
	xen_rmb();
	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
		hypervisor_force_callback();
	}
}

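/*
 * Thin wrapper around xen_intr_establish_xname() using the default
 * handler name "XEN".
 */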
void *
xen_intr_establish(int legacy_irq, struct pic *pic, int pin,
    int type, int level, int (*handler)(void *), void *arg,
    bool known_mpsafe)
{

	return xen_intr_establish_xname(legacy_irq, pic, pin, type, level,
	    handler, arg, known_mpsafe, "XEN");
}

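/*
 * Establish an interrupt handler.  Xen event channels (PIC_XEN) are
 * wired up via event_set_handler(); physical interrupts on PV dom0
 * are routed through a pirq bound to an event channel instead.
 */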
void *
xen_intr_establish_xname(int legacy_irq, struct pic *pic, int pin,
    int type, int level, int (*handler)(void *), void *arg,
    bool known_mpsafe, const char *xname)
{
	const char *intrstr;
	char intrstr_buf[INTRIDBUF];

	if (pic->pic_type == PIC_XEN) {
		struct intrhand *rih;

		/*
		 * event_set_handler interprets `level != IPL_VM' to
		 * mean MP-safe, so we require the caller to match that
		 * for the moment.
		 */
		KASSERT(known_mpsafe == (level != IPL_VM));

		intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
		    sizeof(intrstr_buf));

		event_set_handler(pin, handler, arg, level, intrstr, xname);

		rih = kmem_zalloc(sizeof(*rih), cold ? KM_NOSLEEP : KM_SLEEP);
		if (rih == NULL) {
			printf("%s: can't allocate handler info\n", __func__);
			return NULL;
		}

		/*
		 * XXX:
		 * This is just a copy for API conformance.
		 * The real ih is lost in the innards of
		 * event_set_handler(), where the details of
		 * biglock_wrapper etc. are taken care of.
		 * All that goes away when we nuke event_set_handler()
		 * et al. and unify with x86/intr.c
		 */
		rih->ih_pin = pin; /* port */
		rih->ih_fun = rih->ih_realfun = handler;
		rih->ih_arg = rih->ih_realarg = arg;
		rih->pic_type = pic->pic_type;
		return rih;
	}	/* Else we assume pintr */

#if (NPCI > 0 || NISA > 0) && defined(XENPV) /* XXX: support PVHVM pirq */
	struct pintrhand *pih;
	int gsi;
	int vector, evtchn;

	KASSERTMSG(legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < NUM_XEN_IRQS),
	    "bad legacy IRQ value: %d", legacy_irq);
	KASSERTMSG(!(legacy_irq == -1 && pic == &i8259_pic),
	    "non-legacy IRQ on i8259");

	gsi = xen_pic_to_gsi(pic, pin);

	intrstr = intr_create_intrid(gsi, pic, pin, intrstr_buf,
	    sizeof(intrstr_buf));

	vector = xen_vec_alloc(gsi);

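	/*
	 * irq2port[] stores evtchn + 1, so 0 can mean "no event channel
	 * bound to this GSI yet".
	 */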
	if (irq2port[gsi] == 0) {
		extern struct cpu_info phycpu_info_primary; /* XXX */
		struct cpu_info *ci = &phycpu_info_primary;

		pic->pic_addroute(pic, ci, pin, vector, type);

		evtchn = bind_pirq_to_evtch(gsi);
		KASSERT(evtchn > 0);
		KASSERT(evtchn < NR_EVENT_CHANNELS);
		irq2port[gsi] = evtchn + 1;
		xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);
	} else {
		/*
		 * Shared interrupt - we can't rebind.
		 * The port is shared instead.
		 */
		evtchn = irq2port[gsi] - 1;
	}

	pih = pirq_establish(gsi, evtchn, handler, arg, level,
			     intrstr, xname);
	pih->pic_type = pic->pic_type;
	return pih;
#endif /* (NPCI > 0 || NISA > 0) && defined(XENPV) */

	/* FALLTHROUGH */
	return NULL;
}

/*
 * Deregister an interrupt handler.
 */
void
xen_intr_disestablish(struct intrhand *ih)
{

	if (ih->pic_type == PIC_XEN) {
		event_remove_handler(ih->ih_pin, ih->ih_realfun,
		    ih->ih_realarg);
		kmem_free(ih, sizeof(*ih));
		return;
	}
#if defined(DOM0OPS)
	/*
	 * Cache state, to prevent a use after free situation with
	 * ih.
	 */

	struct pintrhand *pih = (struct pintrhand *)ih;

	int pirq = pih->pirq;
	int port = pih->evtch;
	KASSERT(irq2port[pirq] != 0);

	pirq_disestablish(pih);

	if (evtsource[port] == NULL) {
		/*
		 * Last handler was removed by
		 * event_remove_handler().
		 *
		 * We can safely unbind the pirq now.  Compare against
		 * the cached port: pih is gone after pirq_disestablish().
		 */
		int unbound __diagused = unbind_pirq_from_evtch(pirq);
		KASSERT(unbound == port);
		irq2port[pirq] = 0;
	}
#endif
	return;
}

/* MI interface for kern_cpu.c */
void xen_cpu_intr_redistribute(void);

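/*
 * No-op on Xen for now: event channels stay where they were bound at
 * establish time, so there is nothing to redistribute.
 */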
void
xen_cpu_intr_redistribute(void)
{
	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(mp_online);

	return;
}

/* MD - called by x86/cpu.c */
#if defined(INTRSTACKSIZE)
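/*
 * These helpers evaluate to their argument in DIAGNOSTIC kernels and
 * to false/0 otherwise, so the red-zone code below constant-folds
 * away when it is not wanted.
 */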
static inline bool
redzone_const_or_false(bool x)
{
#ifdef DIAGNOSTIC
	return x;
#else
	return false;
#endif /* !DIAGNOSTIC */
}

static inline int
redzone_const_or_zero(int x)
{
	return redzone_const_or_false(true) ? x : 0;
}
#endif

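/*
 * Per-CPU interrupt setup: build the per-IPL unmask table (each level
 * additionally masks its own bit on top of the level below it) and
 * carve out this CPU's interrupt stack.
 */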
void xen_cpu_intr_init(struct cpu_info *);

void
xen_cpu_intr_init(struct cpu_info *ci)
{
	int i; /* XXX: duplicate */

	ci->ci_xunmask[0] = 0xfffffffe;
	for (i = 1; i < NIPL; i++)
		ci->ci_xunmask[i] = ci->ci_xunmask[i - 1] & ~(1 << i);

#if defined(INTRSTACKSIZE)
	vaddr_t istack;

	/*
	 * If the red zone is activated, protect both the top and
	 * the bottom of the stack with an unmapped page.
	 */
	istack = uvm_km_alloc(kernel_map,
	    INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (redzone_const_or_false(true)) {
		pmap_kremove(istack, PAGE_SIZE);
		pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
		pmap_update(pmap_kernel());
	}

	/*
	 * 33 used to be 1.  Arbitrarily reserve 32 more register_t's
	 * of space for ddb(4) to examine some subroutine arguments
	 * and to hunt for the next stack frame.
	 */
	ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
	    INTRSTACKSIZE - 33 * sizeof(register_t);
#endif

	ci->ci_idepth = -1;
}

/*
 * Everything below this point is duplicated from x86/intr.c.
 * When intr.c and xen_intr.c are unified, these will need to be
 * merged.
 */

u_int xen_cpu_intr_count(struct cpu_info *ci);

u_int
xen_cpu_intr_count(struct cpu_info *ci)
{

	KASSERT(ci->ci_nintrhand >= 0);

	return ci->ci_nintrhand;
}

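/*
 * Format the interrupt id for a Xen event channel: "<pic> channel <port>".
 */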
static const char *
xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
{
	KASSERT(pic->pic_type == PIC_XEN);

	KASSERT(port >= 0);
	KASSERT(port < NR_EVENT_CHANNELS);

	snprintf(buf, len, "%s channel %d", pic->pic_name, port);

	return buf;
}

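/*
 * Format the interrupt id for an i8259 pin: "<pic> pin <irq>".
 */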
static const char *
legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
{
	int legacy_irq;

	KASSERT(pic->pic_type == PIC_I8259);
#if NLAPIC > 0
	KASSERT(APIC_IRQ_ISLEGACY(ih));

	legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
#else
	legacy_irq = ih;
#endif
	KASSERT(legacy_irq >= 0 && legacy_irq < 16);

	snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);

	return buf;
}

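/*
 * Produce a human-readable name for an interrupt handle, using the
 * ioapic/lapic information compiled into the kernel when available.
 */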
const char * xintr_string(intr_handle_t ih, char *buf, size_t len);

const char *
xintr_string(intr_handle_t ih, char *buf, size_t len)
{
#if NIOAPIC > 0
	struct ioapic_softc *pic;
#endif

	if (ih == 0)
		panic("%s: bogus handle 0x%" PRIx64, __func__, ih);

#if NIOAPIC > 0
	if (ih & APIC_INT_VIA_APIC) {
		pic = ioapic_find(APIC_IRQ_APIC(ih));
		if (pic != NULL) {
			snprintf(buf, len, "%s pin %d",
			    device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
		} else {
			snprintf(buf, len,
			    "apic %d int %d (irq %d)",
			    APIC_IRQ_APIC(ih),
			    APIC_IRQ_PIN(ih),
			    APIC_IRQ_LEGACY_IRQ(ih));
		}
	} else
		snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));

#elif NLAPIC > 0
	snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
#else
	snprintf(buf, len, "irq %d", (int) ih);
#endif
	return buf;
}

/*
 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
 * by MI code and intrctl(8).
 */
const char * xen_intr_create_intrid(int legacy_irq, struct pic *pic,
    int pin, char *buf, size_t len);

const char *
xen_intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf,
    size_t len)
{
	int ih = 0;

#if NPCI > 0
#if defined(__HAVE_PCI_MSI_MSIX)
	if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
		uint64_t pih;
		int dev, vec;

		dev = msipic_get_devid(pic);
		vec = pin;
		pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
			| __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
			| APIC_INT_VIA_MSI;
		if (pic->pic_type == PIC_MSI)
			MSI_INT_MAKE_MSI(pih);
		else if (pic->pic_type == PIC_MSIX)
			MSI_INT_MAKE_MSIX(pih);

		return x86_pci_msi_string(NULL, pih, buf, len);
	}
#endif /* __HAVE_PCI_MSI_MSIX */
#endif

	if (pic->pic_type == PIC_XEN) {
		ih = pin;	/* Port == pin */
		return xen_intr_string(pin, buf, len, pic);
	}

	/*
	 * If the device is pci, "legacy_irq" is always -1.  The least
	 * significant 8 bits of "ih" are only used in intr_string() to
	 * show the irq number.
	 * If the device is "legacy" (such as a floppy), it should not
	 * use intr_string().
	 */
	if (pic->pic_type == PIC_I8259) {
		ih = legacy_irq;
		return legacy_intr_string(ih, buf, len, pic);
	}

#if NIOAPIC > 0 || NACPICA > 0
	ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
	    | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
	if (pic->pic_type == PIC_IOAPIC) {
		ih |= APIC_INT_VIA_APIC;
	}
	ih |= pin;
	return intr_string(ih, buf, len);
#endif

	return NULL; /* No pic found! */
}

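/*
 * On PV-only kernels the generic x86 interrupt entry points resolve
 * directly to the Xen implementations above.  XENPVHVM kernels do
 * their own dispatch, so the aliases are omitted there.
 */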
#if !defined(XENPVHVM)
__strong_alias(spllower, xen_spllower);
__strong_alias(x86_read_psl, xen_read_psl);
__strong_alias(x86_write_psl, xen_write_psl);

__strong_alias(intr_string, xintr_string);
__strong_alias(intr_create_intrid, xen_intr_create_intrid);
__strong_alias(intr_establish, xen_intr_establish);
__strong_alias(intr_establish_xname, xen_intr_establish_xname);
__strong_alias(intr_disestablish, xen_intr_disestablish);
__strong_alias(cpu_intr_redistribute, xen_cpu_intr_redistribute);
__strong_alias(cpu_intr_count, xen_cpu_intr_count);
__strong_alias(cpu_intr_init, xen_cpu_intr_init);
#endif /* !XENPVHVM */