/*	$NetBSD: xen_intr.c,v 1.5 2008/04/28 20:23:40 martin Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.5 2008/04/28 20:23:40 martin Exp $");

#include <sys/param.h>

#include <machine/cpu.h>
#include <machine/intr.h>

/*
 * Raise the interrupt priority level (cpl) to nlevel if it is higher
 * than the current level, and return the old value of cpl.
 */
int
splraise(int nlevel)
{
	int olevel;
	struct cpu_info *ci = curcpu();

	olevel = ci->ci_ilevel;
	if (nlevel > olevel)
		ci->ci_ilevel = nlevel;
	__insn_barrier();
	return (olevel);
}

/*
 * Restore a value to cpl (unmasking interrupts).  If any newly unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	uint32_t imask;
	u_long psl;

	__insn_barrier();

	imask = IUNMASK(ci, nlevel);
	psl = x86_read_psl();
	x86_disable_intr();
	if (ci->ci_ipending & imask) {
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		x86_write_psl(psl);
	}
}
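
/*
 * Illustrative use (a hypothetical caller, not code from this file):
 * the level returned by splraise() is later handed back to spllower()
 * to close the critical section, usually via the spl*()/splx() macros.
 *
 *	int s = splraise(IPL_NET);
 *	... modify state shared with interrupt handlers ...
 *	spllower(s);
 */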

#ifndef __x86_64__

/*
 * Software interrupt scheduling
 *
 * Atomically mark a software interrupt pending in ci_ipending; we
 * hand-code the update to ensure that it's atomic.
 *
 * XXX always scheduled on the current CPU.
 */
void
softintr(int sir)
{
	struct cpu_info *ci = curcpu();

	/* One orl instruction: the read-modify-write cannot be interrupted. */
	__asm volatile("orl %1, %0" : "+m" (ci->ci_ipending) : "ir" (1 << sir));
}
#endif
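
/*
 * Illustrative use (hypothetical): a hardware interrupt handler that
 * defers work to the network soft interrupt would do
 *
 *	softintr(SIR_NET);
 *
 * setting bit SIR_NET in ci_ipending; the soft interrupt handler then
 * runs from Xspllower() once cpl drops below its level.
 */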
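/*
 * Under Xen the kernel does not toggle the real EFLAGS interrupt bit.
 * The __cli() and __sti() hypervisor macros set and clear the vcpu's
 * event-channel upcall mask instead, which is the paravirtual
 * equivalent of cli/sti.
 */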
void
x86_disable_intr(void)
{
	__cli();
}

void
x86_enable_intr(void)
{
	__sti();
}

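/*
 * On Xen the "PSL" is not the processor status longword but the
 * vcpu's event-channel upcall mask: non-zero means event (interrupt)
 * delivery is blocked.
 */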
u_long
x86_read_psl(void)
{

	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
}
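/*
 * Restore the upcall mask.  If events were posted while delivery was
 * blocked and the new mask unblocks them (psl == 0), force a callback
 * so the pending events are handled now rather than at the next event.
 */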
void
x86_write_psl(u_long psl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_vcpu->evtchn_upcall_mask = psl;
	x86_lfence();
	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
		hypervisor_force_callback();
	}
}