xref: /netbsd-src/sys/arch/xen/x86/xen_intr.c (revision 8b0f9554ff8762542c4defc4f70e1eb76fb508fa)
1 /*	$NetBSD: xen_intr.c,v 1.2 2007/11/22 16:17:05 bouyer Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum, and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.2 2007/11/22 16:17:05 bouyer Exp $");
41 
42 #include <sys/param.h>
43 
44 #include <machine/cpu.h>
45 #include <machine/intr.h>
46 
47 /*
48  * Add a mask to cpl, and return the old value of cpl.
49  */
50 int
51 splraise(int nlevel)
52 {
53 	int olevel;
54 	struct cpu_info *ci = curcpu();
55 
56 	olevel = ci->ci_ilevel;
57 	if (nlevel > olevel)
58 		ci->ci_ilevel = nlevel;
59 	__insn_barrier();
60 	return (olevel);
61 }
62 
/*
 * Restore a value to cpl (unmasking interrupts).  If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 *
 * The statement order below is critical: the pending check is made
 * with event delivery disabled so that no new interrupt can slip in
 * between the check and the level change.
 */
void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	u_int32_t imask;
	u_long psl;

	/* Compiler barrier: protected accesses complete before we drop. */
	__insn_barrier();

	imask = IUNMASK(ci, nlevel);	/* sources unmasked at nlevel */
	psl = x86_read_psl();		/* save current event-mask state */
	x86_disable_intr();		/* block events during the check */
	if (ci->ci_ipending & imask) {
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		x86_write_psl(psl);	/* restore saved event-mask state */
	}
}
87 
88 #ifndef __x86_64__
89 
/*
 * Software interrupt registration
 *
 * We hand-code this to ensure that it's atomic.
 *
 * XXX always scheduled on the current CPU.
 */
void
softintr(int sir)
{
	struct cpu_info *ci = curcpu();

	/*
	 * Set bit "sir" in ci_ipending with a single read-modify-write
	 * "orl" instruction, so the update cannot be torn by an
	 * interrupt on this CPU.  Note there is no "lock" prefix, so
	 * this is NOT atomic with respect to other CPUs — consistent
	 * with the current-CPU-only XXX above.
	 */
	__asm volatile("orl %1, %0" : "=m"(ci->ci_ipending) : "ir" (1 << sir));
}
104 #endif
105 
/*
 * Disable interrupt (event) delivery.
 *
 * NOTE(review): __cli() is presumably the Xen paravirtual stand-in
 * for the real "cli" instruction (masking event-channel upcalls
 * rather than touching EFLAGS) — confirm against its definition.
 */
void
x86_disable_intr(void)
{
	__cli();
}
111 
/*
 * Enable interrupt (event) delivery.
 *
 * NOTE(review): __sti() is presumably the Xen paravirtual stand-in
 * for the real "sti" instruction — confirm against its definition.
 */
void
x86_enable_intr(void)
{
	__sti();
}
117 
/*
 * Return the current "psl": under Xen this is the vcpu's
 * event-channel upcall mask (nonzero = events masked), not the
 * hardware EFLAGS value.
 *
 * XXX hardcodes vcpu_info[0]: only correct for virtual CPU 0.
 */
u_long
x86_read_psl(void)
{

	return (HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask);
}
124 
125 void
126 x86_write_psl(u_long psl)
127 {
128 
129 	HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask = psl;
130 	x86_lfence();
131 	if (HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_pending &&
132 	    psl == 0) {
133 	    	hypervisor_force_callback();
134 	}
135 }
136