/*	$NetBSD: xen.h,v 1.7 2004/05/07 15:51:04 cl Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#ifndef _LOCORE

struct xen_netinfo {
	uint32_t xi_ifno;
	char *xi_root;
	uint32_t xi_ip[5];
};

union xen_cmdline_parseinfo {
	char			xcp_bootdev[16]; /* sizeof(dv_xname) */
	struct xen_netinfo	xcp_netinfo;
	char			xcp_console[16];
};

#define	XEN_PARSE_BOOTDEV	0
#define	XEN_PARSE_NETINFO	1
#define	XEN_PARSE_CONSOLE	2

void	xen_parse_cmdline(int, union xen_cmdline_parseinfo *);
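
/*
 * Example (illustrative sketch only, not part of the original header):
 * a caller that wants the boot device name from the Xen command line
 * might do something like
 *
 *	union xen_cmdline_parseinfo xcp;
 *
 *	memset(&xcp, 0, sizeof(xcp));
 *	xen_parse_cmdline(XEN_PARSE_BOOTDEV, &xcp);
 *	if (xcp.xcp_bootdev[0] != '\0')
 *		printf("boot device: %s\n", xcp.xcp_bootdev);
 */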

void	xenconscn_attach(void);

void	xenmachmem_init(void);
void	xenprivcmd_init(void);
void	xenvfr_init(void);

typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#ifdef XENDEBUG
void printk(const char *, ...);
void vprintk(const char *, va_list);
#endif /* XENDEBUG */

#endif /* !_LOCORE */

#define hypervisor_asm_ack(num) \
	movl	HYPERVISOR_shared_info,%eax		;\
	lock						;\
	btsl	$num,EVENTS_MASK(%eax)

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in hypervisor-if.h, but that header can't be
 * pulled in here because these values are needed by start-of-day
 * assembly.  The .h files need a bit more cleanup...
 */

#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS		0x0819
#define FLAT_RING1_DS		0x0821
#define FLAT_RING3_CS		0x082b
#define FLAT_RING3_DS		0x0833
#endif

#define __KERNEL_CS        FLAT_RING1_CS
#define __KERNEL_DS        FLAT_RING1_DS

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

#include <machine/hypervisor-ifs/hypervisor-if.h>

/* some function prototypes */
void trap_init(void);


/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event_enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */
#define unlikely(x)  __builtin_expect((x),0)
#define __save_flags(x)                                                       \
do {                                                                          \
    (x) = test_bit(EVENTS_MASTER_ENABLE_BIT,                                  \
                   &HYPERVISOR_shared_info->events_mask);                     \
    barrier();                                                                \
} while (0)

#define __restore_flags(x)                                                    \
do {                                                                          \
    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
    if (x) set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);          \
    barrier();                                                                \
} while (0)
/*     if ( unlikely(_shared->events) && (x) ) do_hypervisor_callback(NULL);     \ */

#define __cli()                                                               \
do {                                                                          \
    clear_bit(EVENTS_MASTER_ENABLE_BIT, &HYPERVISOR_shared_info->events_mask);\
    barrier();                                                                \
} while (0)

#define __sti()                                                               \
do {                                                                          \
    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
    set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);                 \
    barrier();                                                                \
} while (0)
/*     if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL);            \ */
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define save_and_sti(x) __save_and_sti(x)
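
/*
 * __save_and_cli()/__save_and_sti() are referenced above but not defined
 * in this header.  A minimal sketch follows; it is an assumption, built
 * only from the obvious composition of __save_flags() with __cli()/__sti().
 */
#ifndef __save_and_cli
#define __save_and_cli(x)                                                     \
do {                                                                          \
    __save_flags(x);                                                          \
    __cli();                                                                  \
} while (0)
#define __save_and_sti(x)                                                     \
do {                                                                          \
    __save_flags(x);                                                          \
    __sti();                                                                  \
} while (0)
#endif

/*
 * Usage example (illustrative only): a short critical section with
 * virtual events masked:
 *
 *	int flags;
 *
 *	save_and_cli(flags);
 *	... touch data shared with the event callback ...
 *	restore_flags(flags);
 */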



/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define __LOCK_PREFIX ""
#define __LOCK ""
#define __ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


#define xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
static inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
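
/*
 * Example (illustrative only): atomically swap in a new value and pick
 * up the old one, e.g. to consume a word of pending flags in one step:
 *
 *	static volatile unsigned long pending;
 *	unsigned long old = xchg(&pending, 0UL);
 *
 * The result type follows the pointed-to type, so the same macro works
 * for 1-, 2- and 4-byte objects.
 */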

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( __LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (__ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}
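
/*
 * Example (illustrative only; assumes the shared_info `events' word from
 * hypervisor-if.h holds pending-event bits, as the commented-out callback
 * hooks above suggest, and `nr' is whatever event bit the caller cares
 * about):
 *
 *	if (test_and_clear_bit(nr, &HYPERVISOR_shared_info->events))
 *		... the event was pending and is now acknowledged ...
 */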

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (__ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))


/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( __LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (__ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( __LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (__ADDR)
                :"Ir" (nr));
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                __LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
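
/*
 * Example (illustrative only): a counter that may be bumped from both
 * interrupt context and normal context without extra locking:
 *
 *	static atomic_t nevents = { 0 };
 *	atomic_inc(&nevents);
 */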


#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
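
/*
 * Example (illustrative only): read the CPU's 64-bit time-stamp counter.
 * The "=A" constraint places the result in edx:eax, so `val' must be a
 * 64-bit variable:
 *
 *	u64 tsc;
 *
 *	rdtscll(tsc);
 */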


#endif /* !_LOCORE */

#endif /* _OS_H_ */