/* $NetBSD: xen.h,v 1.31 2009/02/13 21:03:59 bouyer Exp $ */

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
25 */ 26 27 28 #ifndef _XEN_H 29 #define _XEN_H 30 #include "opt_xen.h" 31 32 33 #ifndef _LOCORE 34 35 #include <machine/cpufunc.h> 36 37 struct xen_netinfo { 38 uint32_t xi_ifno; 39 char *xi_root; 40 uint32_t xi_ip[5]; 41 }; 42 43 union xen_cmdline_parseinfo { 44 char xcp_bootdev[16]; /* sizeof(dv_xname) */ 45 struct xen_netinfo xcp_netinfo; 46 char xcp_console[16]; 47 char xcp_pcidevs[64]; 48 }; 49 50 #define XEN_PARSE_BOOTDEV 0 51 #define XEN_PARSE_NETINFO 1 52 #define XEN_PARSE_CONSOLE 2 53 #define XEN_PARSE_BOOTFLAGS 3 54 #define XEN_PARSE_PCIBACK 4 55 56 void xen_parse_cmdline(int, union xen_cmdline_parseinfo *); 57 58 void xenconscn_attach(void); 59 60 void xenprivcmd_init(void); 61 62 void xbdback_init(void); 63 void xennetback_init(void); 64 void xen_shm_init(void); 65 66 void xenevt_event(int); 67 void xenevt_setipending(int, int); 68 void xenevt_notify(void); 69 70 void idle_block(void); 71 72 #if defined(XENDEBUG) || 1 /* XXX */ 73 void printk(const char *, ...); 74 void vprintk(const char *, _BSD_VA_LIST_); 75 #endif 76 77 #endif 78 79 #endif /* _XEN_H */ 80 81 /****************************************************************************** 82 * os.h 83 * 84 * random collection of macros and definition 85 */ 86 87 #ifndef _OS_H_ 88 #define _OS_H_ 89 90 /* 91 * These are the segment descriptors provided for us by the hypervisor. 92 * For now, these are hardwired -- guest OSes cannot update the GDT 93 * or LDT. 94 * 95 * It shouldn't be hard to support descriptor-table frobbing -- let me 96 * know if the BSD or XP ports require flexibility here. 97 */ 98 99 100 /* 101 * these are also defined in xen-public/xen.h but can't be pulled in as 102 * they are used in start of day assembly. Need to clean up the .h files 103 * a bit more... 
104 */ 105 106 #ifdef XEN3 107 #ifndef FLAT_RING1_CS 108 #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ 109 #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ 110 #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ 111 #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ 112 #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ 113 #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ 114 #endif 115 #else /* XEN3 */ 116 #ifndef FLAT_RING1_CS 117 #define FLAT_RING1_CS 0x0819 118 #define FLAT_RING1_DS 0x0821 119 #define FLAT_RING3_CS 0x082b 120 #define FLAT_RING3_DS 0x0833 121 #endif 122 #endif /* XEN3 */ 123 124 #define __KERNEL_CS FLAT_RING1_CS 125 #define __KERNEL_DS FLAT_RING1_DS 126 127 /* Everything below this point is not included by assembler (.S) files. */ 128 #ifndef _LOCORE 129 130 /* some function prototypes */ 131 void trap_init(void); 132 void xpq_flush_cache(void); 133 134 #define xendomain_is_dom0() (xen_start_info.flags & SIF_INITDOMAIN) 135 #define xendomain_is_privileged() (xen_start_info.flags & SIF_PRIVILEGED) 136 137 /* 138 * STI/CLI equivalents. These basically set and clear the virtual 139 * event_enable flag in the shared_info structure. Note that when 140 * the enable bit is set, there may be pending events to be handled. 141 * We may therefore call into do_hypervisor_callback() directly. 
142 */ 143 144 #define __save_flags(x) \ 145 do { \ 146 (x) = curcpu()->ci_vcpu->evtchn_upcall_mask; \ 147 } while (0) 148 149 #define __restore_flags(x) \ 150 do { \ 151 volatile struct vcpu_info *_vci = curcpu()->ci_vcpu; \ 152 __insn_barrier(); \ 153 if ((_vci->evtchn_upcall_mask = (x)) == 0) { \ 154 x86_lfence(); \ 155 if (__predict_false(_vci->evtchn_upcall_pending)) \ 156 hypervisor_force_callback(); \ 157 } \ 158 } while (0) 159 160 #define __cli() \ 161 do { \ 162 curcpu()->ci_vcpu->evtchn_upcall_mask = 1; \ 163 x86_lfence(); \ 164 } while (0) 165 166 #define __sti() \ 167 do { \ 168 volatile struct vcpu_info *_vci = curcpu()->ci_vcpu; \ 169 __insn_barrier(); \ 170 _vci->evtchn_upcall_mask = 0; \ 171 x86_lfence(); /* unmask then check (avoid races) */ \ 172 if (__predict_false(_vci->evtchn_upcall_pending)) \ 173 hypervisor_force_callback(); \ 174 } while (0) 175 176 #define cli() __cli() 177 #define sti() __sti() 178 #define save_flags(x) __save_flags(x) 179 #define restore_flags(x) __restore_flags(x) 180 #define save_and_cli(x) do { \ 181 __save_flags(x); \ 182 __cli(); \ 183 } while (/* CONSTCOND */ 0) 184 #define save_and_sti(x) __save_and_sti(x) 185 186 /* 187 * always assume we're on multiprocessor. We don't know how many CPU the 188 * underlying hardware has. 
189 */ 190 #define __LOCK_PREFIX "lock; " 191 192 #ifdef XEN3 193 #define XATOMIC_T u_long 194 #ifdef __x86_64__ 195 #define LONG_SHIFT 6 196 #define LONG_MASK 63 197 #else /* __x86_64__ */ 198 #define LONG_SHIFT 5 199 #define LONG_MASK 31 200 #endif /* __x86_64__ */ 201 #else /* XEN3 */ 202 #define XATOMIC_T uint32_t 203 #define LONG_SHIFT 5 204 #define LONG_MASK 31 205 #endif /* XEN3 */ 206 207 #define xen_ffs __builtin_ffsl 208 209 static __inline XATOMIC_T 210 xen_atomic_xchg(volatile XATOMIC_T *ptr, unsigned long val) 211 { 212 unsigned long result; 213 214 __asm volatile(__LOCK_PREFIX 215 #ifdef __x86_64__ 216 "xchgq %0,%1" 217 #else 218 "xchgl %0,%1" 219 #endif 220 :"=r" (result) 221 :"m" (*ptr), "0" (val) 222 :"memory"); 223 224 return result; 225 } 226 227 static inline uint16_t 228 xen_atomic_cmpxchg16(volatile uint16_t *ptr, uint16_t val, uint16_t newval) 229 { 230 unsigned long result; 231 232 __asm volatile(__LOCK_PREFIX 233 "cmpxchgw %w1,%2" 234 :"=a" (result) 235 :"q"(newval), "m" (*ptr), "0" (val) 236 :"memory"); 237 238 return result; 239 } 240 241 static __inline void 242 xen_atomic_setbits_l (volatile XATOMIC_T *ptr, unsigned long bits) { 243 #ifdef __x86_64__ 244 __asm volatile("lock ; orq %1,%0" : "=m" (*ptr) : "ir" (bits)); 245 #else 246 __asm volatile("lock ; orl %1,%0" : "=m" (*ptr) : "ir" (bits)); 247 #endif 248 } 249 250 static __inline void 251 xen_atomic_clearbits_l (volatile XATOMIC_T *ptr, unsigned long bits) { 252 #ifdef __x86_64__ 253 __asm volatile("lock ; andq %1,%0" : "=m" (*ptr) : "ir" (~bits)); 254 #else 255 __asm volatile("lock ; andl %1,%0" : "=m" (*ptr) : "ir" (~bits)); 256 #endif 257 } 258 259 static __inline XATOMIC_T 260 xen_atomic_test_and_clear_bit(volatile void *ptr, unsigned long bitno) 261 { 262 int result; 263 264 __asm volatile(__LOCK_PREFIX 265 #ifdef __x86_64__ 266 "btrq %2,%1 ;" 267 "sbbq %0,%0" 268 #else 269 "btrl %2,%1 ;" 270 "sbbl %0,%0" 271 #endif 272 :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr)) 273 
:"Ir" (bitno) : "memory"); 274 return result; 275 } 276 277 static __inline XATOMIC_T 278 xen_atomic_test_and_set_bit(volatile void *ptr, unsigned long bitno) 279 { 280 long result; 281 282 __asm volatile(__LOCK_PREFIX 283 #ifdef __x86_64__ 284 "btsq %2,%1 ;" 285 "sbbq %0,%0" 286 #else 287 "btsl %2,%1 ;" 288 "sbbl %0,%0" 289 #endif 290 :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr)) 291 :"Ir" (bitno) : "memory"); 292 return result; 293 } 294 295 static __inline int 296 xen_constant_test_bit(const volatile void *ptr, unsigned long bitno) 297 { 298 return ((1UL << (bitno & LONG_MASK)) & 299 (((const volatile XATOMIC_T *) ptr)[bitno >> LONG_SHIFT])) != 0; 300 } 301 302 static __inline XATOMIC_T 303 xen_variable_test_bit(const volatile void *ptr, unsigned long bitno) 304 { 305 long result; 306 307 __asm volatile( 308 #ifdef __x86_64__ 309 "btq %2,%1 ;" 310 "sbbq %0,%0" 311 #else 312 "btl %2,%1 ;" 313 "sbbl %0,%0" 314 #endif 315 :"=r" (result) 316 :"m" (*(const volatile XATOMIC_T *)(ptr)), "Ir" (bitno)); 317 return result; 318 } 319 320 #define xen_atomic_test_bit(ptr, bitno) \ 321 (__builtin_constant_p(bitno) ? \ 322 xen_constant_test_bit((ptr),(bitno)) : \ 323 xen_variable_test_bit((ptr),(bitno))) 324 325 static __inline void 326 xen_atomic_set_bit(volatile void *ptr, unsigned long bitno) 327 { 328 __asm volatile(__LOCK_PREFIX 329 #ifdef __x86_64__ 330 "btsq %1,%0" 331 #else 332 "btsl %1,%0" 333 #endif 334 :"=m" (*(volatile XATOMIC_T *)(ptr)) 335 :"Ir" (bitno)); 336 } 337 338 static __inline void 339 xen_atomic_clear_bit(volatile void *ptr, unsigned long bitno) 340 { 341 __asm volatile(__LOCK_PREFIX 342 #ifdef __x86_64__ 343 "btrq %1,%0" 344 #else 345 "btrl %1,%0" 346 #endif 347 :"=m" (*(volatile XATOMIC_T *)(ptr)) 348 :"Ir" (bitno)); 349 } 350 351 #undef XATOMIC_T 352 353 void wbinvd(void); 354 355 #endif /* !__ASSEMBLY__ */ 356 357 #endif /* _OS_H_ */ 358