/*	$NetBSD: xen.h,v 1.25 2007/11/22 16:16:58 bouyer Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#include "opt_xen.h"

#ifndef _LOCORE

struct xen_netinfo {
	uint32_t xi_ifno;
	char *xi_root;
	uint32_t xi_ip[5];
};

union xen_cmdline_parseinfo {
	char			xcp_bootdev[16]; /* sizeof(dv_xname) */
	struct xen_netinfo	xcp_netinfo;
	char			xcp_console[16];
};

#define	XEN_PARSE_BOOTDEV	0
#define	XEN_PARSE_NETINFO	1
#define	XEN_PARSE_CONSOLE	2
#define	XEN_PARSE_BOOTFLAGS	3

void	xen_parse_cmdline(int, union xen_cmdline_parseinfo *);

void	xenconscn_attach(void);

void	xenprivcmd_init(void);

void	xbdback_init(void);
void	xennetback_init(void);
void	xen_shm_init(void);

void	xenevt_event(int);
void	xenevt_notify(void);

void	idle_block(void);

#if defined(XENDEBUG) || 1 /* XXX */
void printk(const char *, ...);
void vprintk(const char *, _BSD_VA_LIST_);
#endif

#endif

#endif /* _XEN_H */
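/*
 * Illustrative sketch (not part of the original header): how a caller
 * might drive xen_parse_cmdline() above.  The XEN_PARSE_* selector picks
 * which member of union xen_cmdline_parseinfo gets filled in.  The
 * function name example_parse_bootdev() and its body are assumptions
 * made up for this example, not an in-tree caller.
 */
#if 0	/* usage sketch only */
static void
example_parse_bootdev(void)
{
	union xen_cmdline_parseinfo xcp;

	memset(&xcp, 0, sizeof(xcp));
	/* Ask for the boot device string from the Xen command line. */
	xen_parse_cmdline(XEN_PARSE_BOOTDEV, &xcp);
	/* xcp.xcp_bootdev now holds the boot device name, if one was given. */
}
#endif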
/******************************************************************************
 * os.h
 *
 * random collection of macros and definitions
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in xen-public/xen.h, but that header can't be
 * pulled in here because these constants are used in start-of-day
 * assembly.  The .h files still need some cleanup...
 */

#ifdef XEN3
#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS 0xe019    /* GDT index 0x1c03, RPL 1 */
#define FLAT_RING1_DS 0xe021    /* GDT index 0x1c04, RPL 1 */
#define FLAT_RING1_SS 0xe021    /* GDT index 0x1c04, RPL 1 */
#define FLAT_RING3_CS 0xe02b    /* GDT index 0x1c05, RPL 3 */
#define FLAT_RING3_DS 0xe033    /* GDT index 0x1c06, RPL 3 */
#define FLAT_RING3_SS 0xe033    /* GDT index 0x1c06, RPL 3 */
#endif
#else /* XEN3 */
#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS		0x0819    /* GDT index 259, RPL 1 */
#define FLAT_RING1_DS		0x0821    /* GDT index 260, RPL 1 */
#define FLAT_RING3_CS		0x082b    /* GDT index 261, RPL 3 */
#define FLAT_RING3_DS		0x0833    /* GDT index 262, RPL 3 */
#endif
#endif /* XEN3 */

#define __KERNEL_CS	FLAT_RING1_CS
#define __KERNEL_DS	FLAT_RING1_DS

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

/* some function prototypes */
void trap_init(void);
void xpq_flush_cache(void);


/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __save_flags(x)							\
do {									\
	(x) = HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask;	\
} while (0)

#define __restore_flags(x)						\
do {									\
	volatile shared_info_t *_shared = HYPERVISOR_shared_info;	\
	__insn_barrier();						\
	if ((_shared->vcpu_info[0].evtchn_upcall_mask = (x)) == 0) {	\
		x86_lfence();						\
		if (__predict_false(_shared->vcpu_info[0].evtchn_upcall_pending)) \
			hypervisor_force_callback();			\
	}								\
} while (0)

#define __cli()								\
do {									\
	HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask = 1;	\
	x86_lfence();							\
} while (0)

#define __sti()								\
do {									\
	volatile shared_info_t *_shared = HYPERVISOR_shared_info;	\
	__insn_barrier();						\
	_shared->vcpu_info[0].evtchn_upcall_mask = 0;			\
	x86_lfence(); /* unmask then check (avoid races) */		\
	if (__predict_false(_shared->vcpu_info[0].evtchn_upcall_pending)) \
		hypervisor_force_callback();				\
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)	do {					\
	__save_flags(x);					\
	__cli();						\
} while (/* CONSTCOND */ 0)
#define save_and_sti(x)		__save_and_sti(x)
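/*
 * Illustrative sketch (not part of the original header): the usual
 * pattern for the pseudo interrupt-flag macros above.  save_and_cli()
 * masks event-channel upcalls and remembers the previous mask;
 * restore_flags() restores it and, if upcalls end up unmasked with
 * events pending, forces the hypervisor callback.  The function and
 * variable names below are made up for this example.
 */
#if 0	/* usage sketch only */
static void
example_event_critical_section(void)
{
	unsigned long flags;

	save_and_cli(flags);	/* block event upcalls, remember old mask */
	/* ... touch state shared with event handlers ... */
	restore_flags(flags);	/* unmask; pending events get delivered */
}
#endif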
/*
 * Always assume we're on a multiprocessor: we don't know how many CPUs
 * the underlying hardware has.
 */
#define __LOCK_PREFIX "lock; "

#ifdef XEN3
#define XATOMIC_T long
#else
#define XATOMIC_T uint32_t
#endif

static __inline XATOMIC_T
xen_atomic_xchg(volatile XATOMIC_T *ptr, unsigned long val)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "xchgq %0,%1"
#else
	    "xchgl %0,%1"
#endif
	    :"=r" (result)
	    :"m" (*ptr), "0" (val)
	    :"memory");

	return result;
}

static inline uint16_t
xen_atomic_cmpxchg16(volatile uint16_t *ptr, uint16_t val, uint16_t newval)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
	    "cmpxchgw %w1,%2"
	    :"=a" (result)
	    :"q"(newval), "m" (*ptr), "0" (val)
	    :"memory");

	return result;
}

static __inline void
xen_atomic_setbits_l (volatile XATOMIC_T *ptr, unsigned long bits) {
#ifdef __x86_64__
	__asm volatile("lock ; orq %1,%0" : "=m" (*ptr) : "ir" (bits));
#else
	__asm volatile("lock ; orl %1,%0" : "=m" (*ptr) : "ir" (bits));
#endif
}

static __inline void
xen_atomic_clearbits_l (volatile XATOMIC_T *ptr, unsigned long bits) {
#ifdef __x86_64__
	__asm volatile("lock ; andq %1,%0" : "=m" (*ptr) : "ir" (~bits));
#else
	__asm volatile("lock ; andl %1,%0" : "=m" (*ptr) : "ir" (~bits));
#endif
}

static __inline XATOMIC_T
xen_atomic_test_and_clear_bit(volatile void *ptr, unsigned long bitno)
{
	int result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btrq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btrl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline XATOMIC_T
xen_atomic_test_and_set_bit(volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btsq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btsl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline int
xen_constant_test_bit(const volatile void *ptr, unsigned long bitno)
{
	/*
	 * Index 32-bit words so constant and variable bit numbers agree
	 * on the flat bit-array layout used by the bt* instructions.
	 */
	return ((1UL << (bitno & 31)) &
	    (((const volatile uint32_t *) ptr)[bitno >> 5])) != 0;
}

static __inline XATOMIC_T
xen_variable_test_bit(const volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(
#ifdef __x86_64__
	    "btq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result)
	    :"m" (*(const volatile XATOMIC_T *)(ptr)), "Ir" (bitno));
	return result;
}

#define xen_atomic_test_bit(ptr, bitno) \
	(__builtin_constant_p(bitno) ? \
	    xen_constant_test_bit((ptr),(bitno)) : \
	    xen_variable_test_bit((ptr),(bitno)))
static __inline void
xen_atomic_set_bit(volatile void *ptr, unsigned long bitno)
{
	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btsq %1,%0"
#else
	    "btsl %1,%0"
#endif
	    :"=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno));
}

static __inline void
xen_atomic_clear_bit(volatile void *ptr, unsigned long bitno)
{
	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btrq %1,%0"
#else
	    "btrl %1,%0"
#endif
	    :"=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno));
}

#undef XATOMIC_T

void wbinvd(void);

#endif /* !_LOCORE */

#endif /* _OS_H_ */
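/*
 * Illustrative sketch (not part of the original header): typical use of
 * the xen_atomic_* bit helpers above on a word of event-channel style
 * flag bits.  The name example_claim_event() and the "pending" argument
 * are assumptions made up for this example.
 */
#if 0	/* usage sketch only */
static int
example_claim_event(volatile unsigned long *pending, unsigned int ev)
{
	/*
	 * Atomically set bit 'ev'; the return value is nonzero iff the
	 * bit was already set, so zero means we claimed it.
	 */
	if (xen_atomic_test_and_set_bit(pending, ev) != 0)
		return 0;		/* someone else got here first */

	/* ... handle the event ... */

	xen_atomic_clear_bit(pending, ev);	/* atomically drop our claim */
	return 1;
}
#endif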