/*	$NetBSD: xen.h,v 1.18 2005/12/24 23:24:07 perry Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#ifndef _LOCORE

struct xen_netinfo {
	uint32_t	xi_ifno;
	char		*xi_root;
	uint32_t	xi_ip[5];
};

union xen_cmdline_parseinfo {
	char			xcp_bootdev[16]; /* sizeof(dv_xname) */
	struct xen_netinfo	xcp_netinfo;
	char			xcp_console[16];
};

#define	XEN_PARSE_BOOTDEV	0
#define	XEN_PARSE_NETINFO	1
#define	XEN_PARSE_CONSOLE	2
#define	XEN_PARSE_BOOTFLAGS	3

void	xen_parse_cmdline(int, union xen_cmdline_parseinfo *);
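
/*
 * Illustrative sketch only, kept out of the build with #if 0: a caller
 * passes one of the XEN_PARSE_* selectors above to choose which member
 * of union xen_cmdline_parseinfo gets filled in.  The selector and the
 * prototype are the ones declared above; the wrapping function itself
 * is hypothetical.
 */
#if 0
static void
example_get_console(void)
{
	union xen_cmdline_parseinfo xcp;

	memset(&xcp, 0, sizeof(xcp));
	xen_parse_cmdline(XEN_PARSE_CONSOLE, &xcp);
	/* xcp.xcp_console now names the requested console device, if any. */
}
#endif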

void	xenconscn_attach(void);

void	xenprivcmd_init(void);

void	xbdback_init(void);
void	xennetback_init(void);
void	xen_shm_init(void);

void	xenevt_event(int);
void	xenevt_notify(void);

void	idle_block(void);

#if defined(XENDEBUG) || 1 /* XXX */
void printk(const char *, ...);
void vprintk(const char *, _BSD_VA_LIST_);
#endif

#endif /* !_LOCORE */

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * random collection of macros and definitions
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in xen-public/xen.h but can't be pulled in as
 * they are used in start-of-day assembly.  Need to clean up the .h files
 * a bit more...
 */

#ifndef FLAT_RING1_CS
#define	FLAT_RING1_CS	0x0819
#define	FLAT_RING1_DS	0x0821
#define	FLAT_RING3_CS	0x082b
#define	FLAT_RING3_DS	0x0833
#endif

#define	__KERNEL_CS	FLAT_RING1_CS
#define	__KERNEL_DS	FLAT_RING1_DS

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

/* some function prototypes */
void trap_init(void);
void xpq_flush_cache(void);


/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event_enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __save_flags(x)							\
do {									\
	(x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;	\
} while (0)

#define __restore_flags(x)						\
do {									\
	volatile shared_info_t *_shared = HYPERVISOR_shared_info;	\
	__insn_barrier();						\
	if ((_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0) {	\
		x86_lfence();						\
		if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
			hypervisor_force_callback();			\
	}								\
} while (0)

#define __cli()								\
do {									\
	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;	\
	x86_lfence();							\
} while (0)

#define __sti()								\
do {									\
	volatile shared_info_t *_shared = HYPERVISOR_shared_info;	\
	__insn_barrier();						\
	_shared->vcpu_data[0].evtchn_upcall_mask = 0;			\
	x86_lfence(); /* unmask then check (avoid races) */		\
	if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
		hypervisor_force_callback();				\
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		do {				\
	__save_flags(x);					\
	__cli();						\
} while (/* CONSTCOND */ 0)
#define save_and_sti(x)		__save_and_sti(x) /* XXX __save_and_sti is not defined in this file */
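
/*
 * Illustrative sketch only, kept out of the build with #if 0: the usual
 * pattern is to bracket a critical section with save_and_cli() and
 * restore_flags(), so that the virtual interrupt state is put back to
 * whatever it was on entry.  The wrapping function is hypothetical.
 */
#if 0
static void
example_critical_section(void)
{
	unsigned long flags;

	save_and_cli(flags);	/* mask event delivery, remember old mask */
	/* ... touch data also used by the event callback ... */
	restore_flags(flags);	/* unmasking may run a pending callback */
}
#endif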

/*
 * Always assume we're on a multiprocessor.  We don't know how many CPUs
 * the underlying hardware has.
 */
#define __LOCK_PREFIX "lock; "

static inline uint32_t
x86_atomic_xchg(volatile uint32_t *ptr, unsigned long val)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
	    "xchgl %0,%1"
	    :"=r" (result)
	    :"m" (*ptr), "0" (val)
	    :"memory");

	return result;
}

static inline int
x86_atomic_test_and_clear_bit(volatile void *ptr, int bitno)
{
	int result;

	__asm volatile(__LOCK_PREFIX
	    "btrl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result), "=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static inline int
x86_atomic_test_and_set_bit(volatile void *ptr, int bitno)
{
	int result;

	__asm volatile(__LOCK_PREFIX
	    "btsl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result), "=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static inline int
x86_constant_test_bit(const volatile void *ptr, int bitno)
{
	return ((1UL << (bitno & 31)) &
	    (((const volatile uint32_t *) ptr)[bitno >> 5])) != 0;
}

static inline int
x86_variable_test_bit(const volatile void *ptr, int bitno)
{
	int result;

	__asm volatile(
	    "btl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result)
	    :"m" (*(const volatile uint32_t *)(ptr)), "Ir" (bitno));
	return result;
}

#define x86_atomic_test_bit(ptr, bitno)			\
	(__builtin_constant_p(bitno) ?			\
	    x86_constant_test_bit((ptr),(bitno)) :	\
	    x86_variable_test_bit((ptr),(bitno)))

static inline void
x86_atomic_set_bit(volatile void *ptr, int bitno)
{
	__asm volatile(__LOCK_PREFIX
	    "btsl %1,%0"
	    :"=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno));
}

static inline void
x86_atomic_clear_bit(volatile void *ptr, int bitno)
{
	__asm volatile(__LOCK_PREFIX
	    "btrl %1,%0"
	    :"=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno));
}

/*
 * A ring-1 guest cannot execute the privileged wbinvd instruction
 * directly, so cache flushes go through the hypervisor.
 */
static inline void
wbinvd(void)
{
	xpq_flush_cache();
}

#endif /* !_LOCORE */

#endif /* _OS_H_ */
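
/*
 * Illustrative sketch only, kept out of the build with #if 0: how the
 * x86_atomic_* bit operations above might cooperate on a word shared
 * with another CPU or with the hypervisor.  The word and bit number are
 * hypothetical.  Note that with a constant bit number, the
 * x86_atomic_test_bit() macro dispatches to x86_constant_test_bit()
 * via __builtin_constant_p.
 */
#if 0
static volatile uint32_t example_pending;	/* hypothetical shared word */

static void
example_bit_ops(void)
{
	x86_atomic_set_bit(&example_pending, 5);	/* post bit 5 */
	if (x86_atomic_test_bit(&example_pending, 5)) {
		/* Consume the bit: only one concurrent caller sees nonzero. */
		if (x86_atomic_test_and_clear_bit(&example_pending, 5))
			; /* we cleared it; handle the event here */
	}
}
#endif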