/*	$NetBSD: xen.h,v 1.12 2005/04/11 12:10:31 yamt Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#ifndef _LOCORE

struct xen_netinfo {
	uint32_t xi_ifno;
	char *xi_root;
	uint32_t xi_ip[5];
};

union xen_cmdline_parseinfo {
	char			xcp_bootdev[16]; /* sizeof(dv_xname) */
	struct xen_netinfo	xcp_netinfo;
	char			xcp_console[16];
};

#define	XEN_PARSE_BOOTDEV	0
#define	XEN_PARSE_NETINFO	1
#define	XEN_PARSE_CONSOLE	2

void	xen_parse_cmdline(int, union xen_cmdline_parseinfo *);

void	xenconscn_attach(void);

void	xenprivcmd_init(void);

void	xbdback_init(void);
void	xennetback_init(void);
void	xen_shm_init(void);

void	xenevt_event(int);
void	xenevt_notify(void);

void	idle_block(void);

#ifdef XENDEBUG
void printk(const char *, ...);
void vprintk(const char *, _BSD_VA_LIST_);
#endif

#endif /* !_LOCORE */

#endif /* _XEN_H */
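/*
 * A minimal usage sketch for xen_parse_cmdline() (illustrative only:
 * the XEN_EXAMPLES guard and xen_example_have_console() are invented
 * for the example and are not part of any NetBSD interface).  The
 * first argument selects which member of the union the parser fills
 * in; here we ask for the console selection from the kernel command
 * line.
 */
#if defined(XEN_EXAMPLES) && !defined(_LOCORE)
static __inline int
xen_example_have_console(void)
{
	union xen_cmdline_parseinfo xcp;

	xcp.xcp_console[0] = '\0';	/* in case nothing is parsed */
	xen_parse_cmdline(XEN_PARSE_CONSOLE, &xcp);
	return (xcp.xcp_console[0] != '\0');
}
#endif /* XEN_EXAMPLES && !_LOCORE */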
/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in xen-public/xen.h, but that header cannot be
 * pulled in here because these constants are used in start-of-day
 * assembly.  The .h files need a bit more cleanup...
 */

#ifndef FLAT_RING1_CS
#define	FLAT_RING1_CS	0x0819
#define	FLAT_RING1_DS	0x0821
#define	FLAT_RING3_CS	0x082b
#define	FLAT_RING3_DS	0x0833
#endif

#define	__KERNEL_CS	FLAT_RING1_CS
#define	__KERNEL_DS	FLAT_RING1_DS

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

/* some function prototypes */
void trap_init(void);
void xpq_flush_cache(void);


/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event-enable flag (the inverse of evtchn_upcall_mask) in the
 * shared_info structure.  Note that when the enable bit is set, there
 * may be pending events to be handled.  We may therefore call into
 * do_hypervisor_callback() directly.
 */

#define __save_flags(x)							\
do {									\
	(x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;	\
} while (0)

#define __restore_flags(x)						\
do {									\
	volatile shared_info_t *_shared = HYPERVISOR_shared_info;	\
	__insn_barrier();						\
	if ((_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0) {	\
		x86_lfence();						\
		if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
			hypervisor_force_callback();			\
	}								\
} while (0)

#define __cli()								\
do {									\
	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;	\
	x86_lfence();							\
} while (0)

#define __sti()								\
do {									\
	volatile shared_info_t *_shared = HYPERVISOR_shared_info;	\
	__insn_barrier();						\
	_shared->vcpu_data[0].evtchn_upcall_mask = 0;			\
	x86_lfence(); /* unmask then check (avoid races) */		\
	if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
		hypervisor_force_callback();				\
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)	do {					\
	__save_flags(x);					\
	__cli();						\
} while (/* CONSTCOND */ 0)
#define save_and_sti(x)	do {					\
	__save_flags(x);					\
	__sti();						\
} while (/* CONSTCOND */ 0)
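/*
 * A minimal usage sketch for the macros above (illustrative: the
 * XEN_EXAMPLES guard and the xen_example_* names are invented for the
 * example).  The saved "flags" value is simply the old
 * evtchn_upcall_mask, so the pattern mirrors the native
 * save_and_cli()/restore_flags() idiom.  Note that this only masks
 * event delivery on the local vCPU; it is not an MP lock.
 */
#ifdef XEN_EXAMPLES
static unsigned long xen_example_counter;

static __inline void
xen_example_increment(void)
{
	int s;

	save_and_cli(s);	/* mask event-channel upcalls */
	xen_example_counter++;	/* cannot race with event handlers here */
	restore_flags(s);	/* unmask; fires pending events if any */
}
#endif /* XEN_EXAMPLES */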
/*
 * Always assume we are on a multiprocessor: we do not know how many
 * CPUs the underlying hardware has.
 */
#define __LOCK_PREFIX "lock; "

static __inline__ uint32_t
x86_atomic_xchg(volatile uint32_t *ptr, unsigned long val)
{
	unsigned long result;

	/* xchg with a memory operand is implicitly locked; the explicit
	   prefix is redundant but harmless. */
	__asm __volatile(__LOCK_PREFIX
	    "xchgl %0,%1"
	    :"=r" (result)
	    :"m" (*ptr), "0" (val)
	    :"memory");

	return result;
}

static __inline__ int
x86_atomic_test_and_clear_bit(volatile void *ptr, int bitno)
{
	int result;

	__asm __volatile(__LOCK_PREFIX
	    "btrl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result), "=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline__ int
x86_atomic_test_and_set_bit(volatile void *ptr, int bitno)
{
	int result;

	__asm __volatile(__LOCK_PREFIX
	    "btsl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result), "=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline int
x86_constant_test_bit(const volatile void *ptr, int bitno)
{
	return ((1UL << (bitno & 31)) &
	    (((const volatile uint32_t *)ptr)[bitno >> 5])) != 0;
}

static __inline int
x86_variable_test_bit(const volatile void *ptr, int bitno)
{
	int result;

	__asm __volatile(
	    "btl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result)
	    :"m" (*(const volatile uint32_t *)(ptr)), "Ir" (bitno));
	return result;
}

#define x86_atomic_test_bit(ptr, bitno)			\
	(__builtin_constant_p(bitno) ?			\
	    x86_constant_test_bit((ptr), (bitno)) :	\
	    x86_variable_test_bit((ptr), (bitno)))

static __inline void
x86_atomic_set_bit(volatile void *ptr, int bitno)
{
	__asm __volatile(__LOCK_PREFIX
	    "btsl %1,%0"
	    :"=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno));
}

static __inline void
x86_atomic_clear_bit(volatile void *ptr, int bitno)
{
	__asm __volatile(__LOCK_PREFIX
	    "btrl %1,%0"
	    :"=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno));
}

static __inline void
wbinvd(void)
{
	xpq_flush_cache();
}

#endif /* !_LOCORE */

#endif /* _OS_H_ */
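/*
 * A minimal usage sketch for the atomic helpers above (illustrative:
 * the XEN_EXAMPLES guard and the xen_example_* names are invented for
 * the example).  The operations act on 32-bit words, and the
 * test-and-modify variants read and change the bit in one locked
 * instruction, which is what makes them safe against other CPUs.
 */
#if defined(XEN_EXAMPLES) && !defined(_LOCORE)
static volatile uint32_t xen_example_flags;

static __inline void
xen_example_bitops(void)
{
	/* Setting a bit is safe against concurrent setters. */
	x86_atomic_set_bit(&xen_example_flags, 7);

	/* Only one of several racing callers sees "was set" here. */
	if (x86_atomic_test_and_clear_bit(&xen_example_flags, 7)) {
		/* We claimed the bit; it is now clear again. */
	}
}
#endif /* XEN_EXAMPLES && !_LOCORE */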