/*	$NetBSD: xen.h,v 1.20 2006/02/16 20:17:15 perry Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#include "opt_xen.h"

#ifndef _LOCORE

struct xen_netinfo {
	uint32_t xi_ifno;
	char *xi_root;
	uint32_t xi_ip[5];
};

union xen_cmdline_parseinfo {
	char			xcp_bootdev[16]; /* sizeof(dv_xname) */
	struct xen_netinfo	xcp_netinfo;
	char			xcp_console[16];
};

#define	XEN_PARSE_BOOTDEV	0
#define	XEN_PARSE_NETINFO	1
#define	XEN_PARSE_CONSOLE	2
#define	XEN_PARSE_BOOTFLAGS	3

void	xen_parse_cmdline(int, union xen_cmdline_parseinfo *);
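
/*
 * Example usage (an illustrative sketch only; the exact parsing rules live
 * with the implementation of xen_parse_cmdline()): pass the XEN_PARSE_*
 * selector for the item of interest and read the matching union member:
 *
 *	union xen_cmdline_parseinfo xcp;
 *
 *	xen_parse_cmdline(XEN_PARSE_CONSOLE, &xcp);
 *	printk("console argument: %s\n", xcp.xcp_console);
 */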

void	xenconscn_attach(void);

void	xenprivcmd_init(void);

void	xbdback_init(void);
void	xennetback_init(void);
void	xen_shm_init(void);

void	xenevt_event(int);
void	xenevt_notify(void);

void	idle_block(void);

#if defined(XENDEBUG) || 1 /* XXX */
void printk(const char *, ...);
void vprintk(const char *, _BSD_VA_LIST_);
#endif

#endif /* !_LOCORE */

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in xen-public/xen.h, but that header cannot be
 * pulled in here because the constants are used in start-of-day assembly.
 * The .h files need a bit more cleanup...
 */

#ifdef XEN3
#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS 0xe019	/* GDT index 7171, RPL 1 */
#define FLAT_RING1_DS 0xe021	/* GDT index 7172, RPL 1 */
#define FLAT_RING1_SS 0xe021	/* GDT index 7172, RPL 1 */
#define FLAT_RING3_CS 0xe02b	/* GDT index 7173, RPL 3 */
#define FLAT_RING3_DS 0xe033	/* GDT index 7174, RPL 3 */
#define FLAT_RING3_SS 0xe033	/* GDT index 7174, RPL 3 */
#endif
#else /* XEN3 */
#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS		0x0819	/* GDT index 259, RPL 1 */
#define FLAT_RING1_DS		0x0821	/* GDT index 260, RPL 1 */
#define FLAT_RING3_CS		0x082b	/* GDT index 261, RPL 3 */
#define FLAT_RING3_DS		0x0833	/* GDT index 262, RPL 3 */
#endif
#endif /* XEN3 */

#define __KERNEL_CS		FLAT_RING1_CS
#define __KERNEL_DS		FLAT_RING1_DS
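
/*
 * Illustrative sketch (hypothetical code, for exposition only): the low two
 * bits of a selector are its requested privilege level, so kernel (ring 1)
 * and user (ring 3) entries can be told apart by comparing a saved %cs
 * against __KERNEL_CS:
 *
 *	#define XEN_SEL_RPL(sel)	((sel) & 3)
 *
 *	if (XEN_SEL_RPL(tf->tf_cs) == XEN_SEL_RPL(__KERNEL_CS)) {
 *		... trap came from the (ring 1) kernel ...
 *	}
 *
 * Here XEN_SEL_RPL and the trapframe pointer "tf" are hypothetical; the
 * real user/kernel checks live in the machine-dependent trap code.
 */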

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

/* some function prototypes */
void trap_init(void);
void xpq_flush_cache(void);


/*
 * STI/CLI equivalents.  These set and clear the virtual evtchn_upcall_mask
 * ("event delivery disabled") flag in the shared_info structure.  Note that
 * when delivery is re-enabled there may already be pending events to handle,
 * in which case we force a callback via hypervisor_force_callback().
 */

#define __save_flags(x)							\
do {									\
	(x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;	\
} while (0)

#define __restore_flags(x)						\
do {									\
	volatile shared_info_t *_shared = HYPERVISOR_shared_info;	\
	__insn_barrier();						\
	if ((_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0) {	\
		x86_lfence();						\
		if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
			hypervisor_force_callback();			\
	}								\
} while (0)

#define __cli()								\
do {									\
	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;	\
	x86_lfence();							\
} while (0)

#define __sti()								\
do {									\
	volatile shared_info_t *_shared = HYPERVISOR_shared_info;	\
	__insn_barrier();						\
	_shared->vcpu_data[0].evtchn_upcall_mask = 0;			\
	x86_lfence(); /* unmask then check (avoid races) */		\
	if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
		hypervisor_force_callback();				\
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		do {					\
	__save_flags(x);						\
	__cli();							\
} while (/* CONSTCOND */ 0)
#define save_and_sti(x)		do {					\
	__save_flags(x);						\
	__sti();							\
} while (/* CONSTCOND */ 0)
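
/*
 * Example (an illustrative sketch only): the wrappers above bracket short
 * critical sections against event delivery, much like spl()/splx():
 *
 *	unsigned long flags;
 *
 *	save_and_cli(flags);	(remember the old mask, then mask events)
 *	... touch state shared with event handlers ...
 *	restore_flags(flags);	(unmask if it was unmasked before; a callback
 *				 is forced if events became pending meanwhile)
 */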

/*
 * Always assume we are on a multiprocessor: we do not know how many CPUs
 * the underlying hardware has.
 */
#define __LOCK_PREFIX "lock; "

#ifdef XEN3
#define XATOMIC_T long
#else
#define XATOMIC_T uint32_t
#endif

/* Atomically exchange *ptr with val; returns the previous value. */
static __inline XATOMIC_T
xen_atomic_xchg(volatile XATOMIC_T *ptr, unsigned long val)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
	    "xchgl %0,%1"
	    :"=r" (result)
	    :"m" (*ptr), "0" (val)
	    :"memory");

	return result;
}

static __inline void
xen_atomic_setbits_l(volatile XATOMIC_T *ptr, unsigned long bits)
{
	__asm volatile("lock ; orl %1,%0" : "=m" (*ptr) : "ir" (bits));
}

static __inline void
xen_atomic_clearbits_l(volatile XATOMIC_T *ptr, unsigned long bits)
{
	__asm volatile("lock ; andl %1,%0" : "=m" (*ptr) : "ir" (~bits));
}

/*
 * Atomically clear (btrl) or set (btsl) bit "bitno" in *ptr; the sbbl turns
 * the old bit value into 0 or -1, so the result is non-zero iff the bit was
 * previously set.
 */
static __inline int
xen_atomic_test_and_clear_bit(volatile void *ptr, int bitno)
{
	int result;

	__asm volatile(__LOCK_PREFIX
	    "btrl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline int
xen_atomic_test_and_set_bit(volatile void *ptr, int bitno)
{
	int result;

	__asm volatile(__LOCK_PREFIX
	    "btsl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

/* Bit-test helpers used by the xen_atomic_test_bit() macro below. */
static __inline int
xen_constant_test_bit(const volatile void *ptr, int bitno)
{
	return ((1UL << (bitno & 31)) &
	    (((const volatile XATOMIC_T *) ptr)[bitno >> 5])) != 0;
}

static __inline int
xen_variable_test_bit(const volatile void *ptr, int bitno)
{
	int result;

	__asm volatile(
	    "btl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result)
	    :"m" (*(const volatile XATOMIC_T *)(ptr)), "Ir" (bitno));
	return result;
}

#define xen_atomic_test_bit(ptr, bitno)					\
	(__builtin_constant_p(bitno) ?					\
	    xen_constant_test_bit((ptr), (bitno)) :			\
	    xen_variable_test_bit((ptr), (bitno)))

static __inline void
xen_atomic_set_bit(volatile void *ptr, int bitno)
{
	__asm volatile(__LOCK_PREFIX
	    "btsl %1,%0"
	    :"=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno));
}

static __inline void
xen_atomic_clear_bit(volatile void *ptr, int bitno)
{
	__asm volatile(__LOCK_PREFIX
	    "btrl %1,%0"
	    :"=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno));
}

#undef XATOMIC_T
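
/*
 * Example (an illustrative sketch only; "pending" is hypothetical): these
 * helpers are the sort of primitives used on bitmaps shared with the
 * hypervisor or with other CPUs, e.g. a word of pending-event bits:
 *
 *	volatile unsigned long pending;
 *
 *	xen_atomic_set_bit(&pending, 5);	(mark event 5 pending)
 *	if (xen_atomic_test_and_clear_bit(&pending, 5))
 *		... event 5 was pending; handle it ...
 *
 * Note that the test_and_* variants return 0 or -1 (the sbbl idiom above),
 * so test them for zero/non-zero rather than comparing against 1.
 */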

/*
 * wbinvd cannot be executed by a guest running outside ring 0; hand the
 * cache flush to the hypervisor via xpq_flush_cache().
 */
static __inline void
wbinvd(void)
{
	xpq_flush_cache();
}

#endif /* !_LOCORE */

#endif /* _OS_H_ */