/******************************************************************************
 * xen.h
 *
 * Guest OS interface to Xen.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2004, K A Fraser
 */

#ifndef __XEN_PUBLIC_XEN_H__
#define __XEN_PUBLIC_XEN_H__

#include "xen-compat.h"

#if defined(__i386) && !defined(__i386__)
#define __i386__ /* foo */
#endif

#if defined(__amd64) && !defined(__x86_64__)
#define __x86_64__
#endif

#if defined(_ASM) && !defined(__ASSEMBLY__)
#define __ASSEMBLY__
#endif

#if defined(__i386__) || defined(__x86_64__)
#include "arch-x86/xen.h"
#elif defined(__ia64__)
#include "arch-ia64.h"
#else
#error "Unsupported architecture"
#endif

#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */
DEFINE_XEN_GUEST_HANDLE(char);
__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
DEFINE_XEN_GUEST_HANDLE(int);
__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
DEFINE_XEN_GUEST_HANDLE(long);
__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
DEFINE_XEN_GUEST_HANDLE(void);

DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#endif

/*
 * HYPERCALLS
 */

#define __HYPERVISOR_set_trap_table 0
#define __HYPERVISOR_mmu_update 1
#define __HYPERVISOR_set_gdt 2
#define __HYPERVISOR_stack_switch 3
#define __HYPERVISOR_set_callbacks 4
#define __HYPERVISOR_fpu_taskswitch 5
#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
#define __HYPERVISOR_platform_op 7
#define __HYPERVISOR_set_debugreg 8
#define __HYPERVISOR_get_debugreg 9
#define __HYPERVISOR_update_descriptor 10
#define __HYPERVISOR_memory_op 12
#define __HYPERVISOR_multicall 13
#define __HYPERVISOR_update_va_mapping 14
#define __HYPERVISOR_set_timer_op 15
#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
#define __HYPERVISOR_xen_version 17
#define __HYPERVISOR_console_io 18
#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
#define __HYPERVISOR_grant_table_op 20
#define __HYPERVISOR_vm_assist 21
#define __HYPERVISOR_update_va_mapping_otherdomain 22
#define __HYPERVISOR_iret 23 /* x86 only */
#define __HYPERVISOR_vcpu_op 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
#define __HYPERVISOR_xsm_op 27
#define __HYPERVISOR_nmi_op 28
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
#define __HYPERVISOR_xenoprof_op 31
#define __HYPERVISOR_event_channel_op 32
#define __HYPERVISOR_physdev_op 33
#define __HYPERVISOR_hvm_op 34
#define __HYPERVISOR_sysctl 35
#define __HYPERVISOR_domctl 36
#define __HYPERVISOR_kexec_op 37

/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
#define __HYPERVISOR_arch_1 49
#define __HYPERVISOR_arch_2 50
#define __HYPERVISOR_arch_3 51
#define __HYPERVISOR_arch_4 52
#define __HYPERVISOR_arch_5 53
#define __HYPERVISOR_arch_6 54
#define __HYPERVISOR_arch_7 55

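/*
 * Illustrative sketch only (not part of this interface): guests reach the
 * calls above through arch-specific stubs defined outside this header.  The
 * wrapper and the HYPERVISOR_hypercall() helper below are hypothetical;
 * argument passing and the trap mechanism are dictated by the architecture
 * headers included earlier.
 */
#if 0
static inline long
HYPERVISOR_set_timer_op(uint64_t timeout)
{
    /* Ask Xen for a timer event at system time 'timeout' (in nanoseconds). */
    return (HYPERVISOR_hypercall(__HYPERVISOR_set_timer_op, timeout));
}
#endif
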
/*
 * HYPERCALL COMPATIBILITY.
 */

/* New sched_op hypercall introduced in 0x00030101. */
#if __XEN_INTERFACE_VERSION__ < 0x00030101
#undef __HYPERVISOR_sched_op
#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
#endif

/* New event-channel and physdev hypercalls introduced in 0x00030202. */
#if __XEN_INTERFACE_VERSION__ < 0x00030202
#undef __HYPERVISOR_event_channel_op
#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
#undef __HYPERVISOR_physdev_op
#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
#endif

/* New platform_op hypercall introduced in 0x00030204. */
#if __XEN_INTERFACE_VERSION__ < 0x00030204
#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
#endif

/*
 * VIRTUAL INTERRUPTS
 *
 * Virtual interrupts that a guest OS may receive from Xen.
 *
 * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
 * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
 * The latter can be allocated only once per guest: they must initially be
 * allocated to VCPU0 but can subsequently be re-bound.
 */
#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */

/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
#define VIRQ_ARCH_1 17
#define VIRQ_ARCH_2 18
#define VIRQ_ARCH_3 19
#define VIRQ_ARCH_4 20
#define VIRQ_ARCH_5 21
#define VIRQ_ARCH_6 22
#define VIRQ_ARCH_7 23

#define NR_VIRQS 24

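/*
 * Illustrative sketch only: a VIRQ is delivered through an event channel,
 * so it must first be bound to a port.  EVTCHNOP_bind_virq and struct
 * evtchn_bind_virq come from the separate public header event_channel.h,
 * and the HYPERVISOR_event_channel_op() wrapper is assumed to be provided
 * by the guest's arch layer.
 */
#if 0
    struct evtchn_bind_virq bind;

    bind.virq = VIRQ_TIMER;     /* per-VCPU VIRQ: bind it once on each VCPU */
    bind.vcpu = 0;
    if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind) == 0) {
        /* Timer events for VCPU0 now arrive on event-channel port bind.port. */
    }
#endif
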
/*
 * MMU-UPDATE REQUESTS
 *
 * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
 * A foreigndom (FD) can be specified (or DOMID_SELF for none).
 * Where the FD has some effect, it is described below.
 * ptr[1:0] specifies the appropriate MMU_* command.
 *
 * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
 * Updates an entry in a page table. If updating an L1 table, and the new
 * table entry is valid/present, the mapped frame must belong to the FD, if
 * an FD has been specified. If attempting to map an I/O page then the
 * caller assumes the privilege of the FD.
 * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
 * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
 * ptr[:2] -- Machine address of the page-table entry to modify.
 * val -- Value to write.
 *
 * ptr[1:0] == MMU_MACHPHYS_UPDATE:
 * Updates an entry in the machine->pseudo-physical mapping table.
 * ptr[:2] -- Machine address within the frame whose mapping to modify.
 * The frame must belong to the FD, if one is specified.
 * val -- Value to write into the mapping entry.
 *
 * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
 * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
 * with those in @val.
 */
#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */

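/*
 * Illustrative sketch only: batching two requests through a hypothetical
 * HYPERVISOR_mmu_update() wrapper.  'pte_ma' and 'frame_ma' stand for the
 * machine addresses described above; mmu_update_t and DOMID_SELF are
 * defined later in this header.
 */
#if 0
    mmu_update_t req[2];

    /* Write a new PTE value; ptr[1:0] encodes the command. */
    req[0].ptr = pte_ma | MMU_NORMAL_PT_UPDATE;
    req[0].val = new_pte;

    /* Update the machine->pseudo-physical entry for the frame at 'frame_ma'. */
    req[1].ptr = frame_ma | MMU_MACHPHYS_UPDATE;
    req[1].val = new_pfn;

    (void) HYPERVISOR_mmu_update(req, 2, NULL, DOMID_SELF);
#endif
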
/*
 * MMU EXTENDED OPERATIONS
 *
 * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
 * A foreigndom (FD) can be specified (or DOMID_SELF for none).
 * Where the FD has some effect, it is described below.
 *
 * cmd: MMUEXT_(UN)PIN_*_TABLE
 * mfn: Machine frame number to be (un)pinned as a p.t. page.
 * The frame must belong to the FD, if one is specified.
 *
 * cmd: MMUEXT_NEW_BASEPTR
 * mfn: Machine frame number of new page-table base to install in MMU.
 *
 * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
 * mfn: Machine frame number of new page-table base to install in MMU
 * when in user space.
 *
 * cmd: MMUEXT_TLB_FLUSH_LOCAL
 * No additional arguments. Flushes local TLB.
 *
 * cmd: MMUEXT_INVLPG_LOCAL
 * linear_addr: Linear address to be flushed from the local TLB.
 *
 * cmd: MMUEXT_TLB_FLUSH_MULTI
 * vcpumask: Pointer to bitmap of VCPUs to be flushed.
 *
 * cmd: MMUEXT_INVLPG_MULTI
 * linear_addr: Linear address to be flushed.
 * vcpumask: Pointer to bitmap of VCPUs to be flushed.
 *
 * cmd: MMUEXT_TLB_FLUSH_ALL
 * No additional arguments. Flushes all VCPUs' TLBs.
 *
 * cmd: MMUEXT_INVLPG_ALL
 * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
 *
 * cmd: MMUEXT_FLUSH_CACHE
 * No additional arguments. Writes back and flushes cache contents.
 *
 * cmd: MMUEXT_SET_LDT
 * linear_addr: Linear address of LDT base (NB. must be page-aligned).
 * nr_ents: Number of entries in LDT.
 */
#define MMUEXT_PIN_L1_TABLE 0
#define MMUEXT_PIN_L2_TABLE 1
#define MMUEXT_PIN_L3_TABLE 2
#define MMUEXT_PIN_L4_TABLE 3
#define MMUEXT_UNPIN_TABLE 4
#define MMUEXT_NEW_BASEPTR 5
#define MMUEXT_TLB_FLUSH_LOCAL 6
#define MMUEXT_INVLPG_LOCAL 7
#define MMUEXT_TLB_FLUSH_MULTI 8
#define MMUEXT_INVLPG_MULTI 9
#define MMUEXT_TLB_FLUSH_ALL 10
#define MMUEXT_INVLPG_ALL 11
#define MMUEXT_FLUSH_CACHE 12
#define MMUEXT_SET_LDT 13
#define MMUEXT_NEW_USER_BASEPTR 15

#ifndef __ASSEMBLY__
struct mmuext_op {
    unsigned int cmd;
    union {
        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
        xen_pfn_t mfn;
        /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
        unsigned long linear_addr;
    } arg1;
    union {
        /* SET_LDT */
        unsigned int nr_ents;
        /* TLB_FLUSH_MULTI, INVLPG_MULTI */
#if __XEN_INTERFACE_VERSION__ >= 0x00030205
        XEN_GUEST_HANDLE(void) vcpumask;
#else
        void *vcpumask;
#endif
    } arg2;
};
typedef struct mmuext_op mmuext_op_t;
DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
#endif

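/*
 * Illustrative sketch only: pinning a freshly written L1 page table and then
 * flushing the local TLB with a hypothetical HYPERVISOR_mmuext_op() wrapper;
 * 'l1_mfn' is an assumed machine frame number.
 */
#if 0
    struct mmuext_op op[2];

    op[0].cmd = MMUEXT_PIN_L1_TABLE;
    op[0].arg1.mfn = l1_mfn;
    op[1].cmd = MMUEXT_TLB_FLUSH_LOCAL;
    (void) HYPERVISOR_mmuext_op(op, 2, NULL, DOMID_SELF);
#endif
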
/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
#define UVMF_NONE (0UL<<0) /* No flushing at all. */
#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
#define UVMF_FLUSHTYPE_MASK (3UL<<0)
#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */

/*
 * Commands to HYPERVISOR_console_io().
 */
#define CONSOLEIO_write 0
#define CONSOLEIO_read 1
#define CONSOLEIO_get_device 32

/*
 * Commands to HYPERVISOR_vm_assist().
 */
#define VMASST_CMD_enable 0
#define VMASST_CMD_disable 1

/* x86/32 guests: simulate full 4GB segment limits. */
#define VMASST_TYPE_4gb_segments 0

/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
#define VMASST_TYPE_4gb_segments_notify 1

/*
 * x86 guests: support writes to bottom-level PTEs.
 * NB1. Page-directory entries cannot be written.
 * NB2. Guest must continue to remove all writable mappings of PTEs.
 */
#define VMASST_TYPE_writable_pagetables 2

/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3

#define MAX_VMASST_TYPE 3

#ifndef __ASSEMBLY__

typedef uint16_t domid_t;

/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
#define DOMID_FIRST_RESERVED (0x7FF0U)

/* DOMID_SELF is used in certain contexts to refer to oneself. */
#define DOMID_SELF (0x7FF0U)

/*
 * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
 * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
 * is useful to ensure that no mappings to the OS's own heap are accidentally
 * installed. (e.g., in Linux this could cause havoc as reference counts
 * aren't adjusted on the I/O-mapping code path).
 * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
 * be specified by any calling domain.
 */
#define DOMID_IO (0x7FF1U)

/*
 * DOMID_XEN is used to allow privileged domains to map restricted parts of
 * Xen's heap space (e.g., the machine_to_phys table).
 * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
 * the caller is privileged.
 */
#define DOMID_XEN (0x7FF2U)

/*
 * Send an array of these to HYPERVISOR_mmu_update().
 * NB. The fields are natural pointer/address size for this architecture.
 */
struct mmu_update {
    uint64_t ptr;  /* Machine address of PTE. */
    uint64_t val;  /* New contents of PTE.    */
};
typedef struct mmu_update mmu_update_t;
DEFINE_XEN_GUEST_HANDLE(mmu_update_t);

/*
 * Send an array of these to HYPERVISOR_multicall().
 * NB. The fields are natural register size for this architecture.
 */
struct multicall_entry {
    unsigned long op, result;
    unsigned long args[6];
};
typedef struct multicall_entry multicall_entry_t;
DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);

/*
 * Event channel endpoints per domain:
 * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
 */
#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)

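/*
 * Illustrative sketch only: issuing two hypercalls in one batch with a
 * hypothetical HYPERVISOR_multicall() wrapper.  Each entry's 'op' is one of
 * the __HYPERVISOR_* numbers defined above and 'args' carries that call's
 * arguments; Xen writes each call's return value into 'result'.  The req/ops
 * arrays and their counts are assumed to have been built by the caller.
 */
#if 0
    multicall_entry_t mc[2];

    mc[0].op = __HYPERVISOR_mmu_update;
    mc[0].args[0] = (unsigned long)req;     /* mmu_update_t array */
    mc[0].args[1] = nreq;
    mc[0].args[2] = 0;                      /* no success count wanted */
    mc[0].args[3] = DOMID_SELF;

    mc[1].op = __HYPERVISOR_mmuext_op;
    mc[1].args[0] = (unsigned long)ops;     /* mmuext_op array */
    mc[1].args[1] = nops;
    mc[1].args[2] = 0;
    mc[1].args[3] = DOMID_SELF;

    (void) HYPERVISOR_multicall(mc, 2);
#endif
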
struct vcpu_time_info {
    /*
     * Updates to the following values are preceded and followed by an
     * increment of 'version'. The guest can therefore detect updates by
     * looking for changes to 'version'. If the least-significant bit of
     * the version number is set then an update is in progress and the guest
     * must wait to read a consistent set of values.
     * The correct way to interact with the version number is similar to
     * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
     */
    uint32_t version;
    uint32_t pad0;
    uint64_t tsc_timestamp; /* TSC at last update of time vals. */
    uint64_t system_time;   /* Time, in nanosecs, since boot.   */
    /*
     * Current system time:
     *   system_time +
     *   ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
     * CPU frequency (Hz):
     *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
     */
    uint32_t tsc_to_system_mul;
    int8_t tsc_shift;
    int8_t pad1[3];
}; /* 32 bytes */
typedef struct vcpu_time_info vcpu_time_info_t;

struct vcpu_info {
    /*
     * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
     * a pending notification for a particular VCPU. It is then cleared
     * by the guest OS /before/ checking for pending work, thus avoiding
     * a set-and-check race. Note that the mask is only accessed by Xen
     * on the CPU that is currently hosting the VCPU. This means that the
     * pending and mask flags can be updated by the guest without special
     * synchronisation (i.e., no need for the x86 LOCK prefix).
     * This may seem suboptimal because if the pending flag is set by
     * a different CPU then an IPI may be scheduled even when the mask
     * is set. However, note:
     *  1. The task of 'interrupt holdoff' is covered by the per-event-
     *     channel mask bits. A 'noisy' event that is continually being
     *     triggered can be masked at source at this very precise
     *     granularity.
     *  2. The main purpose of the per-VCPU mask is therefore to restrict
     *     reentrant execution: whether for concurrency control, or to
     *     prevent unbounded stack usage. Whatever the purpose, we expect
     *     that the mask will be asserted only for short periods at a time,
     *     and so the likelihood of a 'spurious' IPI is suitably small.
     * The mask is read before making an event upcall to the guest: a
     * non-zero mask therefore guarantees that the VCPU will not receive
     * an upcall activation. The mask is cleared when the VCPU requests
     * to block: this avoids wakeup-waiting races.
     */
    uint8_t evtchn_upcall_pending;
    uint8_t evtchn_upcall_mask;
    unsigned long evtchn_pending_sel;
    struct arch_vcpu_info arch;
    struct vcpu_time_info time;
}; /* 64 bytes (x86) */
#ifndef __XEN__
typedef struct vcpu_info vcpu_info_t;
#endif

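/*
 * Illustrative sketch only: reading a consistent snapshot of vcpu_time_info
 * (seqlock-style, as described above) and extrapolating the current system
 * time with the documented formula.  rdtsc() and the 'vt' pointer are
 * assumed to be supplied by the guest; memory barriers are omitted, and a
 * real implementation keeps the full >64-bit product of the multiply.
 */
#if 0
static uint64_t
xen_system_time_ns(volatile vcpu_time_info_t *vt)
{
    uint32_t version, mul;
    uint64_t base, stamp, delta;
    int8_t shift;

    do {
        /* An odd version means an update is in progress: wait it out. */
        while ((version = vt->version) & 1)
            ;
        base  = vt->system_time;
        stamp = vt->tsc_timestamp;
        mul   = vt->tsc_to_system_mul;
        shift = vt->tsc_shift;
    } while (vt->version != version);   /* retry if Xen updated meanwhile */

    delta = rdtsc() - stamp;
    delta = (shift >= 0) ? (delta << shift) : (delta >> -shift);
    return (base + ((delta * mul) >> 32));
}
#endif
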
/*
 * Xen/kernel shared data -- pointer provided in start_info.
 *
 * This structure is defined to be both smaller than a page, and the
 * only data on the shared page, but may vary in actual size even within
 * compatible Xen versions; guests should not rely on the size
 * of this structure remaining constant.
 */
struct shared_info {
    struct vcpu_info vcpu_info[MAX_VIRT_CPUS];

    /*
     * A domain can create "event channels" on which it can send and receive
     * asynchronous event notifications. There are three classes of event that
     * are delivered by this mechanism:
     *  1. Bi-directional inter- and intra-domain connections. Domains must
     *     arrange out-of-band to set up a connection (usually by allocating
     *     an unbound 'listener' port and advertising that via a storage service
     *     such as xenstore).
     *  2. Physical interrupts. A domain with suitable hardware-access
     *     privileges can bind an event-channel port to a physical interrupt
     *     source.
     *  3. Virtual interrupts ('events'). A domain can bind an event-channel
     *     port to a virtual interrupt source, such as the virtual-timer
     *     device or the emergency console.
     *
     * Event channels are addressed by a "port index". Each channel is
     * associated with two bits of information:
     *  1. PENDING -- notifies the domain that there is a pending notification
     *     to be processed. This bit is cleared by the guest.
     *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
     *     will cause an asynchronous upcall to be scheduled. This bit is only
     *     updated by the guest. It is read-only within Xen. If a channel
     *     becomes pending while the channel is masked then the 'edge' is lost
     *     (i.e., when the channel is unmasked, the guest must manually handle
     *     pending notifications as no upcall will be scheduled by Xen).
     *
     * To expedite scanning of pending notifications, any 0->1 pending
     * transition on an unmasked channel causes a corresponding bit in a
     * per-vcpu selector word to be set. Each bit in the selector covers a
     * 'C long' in the PENDING bitfield array.
     */
    unsigned long evtchn_pending[sizeof(unsigned long) * 8];
    unsigned long evtchn_mask[sizeof(unsigned long) * 8];

    /*
     * Wallclock time: updated only by control software. Guests should base
     * their gettimeofday() syscall on this wallclock-base value.
     */
    uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
    uint32_t wc_sec;     /* Secs  00:00:00 UTC, Jan 1, 1970.  */
    uint32_t wc_nsec;    /* Nsecs 00:00:00 UTC, Jan 1, 1970.  */

    struct arch_shared_info arch;

};
#ifndef __XEN__
typedef struct shared_info shared_info_t;
#endif

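/*
 * Illustrative sketch only: the shape of an event upcall handler that scans
 * for pending notifications via the per-VCPU selector word described above.
 * 'shared' and 'vcpu' are assumed pointers to the shared_info page and this
 * VCPU's vcpu_info; the atomic/bit-scan primitives shown are GCC builtins
 * and real dispatch of each port is omitted.
 */
#if 0
    unsigned long sel, pending, l1, l2;

    vcpu->evtchn_upcall_pending = 0;
    sel = __atomic_exchange_n(&vcpu->evtchn_pending_sel, 0UL, __ATOMIC_ACQ_REL);
    while (sel != 0) {
        l1 = __builtin_ctzl(sel);       /* which 'C long' of the PENDING array */
        sel &= ~(1UL << l1);
        pending = shared->evtchn_pending[l1] & ~shared->evtchn_mask[l1];
        while (pending != 0) {
            l2 = __builtin_ctzl(pending);
            pending &= ~(1UL << l2);
            /* port = l1 * (sizeof (long) * 8) + l2: clear its PENDING bit
               (atomically) and invoke the handler bound to that port. */
        }
    }
#endif
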
/*
 * Start-of-day memory layout:
 *  1. The domain is started within a contiguous virtual-memory region.
 *  2. The contiguous region ends on an aligned 4MB boundary.
 *  3. This is the order of bootstrap elements in the initial virtual region:
 *      a. relocated kernel image
 *      b. initial ram disk              [mod_start, mod_len]
 *      c. list of allocated page frames [mfn_list, nr_pages]
 *      d. start_info_t structure        [register ESI (x86)]
 *      e. bootstrap page tables         [pt_base, CR3 (x86)]
 *      f. bootstrap stack               [register ESP (x86)]
 *  4. Bootstrap elements are packed together, but each is 4kB-aligned.
 *  5. The initial ram disk may be omitted.
 *  6. The list of page frames forms a contiguous 'pseudo-physical' memory
 *     layout for the domain. In particular, the bootstrap virtual-memory
 *     region is a 1:1 mapping to the first section of the pseudo-physical map.
 *  7. All bootstrap elements are mapped read-writable for the guest OS. The
 *     only exception is the bootstrap page table, which is mapped read-only.
 *  8. There is guaranteed to be at least 512kB padding after the final
 *     bootstrap element. If necessary, the bootstrap virtual region is
 *     extended by an extra 4MB to ensure this.
 */

#define MAX_GUEST_CMDLINE 1024
struct start_info {
    /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
    char magic[32];             /* "xen-<version>-<platform>". */
    unsigned long nr_pages;     /* Total pages allocated to this domain. */
    unsigned long shared_info;  /* MACHINE address of shared info struct. */
    uint32_t flags;             /* SIF_xxx flags. */
    xen_pfn_t store_mfn;        /* MACHINE page number of shared page. */
    uint32_t store_evtchn;      /* Event channel for store communication. */
    union {
        struct {
            xen_pfn_t mfn;      /* MACHINE page number of console page. */
            uint32_t evtchn;    /* Event channel for console page. */
        } domU;
        struct {
            uint32_t info_off;  /* Offset of console_info struct. */
            uint32_t info_size; /* Size of console_info struct from start. */
        } dom0;
    } console;
    /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
    unsigned long pt_base;      /* VIRTUAL address of page directory. */
    unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
    unsigned long mfn_list;     /* VIRTUAL address of page-frame list. */
    unsigned long mod_start;    /* VIRTUAL address of pre-loaded module. */
    unsigned long mod_len;      /* Size (bytes) of pre-loaded module. */
    int8_t cmd_line[MAX_GUEST_CMDLINE];
};
typedef struct start_info start_info_t;

/* New console union for dom0 introduced in 0x00030203. */
#if __XEN_INTERFACE_VERSION__ < 0x00030203
#define console_mfn console.domU.mfn
#define console_evtchn console.domU.evtchn
#endif

/* These flags are passed in the 'flags' field of start_info_t. */
#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */

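/*
 * Illustrative sketch only: typical early-boot use of the start_info page
 * (handed to an x86 PV guest in ESI).  'xen_start_info' is a hypothetical
 * pointer saved by the entry code.
 */
#if 0
    start_info_t *si = xen_start_info;

    if (strncmp(si->magic, "xen-", 4) != 0)
        return;     /* not booted by Xen */

    if (si->flags & SIF_INITDOMAIN) {
        /* dom0: a dom0_vga_console_info_t (defined below) is located at
           (char *)si + si->console.dom0.info_off. */
    } else {
        /* domU: the console ring lives in machine frame si->console.domU.mfn
           and is signalled via event channel si->console.domU.evtchn. */
    }
#endif
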
#define XEN_CONSOLE_INVALID -1
#define XEN_CONSOLE_COM1 0
#define XEN_CONSOLE_COM2 1
#define XEN_CONSOLE_VGA 2

typedef struct dom0_vga_console_info {
    uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
#define XEN_VGATYPE_TEXT_MODE_3 0x03
#define XEN_VGATYPE_VESA_LFB 0x23

    union {
        struct {
            /* Font height, in pixels. */
            uint16_t font_height;
            /* Cursor location (column, row). */
            uint16_t cursor_x, cursor_y;
            /* Number of rows and columns (dimensions in characters). */
            uint16_t rows, columns;
        } text_mode_3;

        struct {
            /* Width and height, in pixels. */
            uint16_t width, height;
            /* Bytes per scan line. */
            uint16_t bytes_per_line;
            /* Bits per pixel. */
            uint16_t bits_per_pixel;
            /* LFB physical address, and size (in units of 64kB). */
            uint32_t lfb_base;
            uint32_t lfb_size;
            /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
            uint8_t red_pos, red_size;
            uint8_t green_pos, green_size;
            uint8_t blue_pos, blue_size;
            uint8_t rsvd_pos, rsvd_size;
#if __XEN_INTERFACE_VERSION__ >= 0x00030206
            /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
            uint32_t gbl_caps;
            /* Mode attributes (offset 0x0, VESA command 0x4f01). */
            uint16_t mode_attrs;
#endif
        } vesa_lfb;
    } u;
} dom0_vga_console_info_t;
#define xen_vga_console_info dom0_vga_console_info
#define xen_vga_console_info_t dom0_vga_console_info_t

typedef uint8_t xen_domain_handle_t[16];

/* Turn a plain number into a C unsigned long constant. */
#define __mk_unsigned_long(x) x ## UL
#define mk_unsigned_long(x) __mk_unsigned_long(x)

__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);

#else /* __ASSEMBLY__ */

/* In assembly code we cannot use C numeric constant suffixes. */
#define mk_unsigned_long(x) x

#endif /* !__ASSEMBLY__ */

/* Default definitions for macros used by domctl/sysctl. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif
#ifndef XEN_GUEST_HANDLE_64
#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
#endif
#endif

#endif /* __XEN_PUBLIC_XEN_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */