/******************************************************************************
 * vcpu.h
 *
 * VCPU initialisation, query, and hotplug.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
 */

#ifndef __XEN_PUBLIC_VCPU_H__
#define __XEN_PUBLIC_VCPU_H__

#include "xen.h"

/*
 * Prototype for this hypercall is:
 *  long vcpu_op(int cmd, unsigned int vcpuid, void *extra_args)
 * @cmd        == VCPUOP_??? (VCPU operation).
 * @vcpuid     == VCPU to operate on.
 * @extra_args == Operation-specific extra arguments (NULL if none).
 */

/*
 * Initialise a VCPU. Each VCPU can be initialised only once. A
 * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
 *
 * @extra_arg == For PV or ARM guests this is a pointer to a vcpu_guest_context
 *               structure containing the initial state for the VCPU. For x86
 *               HVM based guests this is a pointer to a vcpu_hvm_context
 *               structure.
 */
#define VCPUOP_initialise            0

/*
 * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
 * if the VCPU has not been initialised (VCPUOP_initialise).
 */
#define VCPUOP_up                    1

/*
 * Bring down a VCPU (i.e., make it non-runnable).
 * There are a few caveats that callers should observe:
 *  1. This operation may return, and VCPUOP_is_up may return false, before the
 *     VCPU stops running (i.e., the command is asynchronous). It is a good
 *     idea to ensure that the VCPU has entered a non-critical loop before
 *     bringing it down. Alternatively, this operation is guaranteed
 *     synchronous if invoked by the VCPU itself.
 *  2. After a VCPU is initialised, there is currently no way to drop all its
 *     references to domain memory. Even a VCPU that is down still holds
 *     memory references via its pagetable base pointer and GDT. It is good
 *     practise to move a VCPU onto an 'idle' or default page table, LDT and
 *     GDT before bringing it down.
 */
#define VCPUOP_down                  2

/* Returns 1 if the given VCPU is up. */
#define VCPUOP_is_up                 3

/*
 * Return information about the state and running time of a VCPU.
 * @extra_arg == pointer to vcpu_runstate_info structure.
 */
#define VCPUOP_get_runstate_info     4
struct vcpu_runstate_info {
    /* VCPU's current state (RUNSTATE_*). */
    int      state;
    /* When was current state entered (system time, ns)? */
    uint64_t state_entry_time;
    /*
     * Update indicator set in state_entry_time:
     * When activated via VMASST_TYPE_runstate_update_flag, set during
     * updates in guest memory mapped copy of vcpu_runstate_info.
     */
#define XEN_RUNSTATE_UPDATE          (xen_mk_ullong(1) << 63)
    /*
     * Time spent in each RUNSTATE_* (ns). The sum of these times is
     * guaranteed not to drift from system time.
     */
    uint64_t time[4];
};
typedef struct vcpu_runstate_info vcpu_runstate_info_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);

/* VCPU is currently running on a physical CPU. */
#define RUNSTATE_running  0

/* VCPU is runnable, but not currently scheduled on any physical CPU. */
#define RUNSTATE_runnable 1

/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
#define RUNSTATE_blocked  2

/*
 * VCPU is not runnable, but it is not blocked.
 * This is a 'catch all' state for things like hotplug and pauses by the
 * system administrator (or for critical sections in the hypervisor).
 * RUNSTATE_blocked dominates this state (it is the preferred state).
 */
#define RUNSTATE_offline  3

/*
 * Register a shared memory area from which the guest may obtain its own
 * runstate information without needing to execute a hypercall.
 * Notes:
 *  1. The registered address may be virtual or physical or guest handle,
 *     depending on the platform. Virtual address or guest handle should be
 *     registered on x86 systems.
 *  2. Only one shared area may be registered per VCPU. The shared area is
 *     updated by the hypervisor each time the VCPU is scheduled. Thus
 *     runstate.state will always be RUNSTATE_running and
 *     runstate.state_entry_time will indicate the system time at which the
 *     VCPU was last scheduled to run.
 * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
 */
#define VCPUOP_register_runstate_memory_area 5
struct vcpu_register_runstate_memory_area {
    /* Union of the address forms listed in note 1 above. */
    union {
        XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
        struct vcpu_runstate_info *v;
        uint64_t p;
    } addr;
};
typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);

/*
 * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
 * which can be set via these commands. Periods smaller than one millisecond
 * may not be supported.
 */
#define VCPUOP_set_periodic_timer    6 /* arg == vcpu_set_periodic_timer_t */
#define VCPUOP_stop_periodic_timer   7 /* arg == NULL */
struct vcpu_set_periodic_timer {
    /* Timer period in nanoseconds. */
    uint64_t period_ns;
};
typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);

/*
 * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
 * timer which can be set via these commands.
 */
#define VCPUOP_set_singleshot_timer  8 /* arg == vcpu_set_singleshot_timer_t */
#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
struct vcpu_set_singleshot_timer {
    uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
    uint32_t flags;          /* VCPU_SSHOTTMR_??? */
};
typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);

/* Flags to VCPUOP_set_singleshot_timer. */
/* Require the timeout to be in the future (return -ETIME if it's passed). */
#define _VCPU_SSHOTTMR_future (0)
#define VCPU_SSHOTTMR_future  (1U << _VCPU_SSHOTTMR_future)

/*
 * Register a memory location in the guest address space for the
 * vcpu_info structure. This allows the guest to place the vcpu_info
 * structure in a convenient place, such as in a per-cpu data area.
 * The pointer need not be page aligned, but the structure must not
 * cross a page boundary.
 *
 * This may be called only once per vcpu.
 */
#define VCPUOP_register_vcpu_info   10 /* arg == vcpu_register_vcpu_info_t */
struct vcpu_register_vcpu_info {
    uint64_t mfn;    /* mfn of page to place vcpu_info */
    uint32_t offset; /* offset within page */
    uint32_t rsvd;   /* unused */
};
typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);

/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
#define VCPUOP_send_nmi             11

/*
 * Get the physical ID information for a pinned vcpu's underlying physical
 * processor. The physical ID information is architecture-specific.
 * On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
 * This command returns -EINVAL if it is not a valid operation for this VCPU.
 */
#define VCPUOP_get_physid           12 /* arg == vcpu_get_physid_t */
struct vcpu_get_physid {
    /* Architecture-specific physical processor ID (see layout above). */
    uint64_t phys_id;
};
typedef struct vcpu_get_physid vcpu_get_physid_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
/* Extract the x86 APIC ID / ACPI ID from a phys_id value. */
#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))

/*
 * Register a memory location to get a secondary copy of the vcpu time
 * parameters. The master copy still exists as part of the vcpu shared
 * memory area, and this secondary copy is updated whenever the master copy
 * is updated (and using the same versioning scheme for synchronisation).
 *
 * The intent is that this copy may be mapped (RO) into userspace so
 * that usermode can compute system time using the time info and the
 * tsc. Usermode will see an array of vcpu_time_info structures, one
 * for each vcpu, and choose the right one by an existing mechanism
 * which allows it to get the current vcpu number (such as via a
 * segment limit). It can then apply the normal algorithm to compute
 * system time from the tsc.
 *
 * @extra_arg == pointer to vcpu_register_time_info_memory_area structure.
 */
#define VCPUOP_register_vcpu_time_memory_area   13
DEFINE_XEN_GUEST_HANDLE(vcpu_time_info_t);
struct vcpu_register_time_memory_area {
    /* Union of the supported address forms (guest handle, virtual, physical). */
    union {
        XEN_GUEST_HANDLE(vcpu_time_info_t) h;
        struct vcpu_time_info *v;
        uint64_t p;
    } addr;
};
typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t);

#endif /* __XEN_PUBLIC_VCPU_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */