1 /* $OpenBSD: vmmvar.h,v 1.19 2016/09/04 08:49:18 mlarkin Exp $ */ 2 /* 3 * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org> 4 * 5 * Permission to use, copy, modify, and distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 /* 19 * CPU capabilities for VMM operation 20 */ 21 #ifndef _MACHINE_VMMVAR_H_ 22 #define _MACHINE_VMMVAR_H_ 23 24 #define VMM_HV_SIGNATURE "OpenBSDVMM58" 25 26 #define VMM_MAX_MEM_RANGES 16 27 #define VMM_MAX_DISKS_PER_VM 2 28 #define VMM_MAX_PATH_DISK 128 29 #define VMM_MAX_NAME_LEN 32 30 #define VMM_MAX_KERNEL_PATH 128 31 #define VMM_MAX_VCPUS_PER_VM 64 32 #define VMM_MAX_VM_MEM_SIZE (512 * 1024) 33 #define VMM_MAX_NICS_PER_VM 2 34 35 #define VMM_PCI_MMIO_BAR_BASE 0xF0000000 36 #define VMM_PCI_MMIO_BAR_END 0xF0FFFFFF 37 #define VMM_PCI_MMIO_BAR_SIZE 0x00010000 38 #define VMM_PCI_IO_BAR_BASE 0x1000 39 #define VMM_PCI_IO_BAR_END 0xFFFF 40 #define VMM_PCI_IO_BAR_SIZE 0x1000 41 42 /* VMX: Basic Exit Reasons */ 43 #define VMX_EXIT_NMI 0 44 #define VMX_EXIT_EXTINT 1 45 #define VMX_EXIT_TRIPLE_FAULT 2 46 #define VMX_EXIT_INIT 3 47 #define VMX_EXIT_SIPI 4 48 #define VMX_EXIT_IO_SMI 5 49 #define VMX_EXIT_OTHER_SMI 6 50 #define VMX_EXIT_INT_WINDOW 7 51 #define VMX_EXIT_NMI_WINDOW 8 52 #define VMX_EXIT_TASK_SWITCH 9 53 #define VMX_EXIT_CPUID 10 54 #define VMX_EXIT_GETSEC 11 55 #define VMX_EXIT_HLT 12 
/* VMX: Basic Exit Reasons, continued (gaps are reasons not defined here) */
#define VMX_EXIT_INVD				13
#define VMX_EXIT_INVLPG				14
#define VMX_EXIT_RDPMC				15
#define VMX_EXIT_RDTSC				16
#define VMX_EXIT_RSM				17
#define VMX_EXIT_VMCALL				18
#define VMX_EXIT_VMCLEAR			19
#define VMX_EXIT_VMLAUNCH			20
#define VMX_EXIT_VMPTRLD			21
#define VMX_EXIT_VMPTRST			22
#define VMX_EXIT_VMREAD				23
#define VMX_EXIT_VMRESUME			24
#define VMX_EXIT_VMWRITE			25
#define VMX_EXIT_VMXOFF				26
#define VMX_EXIT_VMXON				27
#define VMX_EXIT_CR_ACCESS			28
#define VMX_EXIT_MOV_DR				29
#define VMX_EXIT_IO				30
#define VMX_EXIT_RDMSR				31
#define VMX_EXIT_WRMSR				32
#define VMX_EXIT_ENTRY_FAILED_GUEST_STATE	33
#define VMX_EXIT_ENTRY_FAILED_MSR_LOAD		34
#define VMX_EXIT_MWAIT				36
#define VMX_EXIT_MTF				37
#define VMX_EXIT_MONITOR			39
#define VMX_EXIT_PAUSE				40
#define VMX_EXIT_ENTRY_FAILED_MCE		41
#define VMX_EXIT_TPR_BELOW_THRESHOLD		43
#define VMX_EXIT_APIC_ACCESS			44
#define VMX_EXIT_VIRTUALIZED_EOI		45
#define VMX_EXIT_GDTR_IDTR			46
#define VMX_EXIT_LDTR_TR			47
#define VMX_EXIT_EPT_VIOLATION			48
#define VMX_EXIT_EPT_MISCONFIGURATION		49
#define VMX_EXIT_INVEPT				50
#define VMX_EXIT_RDTSCP				51
#define VMX_EXIT_VMX_PREEMPTION_TIMER_EXPIRED	52
#define VMX_EXIT_INVVPID			53
#define VMX_EXIT_WBINVD				54
#define VMX_EXIT_XSETBV				55
#define VMX_EXIT_APIC_WRITE			56
#define VMX_EXIT_RDRAND				57
#define VMX_EXIT_INVPCID			58
#define VMX_EXIT_VMFUNC				59
#define VMX_EXIT_RDSEED				61
#define VMX_EXIT_XSAVES				63
#define VMX_EXIT_XRSTORS			64

/* Synthetic (non-hardware) exit reasons reported to userland. */
#define VM_EXIT_TERMINATED			0xFFFE
#define VM_EXIT_NONE				0xFFFF

/*
 * VCPU state values. Note that there is a conversion function in vmm.c
 * (vcpu_state_decode) that converts these to human readable strings,
 * so this enum and vcpu_state_decode should be kept in sync.
 */
enum {
	VCPU_STATE_STOPPED,
	VCPU_STATE_RUNNING,
	VCPU_STATE_REQTERM,
	VCPU_STATE_TERMINATED,
	VCPU_STATE_UNKNOWN,
};

/* Direction of an emulated port access. */
enum {
	VEI_DIR_OUT,
	VEI_DIR_IN
};

/*
 * vm exit data
 * vm_exit_inout : describes an IN/OUT exit
 */
struct vm_exit_inout {
	uint8_t		vei_size;	/* Size of access */
	uint8_t		vei_dir;	/* Direction (VEI_DIR_*) */
	uint8_t		vei_rep;	/* REP prefix? */
	uint8_t		vei_string;	/* string variety? */
	uint8_t		vei_encoding;	/* operand encoding */
	uint16_t	vei_port;	/* port */
	uint32_t	vei_data;	/* data (for IN insns) */
};

/* All per-exit payloads share this union. */
union vm_exit {
	struct vm_exit_inout	vei;	/* IN/OUT exit */
};

/*
 * struct vcpu_segment_info describes a segment + selector set, used
 * in constructing the initial vcpu register content
 */
struct vcpu_segment_info {
	uint16_t	vsi_sel;	/* selector */
	uint32_t	vsi_limit;	/* segment limit */
	uint32_t	vsi_ar;		/* access rights (VMX format presumably — confirm) */
	uint64_t	vsi_base;	/* base address */
};

/* Indices into vrs_gprs[] below. */
#define VCPU_REGS_RAX		0
#define VCPU_REGS_RBX		1
#define VCPU_REGS_RCX		2
#define VCPU_REGS_RDX		3
#define VCPU_REGS_RSI		4
#define VCPU_REGS_RDI		5
#define VCPU_REGS_R8		6
#define VCPU_REGS_R9		7
#define VCPU_REGS_R10		8
#define VCPU_REGS_R11		9
#define VCPU_REGS_R12		10
#define VCPU_REGS_R13		11
#define VCPU_REGS_R14		12
#define VCPU_REGS_R15		13
#define VCPU_REGS_RSP		14
#define VCPU_REGS_RBP		15
#define VCPU_REGS_RIP		16
#define VCPU_REGS_RFLAGS	17
#define VCPU_REGS_NGPRS		(VCPU_REGS_RFLAGS + 1)

/* Indices into vrs_crs[]. */
#define VCPU_REGS_CR0		0
#define VCPU_REGS_CR2		1
#define VCPU_REGS_CR3		2
#define VCPU_REGS_CR4		3
#define VCPU_REGS_CR8		4
#define VCPU_REGS_NCRS		(VCPU_REGS_CR8 + 1)

/* Indices into vrs_sregs[]. */
#define VCPU_REGS_CS		0
#define VCPU_REGS_DS		1
#define VCPU_REGS_ES		2
#define VCPU_REGS_FS		3
#define VCPU_REGS_GS		4
#define VCPU_REGS_SS		5
#define VCPU_REGS_LDTR		6
#define VCPU_REGS_TR		7
#define VCPU_REGS_NSREGS	(VCPU_REGS_TR + 1)
190 191 struct vcpu_reg_state { 192 uint64_t vrs_gprs[VCPU_REGS_NGPRS]; 193 uint64_t vrs_crs[VCPU_REGS_NCRS]; 194 struct vcpu_segment_info vrs_sregs[VCPU_REGS_NSREGS]; 195 struct vcpu_segment_info vrs_gdtr; 196 struct vcpu_segment_info vrs_idtr; 197 }; 198 199 struct vm_mem_range { 200 paddr_t vmr_gpa; 201 vaddr_t vmr_va; 202 size_t vmr_size; 203 }; 204 205 struct vm_create_params { 206 /* Input parameters to VMM_IOC_CREATE */ 207 size_t vcp_nmemranges; 208 size_t vcp_ncpus; 209 size_t vcp_ndisks; 210 size_t vcp_nnics; 211 struct vm_mem_range vcp_memranges[VMM_MAX_MEM_RANGES]; 212 char vcp_disks[VMM_MAX_DISKS_PER_VM][VMM_MAX_PATH_DISK]; 213 char vcp_name[VMM_MAX_NAME_LEN]; 214 char vcp_kernel[VMM_MAX_KERNEL_PATH]; 215 uint8_t vcp_macs[VMM_MAX_NICS_PER_VM][6]; 216 217 /* Output parameter from VMM_IOC_CREATE */ 218 uint32_t vcp_id; 219 }; 220 221 struct vm_run_params { 222 /* Input parameters to VMM_IOC_RUN */ 223 uint32_t vrp_vm_id; 224 uint32_t vrp_vcpu_id; 225 uint8_t vrp_continue; /* Continuing from an exit */ 226 uint16_t vrp_irq; /* IRQ to inject */ 227 228 /* Input/output parameter to VMM_IOC_RUN */ 229 union vm_exit *vrp_exit; /* updated exit data */ 230 231 /* Output parameter from VMM_IOC_RUN */ 232 uint16_t vrp_exit_reason; /* exit reason */ 233 uint8_t vrp_irqready; /* ready for IRQ on entry */ 234 }; 235 236 struct vm_info_result { 237 /* Output parameters from VMM_IOC_INFO */ 238 size_t vir_memory_size; 239 size_t vir_used_size; 240 size_t vir_ncpus; 241 uint8_t vir_vcpu_state[VMM_MAX_VCPUS_PER_VM]; 242 pid_t vir_creator_pid; 243 uint32_t vir_id; 244 char vir_name[VMM_MAX_NAME_LEN]; 245 }; 246 247 struct vm_info_params { 248 /* Input parameters to VMM_IOC_INFO */ 249 size_t vip_size; /* Output buffer size */ 250 251 /* Output Parameters from VMM_IOC_INFO */ 252 size_t vip_info_ct; /* # of entries returned */ 253 struct vm_info_result *vip_info; /* Output buffer */ 254 }; 255 256 struct vm_terminate_params { 257 /* Input parameters to VMM_IOC_TERM */ 258 
uint32_t vtp_vm_id; 259 }; 260 261 struct vm_resetcpu_params { 262 /* Input parameters to VMM_IOC_RESETCPU */ 263 uint32_t vrp_vm_id; 264 uint32_t vrp_vcpu_id; 265 struct vcpu_reg_state vrp_init_state; 266 }; 267 268 struct vm_intr_params { 269 /* Input parameters to VMM_IOC_INTR */ 270 uint32_t vip_vm_id; 271 uint32_t vip_vcpu_id; 272 uint16_t vip_intr; 273 }; 274 275 #define VM_RWREGS_GPRS 0x1 /* read/write GPRs */ 276 #define VM_RWREGS_SREGS 0x2 /* read/write segment registers */ 277 #define VM_RWREGS_CRS 0x4 /* read/write CRs */ 278 #define VM_RWREGS_ALL (VM_RWREGS_GPRS | VM_RWREGS_SREGS | VM_RWREGS_CRS) 279 280 struct vm_rwregs_params { 281 uint32_t vrwp_vm_id; 282 uint32_t vrwp_vcpu_id; 283 uint64_t vrwp_mask; 284 struct vcpu_reg_state vrwp_regs; 285 }; 286 287 /* IOCTL definitions */ 288 #define VMM_IOC_CREATE _IOWR('V', 1, struct vm_create_params) /* Create VM */ 289 #define VMM_IOC_RUN _IOWR('V', 2, struct vm_run_params) /* Run VCPU */ 290 #define VMM_IOC_INFO _IOWR('V', 3, struct vm_info_params) /* Get VM Info */ 291 #define VMM_IOC_TERM _IOW('V', 4, struct vm_terminate_params) /* Terminate VM */ 292 #define VMM_IOC_RESETCPU _IOW('V', 5, struct vm_resetcpu_params) /* Reset */ 293 #define VMM_IOC_INTR _IOW('V', 6, struct vm_intr_params) /* Intr pending */ 294 #define VMM_IOC_READREGS _IOWR('V', 7, struct vm_rwregs_params) /* Get registers */ 295 #define VMM_IOC_WRITEREGS _IOW('V', 8, struct vm_rwregs_params) /* Set registers */ 296 297 #ifdef _KERNEL 298 299 #define VMX_FAIL_LAUNCH_UNKNOWN 1 300 #define VMX_FAIL_LAUNCH_INVALID_VMCS 2 301 #define VMX_FAIL_LAUNCH_VALID_VMCS 3 302 303 #define VMX_NUM_MSR_STORE 7 304 305 /* MSR bitmap manipulation macros */ 306 #define MSRIDX(m) ((m) / 8) 307 #define MSRBIT(m) (1 << (m) % 8) 308 309 enum { 310 VMM_MODE_UNKNOWN, 311 VMM_MODE_VMX, 312 VMM_MODE_EPT, 313 VMM_MODE_SVM, 314 VMM_MODE_RVI 315 }; 316 317 enum { 318 VMM_MEM_TYPE_REGULAR, 319 VMM_MEM_TYPE_UNKNOWN 320 }; 321 322 /* Forward declarations */ 323 struct vm; 
324 325 /* 326 * Implementation-specific cpu state 327 */ 328 struct vmcb { 329 }; 330 331 struct vmcs { 332 uint32_t vmcs_revision; 333 }; 334 335 struct vmx_invvpid_descriptor 336 { 337 uint64_t vid_vpid; // : 16; 338 uint64_t vid_addr; 339 }; 340 341 struct vmx_invept_descriptor 342 { 343 uint64_t vid_eptp; 344 uint64_t vid_reserved; 345 }; 346 347 struct vmx_msr_store 348 { 349 uint64_t vms_index : 32; 350 uint64_t vms_data; 351 }; 352 353 /* 354 * Storage for guest registers not preserved in VMCS and various exit 355 * information. 356 * 357 * Note that vmx_enter_guest depends on the layout of this struct for 358 * field access. 359 */ 360 struct vmx_gueststate 361 { 362 /* %rsi should be first */ 363 uint64_t vg_rsi; /* 0x00 */ 364 uint64_t vg_rax; /* 0x08 */ 365 uint64_t vg_rbx; /* 0x10 */ 366 uint64_t vg_rcx; /* 0x18 */ 367 uint64_t vg_rdx; /* 0x20 */ 368 uint64_t vg_rdi; /* 0x28 */ 369 uint64_t vg_rbp; /* 0x30 */ 370 uint64_t vg_r8; /* 0x38 */ 371 uint64_t vg_r9; /* 0x40 */ 372 uint64_t vg_r10; /* 0x48 */ 373 uint64_t vg_r11; /* 0x50 */ 374 uint64_t vg_r12; /* 0x58 */ 375 uint64_t vg_r13; /* 0x60 */ 376 uint64_t vg_r14; /* 0x68 */ 377 uint64_t vg_r15; /* 0x70 */ 378 uint64_t vg_cr2; /* 0x78 */ 379 uint64_t vg_rip; /* 0x80 */ 380 uint32_t vg_exit_reason; /* 0x88 */ 381 uint64_t vg_rflags; /* 0x90 */ 382 }; 383 384 /* 385 * Virtual Machine 386 */ 387 struct vm; 388 389 /* 390 * Virtual CPU 391 */ 392 struct vcpu { 393 /* VMCS / VMCB pointer */ 394 vaddr_t vc_control_va; 395 uint64_t vc_control_pa; 396 397 /* VLAPIC pointer */ 398 vaddr_t vc_vlapic_va; 399 uint64_t vc_vlapic_pa; 400 401 /* MSR bitmap address */ 402 vaddr_t vc_msr_bitmap_va; 403 uint64_t vc_msr_bitmap_pa; 404 405 struct vm *vc_parent; 406 uint32_t vc_id; 407 u_int vc_state; 408 SLIST_ENTRY(vcpu) vc_vcpu_link; 409 vaddr_t vc_hsa_stack_va; 410 411 uint8_t vc_virt_mode; 412 413 struct cpu_info *vc_last_pcpu; 414 union vm_exit vc_exit; 415 416 uint16_t vc_intr; 417 uint8_t vc_irqready; 418 419 /* 
VMX only */ 420 uint64_t vc_vmx_basic; 421 uint64_t vc_vmx_entry_ctls; 422 uint64_t vc_vmx_true_entry_ctls; 423 uint64_t vc_vmx_exit_ctls; 424 uint64_t vc_vmx_true_exit_ctls; 425 uint64_t vc_vmx_pinbased_ctls; 426 uint64_t vc_vmx_true_pinbased_ctls; 427 uint64_t vc_vmx_procbased_ctls; 428 uint64_t vc_vmx_true_procbased_ctls; 429 uint64_t vc_vmx_procbased2_ctls; 430 struct vmx_gueststate vc_gueststate; 431 vaddr_t vc_vmx_msr_exit_save_va; 432 paddr_t vc_vmx_msr_exit_save_pa; 433 vaddr_t vc_vmx_msr_exit_load_va; 434 paddr_t vc_vmx_msr_exit_load_pa; 435 vaddr_t vc_vmx_msr_entry_load_va; 436 paddr_t vc_vmx_msr_entry_load_pa; 437 }; 438 439 SLIST_HEAD(vcpu_head, vcpu); 440 441 void vmm_dispatch_intr(vaddr_t); 442 int vmxon(uint64_t *); 443 int vmxoff(void); 444 int vmclear(uint64_t *); 445 int vmptrld(uint64_t *); 446 int vmptrst(uint64_t *); 447 int vmwrite(uint64_t, uint64_t); 448 int vmread(uint64_t, uint64_t *); 449 void invvpid(uint64_t, struct vmx_invvpid_descriptor *); 450 void invept(uint64_t, struct vmx_invept_descriptor *); 451 int vmx_enter_guest(uint64_t *, struct vmx_gueststate *, int); 452 void start_vmm_on_cpu(struct cpu_info *); 453 void stop_vmm_on_cpu(struct cpu_info *); 454 455 #endif /* _KERNEL */ 456 457 #endif /* ! _MACHINE_VMMVAR_H_ */ 458