/*	$OpenBSD: vmmvar.h,v 1.71 2021/04/05 18:26:46 dv Exp $	*/
/*
 * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * CPU capabilities for VMM operation
 */
#ifndef _MACHINE_VMMVAR_H_
#define _MACHINE_VMMVAR_H_

#define VMM_HV_SIGNATURE 	"OpenBSDVMM58"

#define VMM_MAX_MEM_RANGES	16
#define VMM_MAX_DISKS_PER_VM	4
#define VMM_MAX_PATH_DISK	128
#define VMM_MAX_PATH_CDROM	128
#define VMM_MAX_NAME_LEN	64
#define VMM_MAX_KERNEL_PATH	128
#define VMM_MAX_VCPUS_PER_VM	64
#define VMM_MAX_VM_MEM_SIZE	32768
#define VMM_MAX_NICS_PER_VM	4

#define VMM_PCI_MMIO_BAR_BASE	0xF0000000ULL
#define VMM_PCI_MMIO_BAR_END	0xFFFFFFFFULL
#define VMM_PCI_MMIO_BAR_SIZE	0x00010000
#define VMM_PCI_IO_BAR_BASE	0x1000
#define VMM_PCI_IO_BAR_END	0xFFFF
#define VMM_PCI_IO_BAR_SIZE	0x1000
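
/*
 * Illustrative sketch (not part of this header's ABI): a device-emulation
 * layer could hand out fixed-size MMIO BARs from the window above with a
 * simple bump allocator.  The helper below is hypothetical.
 *
 *	static uint64_t mmio_next = VMM_PCI_MMIO_BAR_BASE;
 *
 *	static int
 *	alloc_mmio_bar(uint64_t *base)
 *	{
 *		if (mmio_next + VMM_PCI_MMIO_BAR_SIZE - 1 > VMM_PCI_MMIO_BAR_END)
 *			return (-1);
 *		*base = mmio_next;
 *		mmio_next += VMM_PCI_MMIO_BAR_SIZE;
 *		return (0);
 *	}
 */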

/* VMX: Basic Exit Reasons */
#define VMX_EXIT_NMI				0
#define VMX_EXIT_EXTINT				1
#define VMX_EXIT_TRIPLE_FAULT			2
#define VMX_EXIT_INIT				3
#define VMX_EXIT_SIPI				4
#define VMX_EXIT_IO_SMI				5
#define VMX_EXIT_OTHER_SMI			6
#define VMX_EXIT_INT_WINDOW			7
#define VMX_EXIT_NMI_WINDOW			8
#define VMX_EXIT_TASK_SWITCH			9
#define VMX_EXIT_CPUID				10
#define VMX_EXIT_GETSEC				11
#define VMX_EXIT_HLT				12
#define VMX_EXIT_INVD				13
#define VMX_EXIT_INVLPG				14
#define VMX_EXIT_RDPMC				15
#define VMX_EXIT_RDTSC				16
#define VMX_EXIT_RSM				17
#define VMX_EXIT_VMCALL				18
#define VMX_EXIT_VMCLEAR			19
#define VMX_EXIT_VMLAUNCH			20
#define VMX_EXIT_VMPTRLD			21
#define VMX_EXIT_VMPTRST			22
#define VMX_EXIT_VMREAD				23
#define VMX_EXIT_VMRESUME			24
#define VMX_EXIT_VMWRITE			25
#define VMX_EXIT_VMXOFF				26
#define VMX_EXIT_VMXON				27
#define VMX_EXIT_CR_ACCESS			28
#define VMX_EXIT_MOV_DR				29
#define VMX_EXIT_IO				30
#define VMX_EXIT_RDMSR				31
#define VMX_EXIT_WRMSR				32
#define VMX_EXIT_ENTRY_FAILED_GUEST_STATE	33
#define VMX_EXIT_ENTRY_FAILED_MSR_LOAD		34
#define VMX_EXIT_MWAIT				36
#define VMX_EXIT_MTF				37
#define VMX_EXIT_MONITOR			39
#define VMX_EXIT_PAUSE				40
#define VMX_EXIT_ENTRY_FAILED_MCE		41
#define VMX_EXIT_TPR_BELOW_THRESHOLD		43
#define VMX_EXIT_APIC_ACCESS			44
#define VMX_EXIT_VIRTUALIZED_EOI		45
#define VMX_EXIT_GDTR_IDTR			46
#define	VMX_EXIT_LDTR_TR			47
#define VMX_EXIT_EPT_VIOLATION			48
#define VMX_EXIT_EPT_MISCONFIGURATION		49
#define VMX_EXIT_INVEPT				50
#define VMX_EXIT_RDTSCP				51
#define VMX_EXIT_VMX_PREEMPTION_TIMER_EXPIRED	52
#define VMX_EXIT_INVVPID			53
#define VMX_EXIT_WBINVD				54
#define VMX_EXIT_XSETBV				55
#define VMX_EXIT_APIC_WRITE			56
#define VMX_EXIT_RDRAND				57
#define VMX_EXIT_INVPCID			58
#define VMX_EXIT_VMFUNC				59
#define VMX_EXIT_RDSEED				61
#define VMX_EXIT_XSAVES				63
#define VMX_EXIT_XRSTORS			64

/*
 * VMX: Misc defines
 */
#define VMX_MAX_CR3_TARGETS			256

#define VM_EXIT_TERMINATED			0xFFFE
#define VM_EXIT_NONE				0xFFFF

/*
 * SVM: Intercept codes (exit reasons)
 */
#define SVM_VMEXIT_CR0_READ			0x00
#define SVM_VMEXIT_CR1_READ			0x01
#define SVM_VMEXIT_CR2_READ			0x02
#define SVM_VMEXIT_CR3_READ			0x03
#define SVM_VMEXIT_CR4_READ			0x04
#define SVM_VMEXIT_CR5_READ			0x05
#define SVM_VMEXIT_CR6_READ			0x06
#define SVM_VMEXIT_CR7_READ			0x07
#define SVM_VMEXIT_CR8_READ			0x08
#define SVM_VMEXIT_CR9_READ			0x09
#define SVM_VMEXIT_CR10_READ			0x0A
#define SVM_VMEXIT_CR11_READ			0x0B
#define SVM_VMEXIT_CR12_READ			0x0C
#define SVM_VMEXIT_CR13_READ			0x0D
#define SVM_VMEXIT_CR14_READ			0x0E
#define SVM_VMEXIT_CR15_READ			0x0F
#define SVM_VMEXIT_CR0_WRITE			0x10
#define SVM_VMEXIT_CR1_WRITE			0x11
#define SVM_VMEXIT_CR2_WRITE			0x12
#define SVM_VMEXIT_CR3_WRITE			0x13
#define SVM_VMEXIT_CR4_WRITE			0x14
#define SVM_VMEXIT_CR5_WRITE			0x15
#define SVM_VMEXIT_CR6_WRITE			0x16
#define SVM_VMEXIT_CR7_WRITE			0x17
#define SVM_VMEXIT_CR8_WRITE			0x18
#define SVM_VMEXIT_CR9_WRITE			0x19
#define SVM_VMEXIT_CR10_WRITE			0x1A
#define SVM_VMEXIT_CR11_WRITE			0x1B
#define SVM_VMEXIT_CR12_WRITE			0x1C
#define SVM_VMEXIT_CR13_WRITE			0x1D
#define SVM_VMEXIT_CR14_WRITE			0x1E
#define SVM_VMEXIT_CR15_WRITE			0x1F
#define SVM_VMEXIT_DR0_READ			0x20
#define SVM_VMEXIT_DR1_READ			0x21
#define SVM_VMEXIT_DR2_READ			0x22
#define SVM_VMEXIT_DR3_READ			0x23
#define SVM_VMEXIT_DR4_READ			0x24
#define SVM_VMEXIT_DR5_READ			0x25
#define SVM_VMEXIT_DR6_READ			0x26
#define SVM_VMEXIT_DR7_READ			0x27
#define SVM_VMEXIT_DR8_READ			0x28
#define SVM_VMEXIT_DR9_READ			0x29
#define SVM_VMEXIT_DR10_READ			0x2A
#define SVM_VMEXIT_DR11_READ			0x2B
#define SVM_VMEXIT_DR12_READ			0x2C
#define SVM_VMEXIT_DR13_READ			0x2D
#define SVM_VMEXIT_DR14_READ			0x2E
#define SVM_VMEXIT_DR15_READ			0x2F
#define SVM_VMEXIT_DR0_WRITE			0x30
#define SVM_VMEXIT_DR1_WRITE			0x31
#define SVM_VMEXIT_DR2_WRITE			0x32
#define SVM_VMEXIT_DR3_WRITE			0x33
#define SVM_VMEXIT_DR4_WRITE			0x34
#define SVM_VMEXIT_DR5_WRITE			0x35
#define SVM_VMEXIT_DR6_WRITE			0x36
#define SVM_VMEXIT_DR7_WRITE			0x37
#define SVM_VMEXIT_DR8_WRITE			0x38
#define SVM_VMEXIT_DR9_WRITE			0x39
#define SVM_VMEXIT_DR10_WRITE			0x3A
#define SVM_VMEXIT_DR11_WRITE			0x3B
#define SVM_VMEXIT_DR12_WRITE			0x3C
#define SVM_VMEXIT_DR13_WRITE			0x3D
#define SVM_VMEXIT_DR14_WRITE			0x3E
#define SVM_VMEXIT_DR15_WRITE			0x3F
#define SVM_VMEXIT_EXCP0			0x40
#define SVM_VMEXIT_EXCP1			0x41
#define SVM_VMEXIT_EXCP2			0x42
#define SVM_VMEXIT_EXCP3			0x43
#define SVM_VMEXIT_EXCP4			0x44
#define SVM_VMEXIT_EXCP5			0x45
#define SVM_VMEXIT_EXCP6			0x46
#define SVM_VMEXIT_EXCP7			0x47
#define SVM_VMEXIT_EXCP8			0x48
#define SVM_VMEXIT_EXCP9			0x49
#define SVM_VMEXIT_EXCP10			0x4A
#define SVM_VMEXIT_EXCP11			0x4B
#define SVM_VMEXIT_EXCP12			0x4C
#define SVM_VMEXIT_EXCP13			0x4D
#define SVM_VMEXIT_EXCP14			0x4E
#define SVM_VMEXIT_EXCP15			0x4F
#define SVM_VMEXIT_EXCP16			0x50
#define SVM_VMEXIT_EXCP17			0x51
#define SVM_VMEXIT_EXCP18			0x52
#define SVM_VMEXIT_EXCP19			0x53
#define SVM_VMEXIT_EXCP20			0x54
#define SVM_VMEXIT_EXCP21			0x55
#define SVM_VMEXIT_EXCP22			0x56
#define SVM_VMEXIT_EXCP23			0x57
#define SVM_VMEXIT_EXCP24			0x58
#define SVM_VMEXIT_EXCP25			0x59
#define SVM_VMEXIT_EXCP26			0x5A
#define SVM_VMEXIT_EXCP27			0x5B
#define SVM_VMEXIT_EXCP28			0x5C
#define SVM_VMEXIT_EXCP29			0x5D
#define SVM_VMEXIT_EXCP30			0x5E
#define SVM_VMEXIT_EXCP31			0x5F
#define SVM_VMEXIT_INTR				0x60
#define SVM_VMEXIT_NMI				0x61
#define SVM_VMEXIT_SMI				0x62
#define SVM_VMEXIT_INIT				0x63
#define SVM_VMEXIT_VINTR			0x64
#define SVM_VMEXIT_CR0_SEL_WRITE		0x65
#define SVM_VMEXIT_IDTR_READ			0x66
#define SVM_VMEXIT_GDTR_READ			0x67
#define SVM_VMEXIT_LDTR_READ			0x68
#define SVM_VMEXIT_TR_READ			0x69
#define SVM_VMEXIT_IDTR_WRITE			0x6A
#define SVM_VMEXIT_GDTR_WRITE			0x6B
#define SVM_VMEXIT_LDTR_WRITE			0x6C
#define SVM_VMEXIT_TR_WRITE			0x6D
#define SVM_VMEXIT_RDTSC			0x6E
#define SVM_VMEXIT_RDPMC			0x6F
#define SVM_VMEXIT_PUSHF			0x70
#define SVM_VMEXIT_POPF				0x71
#define SVM_VMEXIT_CPUID			0x72
#define SVM_VMEXIT_RSM				0x73
#define SVM_VMEXIT_IRET				0x74
#define SVM_VMEXIT_SWINT			0x75
#define SVM_VMEXIT_INVD				0x76
#define SVM_VMEXIT_PAUSE			0x77
#define SVM_VMEXIT_HLT				0x78
#define SVM_VMEXIT_INVLPG			0x79
#define SVM_VMEXIT_INVLPGA			0x7A
#define SVM_VMEXIT_IOIO				0x7B
#define SVM_VMEXIT_MSR				0x7C
#define SVM_VMEXIT_TASK_SWITCH			0x7D
#define SVM_VMEXIT_FERR_FREEZE			0x7E
#define SVM_VMEXIT_SHUTDOWN			0x7F
#define SVM_VMEXIT_VMRUN			0x80
#define SVM_VMEXIT_VMMCALL			0x81
#define SVM_VMEXIT_VMLOAD			0x82
#define SVM_VMEXIT_VMSAVE			0x83
#define SVM_VMEXIT_STGI				0x84
#define SVM_VMEXIT_CLGI				0x85
#define SVM_VMEXIT_SKINIT			0x86
#define SVM_VMEXIT_RDTSCP			0x87
#define SVM_VMEXIT_ICEBP			0x88
#define SVM_VMEXIT_WBINVD			0x89
#define SVM_VMEXIT_MONITOR			0x8A
#define SVM_VMEXIT_MWAIT			0x8B
#define SVM_VMEXIT_MWAIT_CONDITIONAL		0x8C
#define SVM_VMEXIT_XSETBV			0x8D
#define SVM_VMEXIT_EFER_WRITE_TRAP		0x8F
#define SVM_VMEXIT_CR0_WRITE_TRAP		0x90
#define SVM_VMEXIT_CR1_WRITE_TRAP		0x91
#define SVM_VMEXIT_CR2_WRITE_TRAP		0x92
#define SVM_VMEXIT_CR3_WRITE_TRAP		0x93
#define SVM_VMEXIT_CR4_WRITE_TRAP		0x94
#define SVM_VMEXIT_CR5_WRITE_TRAP		0x95
#define SVM_VMEXIT_CR6_WRITE_TRAP		0x96
#define SVM_VMEXIT_CR7_WRITE_TRAP		0x97
#define SVM_VMEXIT_CR8_WRITE_TRAP		0x98
#define SVM_VMEXIT_CR9_WRITE_TRAP		0x99
#define SVM_VMEXIT_CR10_WRITE_TRAP		0x9A
#define SVM_VMEXIT_CR11_WRITE_TRAP		0x9B
#define SVM_VMEXIT_CR12_WRITE_TRAP		0x9C
#define SVM_VMEXIT_CR13_WRITE_TRAP		0x9D
#define SVM_VMEXIT_CR14_WRITE_TRAP		0x9E
#define SVM_VMEXIT_CR15_WRITE_TRAP		0x9F
#define SVM_VMEXIT_NPF				0x400
#define SVM_AVIC_INCOMPLETE_IPI			0x401
#define SVM_AVIC_NOACCEL			0x402
#define SVM_VMEXIT_VMGEXIT			0x403
#define SVM_VMEXIT_INVALID			-1

/*
 * Exception injection vectors (these correspond to the CPU exception types
 * defined in the SDM.)
 */
#define VMM_EX_DE	0	/* Divide Error #DE */
#define VMM_EX_DB	1	/* Debug Exception #DB */
#define VMM_EX_NMI	2	/* NMI */
#define VMM_EX_BP	3	/* Breakpoint #BP */
#define VMM_EX_OF	4	/* Overflow #OF */
#define VMM_EX_BR	5	/* Bound range exceeded #BR */
#define VMM_EX_UD	6	/* Undefined opcode #UD */
#define VMM_EX_NM	7	/* Device not available #NM */
#define VMM_EX_DF	8	/* Double fault #DF */
#define VMM_EX_CP	9	/* Coprocessor segment overrun (unused) */
#define VMM_EX_TS	10	/* Invalid TSS #TS */
#define VMM_EX_NP	11	/* Segment not present #NP */
#define VMM_EX_SS	12	/* Stack segment fault #SS */
#define VMM_EX_GP	13	/* General protection #GP */
#define VMM_EX_PF	14	/* Page fault #PF */
#define VMM_EX_MF	16	/* x87 FPU floating point error #MF */
#define VMM_EX_AC	17	/* Alignment check #AC */
#define VMM_EX_MC	18	/* Machine check #MC */
#define VMM_EX_XM	19	/* SIMD floating point exception #XM */
#define VMM_EX_VE	20	/* Virtualization exception #VE */

/*
 * VCPU state values. Note that there is a conversion function in vmm.c
 * (vcpu_state_decode) that converts these to human readable strings,
 * so this enum and vcpu_state_decode should be kept in sync.
 */
enum {
	VCPU_STATE_STOPPED,
	VCPU_STATE_RUNNING,
	VCPU_STATE_REQTERM,
	VCPU_STATE_TERMINATED,
	VCPU_STATE_UNKNOWN,
};

enum {
	VEI_DIR_OUT,
	VEI_DIR_IN
};

enum {
	VEE_FAULT_PROTECT
};

enum {
	VMM_CPU_MODE_REAL,
	VMM_CPU_MODE_PROT,
	VMM_CPU_MODE_PROT32,
	VMM_CPU_MODE_COMPAT,
	VMM_CPU_MODE_LONG,
	VMM_CPU_MODE_UNKNOWN,
};

/*
 * Port definitions not found elsewhere
 */
#define PCKBC_AUX	0x61
#define ELCR0		0x4D0
#define ELCR1		0x4D1

/*
 * vm exit data
 *  vm_exit_inout		: describes an IN/OUT exit
 */
struct vm_exit_inout {
	uint8_t			vei_size;	/* Size of access */
	uint8_t			vei_dir;	/* Direction */
	uint8_t			vei_rep;	/* REP prefix? */
	uint8_t			vei_string;	/* string variety? */
	uint8_t			vei_encoding;	/* operand encoding */
	uint16_t		vei_port;	/* port */
	uint32_t		vei_data;	/* data */
};
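
/*
 * Illustrative sketch (not vmd(8)'s actual code): how a userland
 * device-emulation loop might consume a struct vm_exit_inout after a
 * VMM_IOC_RUN ioctl returns with an IN/OUT exit.  For an IN, the result
 * is written back into vei_data for the kernel to hand to the guest.
 * emulate_inb() and emulate_outb() are hypothetical helpers.
 *
 *	void
 *	handle_inout(struct vm_exit *ve)
 *	{
 *		struct vm_exit_inout *vei = &ve->vei;
 *
 *		if (vei->vei_dir == VEI_DIR_IN)
 *			vei->vei_data = emulate_inb(vei->vei_port);
 *		else
 *			emulate_outb(vei->vei_port, vei->vei_data);
 *	}
 */
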
/*
 *  vm_exit_eptviolation	: describes an EPT VIOLATION exit
 */
struct vm_exit_eptviolation {
	uint8_t		vee_fault_type;
};

/*
 * struct vcpu_segment_info
 *
 * Describes a segment + selector set, used in constructing the initial vcpu
 * register content
 */
struct vcpu_segment_info {
	uint16_t vsi_sel;
	uint32_t vsi_limit;
	uint32_t vsi_ar;
	uint64_t vsi_base;
};
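
/*
 * Illustrative sketch: a flat 32-bit code segment could be described as
 * below when building an initial register set.  The access-rights value
 * is an example encoding (present, DPL 0, code, 32-bit, 4K granularity),
 * not a value mandated by this header.
 *
 *	struct vcpu_segment_info cs = {
 *		.vsi_sel = 0x8,
 *		.vsi_limit = 0xffffffff,
 *		.vsi_ar = 0xc09f,
 *		.vsi_base = 0,
 *	};
 */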

#define VCPU_REGS_RAX		0
#define VCPU_REGS_RBX		1
#define VCPU_REGS_RCX		2
#define VCPU_REGS_RDX		3
#define VCPU_REGS_RSI		4
#define VCPU_REGS_RDI		5
#define VCPU_REGS_R8		6
#define VCPU_REGS_R9		7
#define VCPU_REGS_R10		8
#define VCPU_REGS_R11		9
#define VCPU_REGS_R12		10
#define VCPU_REGS_R13		11
#define VCPU_REGS_R14		12
#define VCPU_REGS_R15		13
#define VCPU_REGS_RSP		14
#define VCPU_REGS_RBP		15
#define VCPU_REGS_RIP		16
#define VCPU_REGS_RFLAGS	17
#define VCPU_REGS_NGPRS		(VCPU_REGS_RFLAGS + 1)

#define VCPU_REGS_CR0	0
#define VCPU_REGS_CR2	1
#define VCPU_REGS_CR3	2
#define VCPU_REGS_CR4	3
#define VCPU_REGS_CR8	4
#define VCPU_REGS_XCR0	5
#define VCPU_REGS_PDPTE0 6
#define VCPU_REGS_PDPTE1 7
#define VCPU_REGS_PDPTE2 8
#define VCPU_REGS_PDPTE3 9
#define VCPU_REGS_NCRS	(VCPU_REGS_PDPTE3 + 1)

#define VCPU_REGS_CS		0
#define VCPU_REGS_DS		1
#define VCPU_REGS_ES		2
#define VCPU_REGS_FS		3
#define VCPU_REGS_GS		4
#define VCPU_REGS_SS		5
#define VCPU_REGS_LDTR		6
#define VCPU_REGS_TR		7
#define VCPU_REGS_NSREGS	(VCPU_REGS_TR + 1)

#define VCPU_REGS_EFER   	0
#define VCPU_REGS_STAR   	1
#define VCPU_REGS_LSTAR  	2
#define VCPU_REGS_CSTAR  	3
#define VCPU_REGS_SFMASK 	4
#define VCPU_REGS_KGSBASE	5
#define VCPU_REGS_MISC_ENABLE	6
#define VCPU_REGS_NMSRS	(VCPU_REGS_MISC_ENABLE + 1)

#define VCPU_REGS_DR0		0
#define VCPU_REGS_DR1		1
#define VCPU_REGS_DR2		2
#define VCPU_REGS_DR3		3
#define VCPU_REGS_DR6		4
#define VCPU_REGS_DR7		5
#define VCPU_REGS_NDRS	(VCPU_REGS_DR7 + 1)

struct vcpu_reg_state {
	uint64_t			vrs_gprs[VCPU_REGS_NGPRS];
	uint64_t			vrs_crs[VCPU_REGS_NCRS];
	uint64_t			vrs_msrs[VCPU_REGS_NMSRS];
	uint64_t			vrs_drs[VCPU_REGS_NDRS];
	struct vcpu_segment_info	vrs_sregs[VCPU_REGS_NSREGS];
	struct vcpu_segment_info	vrs_gdtr;
	struct vcpu_segment_info	vrs_idtr;
};
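
/*
 * Illustrative sketch: the VCPU_REGS_* index macros above select slots in
 * the arrays of struct vcpu_reg_state, e.g. when preparing an initial
 * register set for VMM_IOC_RESETCPU.  The entry point and stack values
 * below are arbitrary examples.
 *
 *	struct vcpu_reg_state vrs = { 0 };
 *
 *	vrs.vrs_gprs[VCPU_REGS_RIP] = 0x100000;
 *	vrs.vrs_gprs[VCPU_REGS_RSP] = 0x8000;
 *	vrs.vrs_gprs[VCPU_REGS_RFLAGS] = 0x2;
 *	vrs.vrs_crs[VCPU_REGS_CR0] = 0x1;	(protected mode enable)
 */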

struct vm_mem_range {
	paddr_t	vmr_gpa;
	vaddr_t vmr_va;
	size_t	vmr_size;
};

/*
 * struct vm_exit
 *
 * Contains VM exit information communicated to vmd(8). This information is
 * gathered by vmm(4) from the CPU on each exit that requires help from vmd.
 */
struct vm_exit {
	union {
		struct vm_exit_inout		vei;	/* IN/OUT exit */
		struct vm_exit_eptviolation	vee;	/* EPT VIOLATION exit */
	};

	struct vcpu_reg_state		vrs;
	int				cpl;
};

struct vm_create_params {
	/* Input parameters to VMM_IOC_CREATE */
	size_t			vcp_nmemranges;
	size_t			vcp_ncpus;
	size_t			vcp_ndisks;
	size_t			vcp_nnics;
	struct vm_mem_range	vcp_memranges[VMM_MAX_MEM_RANGES];
	char			vcp_disks[VMM_MAX_DISKS_PER_VM][VMM_MAX_PATH_DISK];
	char			vcp_cdrom[VMM_MAX_PATH_CDROM];
	char			vcp_name[VMM_MAX_NAME_LEN];
	char			vcp_kernel[VMM_MAX_KERNEL_PATH];
	uint8_t			vcp_macs[VMM_MAX_NICS_PER_VM][6];

	/* Output parameter from VMM_IOC_CREATE */
	uint32_t	vcp_id;
};

struct vm_run_params {
	/* Input parameters to VMM_IOC_RUN */
	uint32_t	vrp_vm_id;
	uint32_t	vrp_vcpu_id;
	uint8_t		vrp_continue;		/* Continuing from an exit */
	uint16_t	vrp_irq;		/* IRQ to inject */

	/* Input/output parameter to VMM_IOC_RUN */
	struct vm_exit	*vrp_exit;		/* updated exit data */

	/* Output parameter from VMM_IOC_RUN */
	uint16_t	vrp_exit_reason;	/* exit reason */
	uint8_t		vrp_irqready;		/* ready for IRQ on entry */
};

struct vm_info_result {
	/* Output parameters from VMM_IOC_INFO */
	size_t		vir_memory_size;
	size_t		vir_used_size;
	size_t		vir_ncpus;
	uint8_t		vir_vcpu_state[VMM_MAX_VCPUS_PER_VM];
	pid_t		vir_creator_pid;
	uint32_t	vir_id;
	char		vir_name[VMM_MAX_NAME_LEN];
};

struct vm_info_params {
	/* Input parameters to VMM_IOC_INFO */
	size_t			vip_size;	/* Output buffer size */

	/* Output parameters from VMM_IOC_INFO */
	size_t			 vip_info_ct;	/* # of entries returned */
	struct vm_info_result	*vip_info;	/* Output buffer */
};
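
/*
 * Illustrative sketch: the caller supplies the output buffer for
 * VMM_IOC_INFO (defined below) and its size in vip_size; the kernel
 * reports how many entries it filled in vip_info_ct.  vmm_fd is assumed
 * to be an open descriptor for /dev/vmm; error handling is omitted.
 *
 *	struct vm_info_params vip;
 *	struct vm_info_result res[8];
 *
 *	memset(&vip, 0, sizeof(vip));
 *	vip.vip_size = sizeof(res);
 *	vip.vip_info = res;
 *	if (ioctl(vmm_fd, VMM_IOC_INFO, &vip) == 0)
 *		printf("%zu VM(s) running\n", vip.vip_info_ct);
 */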

struct vm_terminate_params {
	/* Input parameters to VMM_IOC_TERM */
	uint32_t		vtp_vm_id;
};

struct vm_resetcpu_params {
	/* Input parameters to VMM_IOC_RESETCPU */
	uint32_t		vrp_vm_id;
	uint32_t		vrp_vcpu_id;
	struct vcpu_reg_state	vrp_init_state;
};

struct vm_intr_params {
	/* Input parameters to VMM_IOC_INTR */
	uint32_t		vip_vm_id;
	uint32_t		vip_vcpu_id;
	uint16_t		vip_intr;
};

#define VM_RWVMPARAMS_PVCLOCK_SYSTEM_GPA 0x1	/* read/write pvclock gpa */
#define VM_RWVMPARAMS_PVCLOCK_VERSION	 0x2	/* read/write pvclock version */
#define VM_RWVMPARAMS_ALL	(VM_RWVMPARAMS_PVCLOCK_SYSTEM_GPA | \
    VM_RWVMPARAMS_PVCLOCK_VERSION)

struct vm_rwvmparams_params {
	/* Input parameters to VMM_IOC_READVMPARAMS/VMM_IOC_WRITEVMPARAMS */
	uint32_t		vpp_vm_id;
	uint32_t		vpp_vcpu_id;
	uint32_t		vpp_mask;
	paddr_t			vpp_pvclock_system_gpa;
	uint32_t		vpp_pvclock_version;
};

#define VM_RWREGS_GPRS	0x1	/* read/write GPRs */
#define VM_RWREGS_SREGS	0x2	/* read/write segment registers */
#define VM_RWREGS_CRS	0x4	/* read/write CRs */
#define VM_RWREGS_MSRS	0x8	/* read/write MSRs */
#define VM_RWREGS_DRS	0x10	/* read/write DRs */
#define VM_RWREGS_ALL	(VM_RWREGS_GPRS | VM_RWREGS_SREGS | VM_RWREGS_CRS | \
    VM_RWREGS_MSRS | VM_RWREGS_DRS)

struct vm_rwregs_params {
	/*
	 * Input/output parameters to VMM_IOC_READREGS /
	 * VMM_IOC_WRITEREGS
	 */
	uint32_t		vrwp_vm_id;
	uint32_t		vrwp_vcpu_id;
	uint64_t		vrwp_mask;
	struct vcpu_reg_state	vrwp_regs;
};
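
/*
 * Illustrative sketch: reading just the GPRs of vcpu 0 via
 * VMM_IOC_READREGS (defined below), selecting register classes with the
 * VM_RWREGS_* mask bits.  vmm_fd is assumed to be an open /dev/vmm
 * descriptor and vm_id a previously created VM.
 *
 *	struct vm_rwregs_params vrwp;
 *
 *	memset(&vrwp, 0, sizeof(vrwp));
 *	vrwp.vrwp_vm_id = vm_id;
 *	vrwp.vrwp_vcpu_id = 0;
 *	vrwp.vrwp_mask = VM_RWREGS_GPRS;
 *	if (ioctl(vmm_fd, VMM_IOC_READREGS, &vrwp) == 0)
 *		printf("rip=0x%llx\n", vrwp.vrwp_regs.vrs_gprs[VCPU_REGS_RIP]);
 */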

struct vm_mprotect_ept_params {
	/* Input parameters to VMM_IOC_MPROTECT_EPT */
	uint32_t		vmep_vm_id;
	uint32_t		vmep_vcpu_id;
	vaddr_t			vmep_sgpa;
	size_t			vmep_size;
	int			vmep_prot;
};

/* IOCTL definitions */
#define VMM_IOC_CREATE _IOWR('V', 1, struct vm_create_params) /* Create VM */
#define VMM_IOC_RUN _IOWR('V', 2, struct vm_run_params) /* Run VCPU */
#define VMM_IOC_INFO _IOWR('V', 3, struct vm_info_params) /* Get VM Info */
#define VMM_IOC_TERM _IOW('V', 4, struct vm_terminate_params) /* Terminate VM */
#define VMM_IOC_RESETCPU _IOW('V', 5, struct vm_resetcpu_params) /* Reset */
#define VMM_IOC_INTR _IOW('V', 6, struct vm_intr_params) /* Intr pending */
#define VMM_IOC_READREGS _IOWR('V', 7, struct vm_rwregs_params) /* Get regs */
#define VMM_IOC_WRITEREGS _IOW('V', 8, struct vm_rwregs_params) /* Set regs */
/* Get VM params */
#define VMM_IOC_READVMPARAMS _IOWR('V', 9, struct vm_rwvmparams_params)
/* Set VM params */
#define VMM_IOC_WRITEVMPARAMS _IOW('V', 10, struct vm_rwvmparams_params)
/* Control the protection of EPT pages */
#define VMM_IOC_MPROTECT_EPT _IOW('V', 11, struct vm_mprotect_ept_params)
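
/*
 * Illustrative sketch (not vmd(8)'s actual run loop): driving one vcpu
 * with VMM_IOC_RUN.  handle_exit() is a hypothetical dispatcher over
 * vrp_exit_reason; vmm_fd is assumed to be an open /dev/vmm descriptor.
 *
 *	struct vm_exit vexit;
 *	struct vm_run_params vrp;
 *
 *	memset(&vrp, 0, sizeof(vrp));
 *	vrp.vrp_vm_id = vm_id;
 *	vrp.vrp_vcpu_id = 0;
 *	vrp.vrp_exit = &vexit;
 *	vrp.vrp_irq = 0xFFFF;			(no IRQ to inject)
 *	for (;;) {
 *		if (ioctl(vmm_fd, VMM_IOC_RUN, &vrp) == -1)
 *			break;
 *		if (vrp.vrp_exit_reason == VM_EXIT_TERMINATED)
 *			break;
 *		if (vrp.vrp_exit_reason != VM_EXIT_NONE)
 *			handle_exit(&vrp);	(e.g. VMX_EXIT_IO)
 *		vrp.vrp_continue = 1;		(resume from the exit)
 *	}
 */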

/* CPUID masks */
/*
 * clone host capabilities minus:
 *  debug store (CPUIDECX_DTES64, CPUIDECX_DSCPL, CPUID_DS)
 *  monitor/mwait (CPUIDECX_MWAIT, CPUIDECX_MWAITX)
 *  vmx/svm (CPUIDECX_VMX, CPUIDECX_SVM)
 *  smx (CPUIDECX_SMX)
 *  speedstep (CPUIDECX_EST)
 *  thermal (CPUIDECX_TM2, CPUID_ACPI, CPUID_TM)
 *  context id (CPUIDECX_CNXTID)
 *  machine check (CPUID_MCE, CPUID_MCA)
 *  silicon debug (CPUIDECX_SDBG)
 *  xTPR (CPUIDECX_XTPR)
 *  perf/debug (CPUIDECX_PDCM)
 *  pcid (CPUIDECX_PCID)
 *  direct cache access (CPUIDECX_DCA)
 *  x2APIC (CPUIDECX_X2APIC)
 *  apic deadline (CPUIDECX_DEADLINE)
 *  apic (CPUID_APIC)
 *  psn (CPUID_PSN)
 *  self snoop (CPUID_SS)
 *  hyperthreading (CPUID_HTT)
 *  pending break enabled (CPUID_PBE)
 *  MTRR (CPUID_MTRR)
 *  Speculative execution control features (AMD)
 */
#define VMM_CPUIDECX_MASK ~(CPUIDECX_EST | CPUIDECX_TM2 | CPUIDECX_MWAIT | \
    CPUIDECX_PDCM | CPUIDECX_VMX | CPUIDECX_DTES64 | \
    CPUIDECX_DSCPL | CPUIDECX_SMX | CPUIDECX_CNXTID | \
    CPUIDECX_SDBG | CPUIDECX_XTPR | CPUIDECX_PCID | \
    CPUIDECX_DCA | CPUIDECX_X2APIC | CPUIDECX_DEADLINE)
#define VMM_ECPUIDECX_MASK ~(CPUIDECX_SVM | CPUIDECX_MWAITX)
#define VMM_CPUIDEDX_MASK ~(CPUID_ACPI | CPUID_TM | \
    CPUID_HTT | CPUID_DS | CPUID_APIC | \
    CPUID_PSN | CPUID_SS | CPUID_PBE | \
    CPUID_MTRR | CPUID_MCE | CPUID_MCA)
#define VMM_AMDSPEC_EBX_MASK ~(CPUIDEBX_IBPB | CPUIDEBX_IBRS | \
    CPUIDEBX_STIBP | CPUIDEBX_IBRS_ALWAYSON | CPUIDEBX_STIBP_ALWAYSON | \
    CPUIDEBX_IBRS_PREF | CPUIDEBX_SSBD | CPUIDEBX_VIRT_SSBD | \
    CPUIDEBX_SSBD_NOTREQ)
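
/*
 * Illustrative sketch: during CPUID leaf 0x1 emulation these masks are
 * meant to be ANDed against the host-reported feature words before they
 * are handed to the guest, roughly as below.  CPUID() stands for the
 * kernel's cpuid wrapper and CPUIDECX_HV for the hypervisor-present bit
 * (both live in other amd64 headers; treat the exact names here as
 * assumptions).
 *
 *	uint32_t eax, ebx, ecx, edx;
 *
 *	CPUID(0x01, eax, ebx, ecx, edx);
 *	ecx = (ecx & VMM_CPUIDECX_MASK) | CPUIDECX_HV;
 *	edx &= VMM_CPUIDEDX_MASK;
 */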

/*
 * SEFF flags - copy from host minus:
 *  SGX (SEFF0EBX_SGX)
 *  HLE (SEFF0EBX_HLE)
 *  INVPCID (SEFF0EBX_INVPCID)
 *  RTM (SEFF0EBX_RTM)
 *  PQM (SEFF0EBX_PQM)
 *  AVX512F (SEFF0EBX_AVX512F)
 *  AVX512DQ (SEFF0EBX_AVX512DQ)
 *  AVX512IFMA (SEFF0EBX_AVX512IFMA)
 *  AVX512PF (SEFF0EBX_AVX512PF)
 *  AVX512ER (SEFF0EBX_AVX512ER)
 *  AVX512CD (SEFF0EBX_AVX512CD)
 *  AVX512BW (SEFF0EBX_AVX512BW)
 *  AVX512VL (SEFF0EBX_AVX512VL)
 *  MPX (SEFF0EBX_MPX)
 *  PCOMMIT (SEFF0EBX_PCOMMIT)
 *  PT (SEFF0EBX_PT)
 *  AVX512VBMI (SEFF0ECX_AVX512VBMI)
 */
#define VMM_SEFF0EBX_MASK ~(SEFF0EBX_SGX | SEFF0EBX_HLE | SEFF0EBX_INVPCID | \
    SEFF0EBX_RTM | SEFF0EBX_PQM | SEFF0EBX_MPX | \
    SEFF0EBX_PCOMMIT | SEFF0EBX_PT | \
    SEFF0EBX_AVX512F | SEFF0EBX_AVX512DQ | \
    SEFF0EBX_AVX512IFMA | SEFF0EBX_AVX512PF | \
    SEFF0EBX_AVX512ER | SEFF0EBX_AVX512CD | \
    SEFF0EBX_AVX512BW | SEFF0EBX_AVX512VL)
#define VMM_SEFF0ECX_MASK ~(SEFF0ECX_AVX512VBMI)

/* EDX mask contains the bits to include */
#define VMM_SEFF0EDX_MASK (SEFF0EDX_MD_CLEAR)

/*
 * Extended function flags - copy from host minus:
 * 0x80000001  EDX:RDTSCP Support
 */
#define VMM_FEAT_EFLAGS_MASK ~(CPUID_RDTSCP)

/*
 * CPUID[0x4] deterministic cache info
 */
#define VMM_CPUID4_CACHE_TOPOLOGY_MASK	0x3FF

#ifdef _KERNEL

#define VMX_FAIL_LAUNCH_UNKNOWN 1
#define VMX_FAIL_LAUNCH_INVALID_VMCS 2
#define VMX_FAIL_LAUNCH_VALID_VMCS 3

#define VMX_NUM_MSR_STORE 7

/* MSR bitmap manipulation macros */
#define VMX_MSRIDX(m) ((m) / 8)
#define VMX_MSRBIT(m) (1 << (m) % 8)

#define SVM_MSRIDX(m) ((m) / 4)
#define SVM_MSRBIT_R(m) (1 << (((m) % 4) * 2))
#define SVM_MSRBIT_W(m) (1 << (((m) % 4) * 2 + 1))
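
/*
 * Illustrative sketch: the macros above locate an MSR's intercept bits
 * inside the MSR bitmap page (vc_msr_bitmap_va, below).  VMX uses one
 * bit per MSR; SVM uses two adjacent bits, one for reads and one for
 * writes.  Clearing the bits allows the guest direct access, e.g. with
 * msr being the MSR number's offset within the relevant bitmap region:
 *
 *	uint8_t *msrs = (uint8_t *)vcpu->vc_msr_bitmap_va;
 *
 *	msrs[VMX_MSRIDX(msr)] &= ~(VMX_MSRBIT(msr));
 *
 * and on SVM:
 *
 *	msrs[SVM_MSRIDX(msr)] &= ~(SVM_MSRBIT_R(msr) | SVM_MSRBIT_W(msr));
 */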

enum {
	VMM_MODE_UNKNOWN,
	VMM_MODE_VMX,
	VMM_MODE_EPT,
	VMM_MODE_SVM,
	VMM_MODE_RVI
};

enum {
	VMM_MEM_TYPE_REGULAR,
	VMM_MEM_TYPE_UNKNOWN
};

/* Forward declarations */
struct vm;

/*
 * Implementation-specific cpu state
 */

struct vmcb_segment {
	uint16_t 			vs_sel;			/* 000h */
	uint16_t 			vs_attr;		/* 002h */
	uint32_t			vs_lim;			/* 004h */
	uint64_t			vs_base;		/* 008h */
};

struct vmcb {
	union {
		struct {
			uint32_t	v_cr_rw;		/* 000h */
			uint32_t	v_dr_rw;		/* 004h */
			uint32_t	v_excp;			/* 008h */
			uint32_t	v_intercept1;		/* 00Ch */
			uint32_t	v_intercept2;		/* 010h */
			uint8_t		v_pad1[0x28];		/* 014h-03Bh */
			uint16_t	v_pause_thr;		/* 03Ch */
			uint16_t	v_pause_ct;		/* 03Eh */
			uint64_t	v_iopm_pa;		/* 040h */
			uint64_t	v_msrpm_pa;		/* 048h */
			uint64_t	v_tsc_offset;		/* 050h */
			uint32_t	v_asid;			/* 058h */
			uint8_t		v_tlb_control;		/* 05Ch */
			uint8_t		v_pad2[0x3];		/* 05Dh-05Fh */
			uint8_t		v_tpr;			/* 060h */
			uint8_t		v_irq;			/* 061h */
			uint8_t		v_intr_misc;		/* 062h */
			uint8_t		v_intr_masking;		/* 063h */
			uint8_t		v_intr_vector;		/* 064h */
			uint8_t		v_pad3[0x3];		/* 065h-067h */
			uint64_t	v_intr_shadow;		/* 068h */
			uint64_t	v_exitcode;		/* 070h */
			uint64_t	v_exitinfo1;		/* 078h */
			uint64_t	v_exitinfo2;		/* 080h */
			uint64_t	v_exitintinfo;		/* 088h */
			uint64_t	v_np_enable;		/* 090h */
			uint64_t	v_avic_apic_bar;	/* 098h */
			uint64_t	v_pad4;			/* 0A0h */
			uint64_t	v_eventinj;		/* 0A8h */
			uint64_t	v_n_cr3;		/* 0B0h */
			uint64_t	v_lbr_virt_enable;	/* 0B8h */
			uint64_t	v_vmcb_clean_bits;	/* 0C0h */
			uint64_t	v_nrip;			/* 0C8h */
			uint8_t		v_n_bytes_fetched;	/* 0D0h */
			uint8_t		v_guest_ins_bytes[0xf];	/* 0D1h-0DFh */
			uint64_t	v_avic_apic_back_page;	/* 0E0h */
			uint64_t	v_pad5;			/* 0E8h-0EFh */
			uint64_t	v_avic_logical_table;	/* 0F0h */
			uint64_t	v_avic_phys;		/* 0F8h */

		};
		uint8_t vmcb_control[0x400];
	};

	union {
		struct {
			/* Offsets here are relative to start of VMCB SSA */
			struct vmcb_segment	v_es;		/* 000h */
			struct vmcb_segment	v_cs;		/* 010h */
			struct vmcb_segment	v_ss;		/* 020h */
			struct vmcb_segment	v_ds;		/* 030h */
			struct vmcb_segment	v_fs;		/* 040h */
			struct vmcb_segment	v_gs;		/* 050h */
			struct vmcb_segment	v_gdtr;		/* 060h */
			struct vmcb_segment	v_ldtr;		/* 070h */
			struct vmcb_segment	v_idtr;		/* 080h */
			struct vmcb_segment	v_tr;		/* 090h */
			uint8_t 		v_pad6[0x2B];	/* 0A0h-0CAh */
			uint8_t			v_cpl;		/* 0CBh */
			uint32_t		v_pad7;		/* 0CCh-0CFh */
			uint64_t		v_efer;		/* 0D0h */
			uint8_t			v_pad8[0x70];	/* 0D8h-147h */
			uint64_t		v_cr4;		/* 148h */
			uint64_t		v_cr3;		/* 150h */
			uint64_t		v_cr0;		/* 158h */
			uint64_t		v_dr7;		/* 160h */
			uint64_t		v_dr6;		/* 168h */
			uint64_t		v_rflags;	/* 170h */
			uint64_t		v_rip;		/* 178h */
			uint64_t		v_pad9[0xB];	/* 180h-1D7h */
			uint64_t		v_rsp;		/* 1D8h */
			uint64_t		v_pad10[0x3];	/* 1E0h-1F7h */
			uint64_t		v_rax;		/* 1F8h */
			uint64_t		v_star;		/* 200h */
			uint64_t		v_lstar;	/* 208h */
			uint64_t		v_cstar;	/* 210h */
			uint64_t		v_sfmask;	/* 218h */
			uint64_t		v_kgsbase;	/* 220h */
			uint64_t		v_sysenter_cs;	/* 228h */
			uint64_t		v_sysenter_esp;	/* 230h */
			uint64_t		v_sysenter_eip;	/* 238h */
			uint64_t		v_cr2;		/* 240h */
			uint64_t		v_pad11[0x4];	/* 248h-267h */
			uint64_t		v_g_pat;	/* 268h */
			uint64_t		v_dbgctl;	/* 270h */
			uint64_t		v_br_from;	/* 278h */
			uint64_t		v_br_to;	/* 280h */
			uint64_t		v_lastexcpfrom;	/* 288h */
			uint64_t		v_lastexcpto;	/* 290h */
		};

		uint8_t vmcb_layout[PAGE_SIZE - 0x400];
	};
};

struct vmcs {
	uint32_t	vmcs_revision;
};

struct vmx_invvpid_descriptor
{
	uint64_t	vid_vpid;
	uint64_t	vid_addr;
};

struct vmx_invept_descriptor
{
	uint64_t	vid_eptp;
	uint64_t	vid_reserved;
};

struct vmx_msr_store
{
	uint64_t	vms_index;
	uint64_t	vms_data;
};

/*
 * Storage for guest registers not preserved in VMCS and various exit
 * information.
 *
 * Note that vmx/svm_enter_guest depend on the layout of this struct for
 * field access.
 */
struct vcpu_gueststate
{
	/* %rsi should be first */
	uint64_t	vg_rsi;			/* 0x00 */
	uint64_t	vg_rax;			/* 0x08 */
	uint64_t	vg_rbx;			/* 0x10 */
	uint64_t	vg_rcx;			/* 0x18 */
	uint64_t	vg_rdx;			/* 0x20 */
	uint64_t	vg_rdi;			/* 0x28 */
	uint64_t	vg_rbp;			/* 0x30 */
	uint64_t	vg_r8;			/* 0x38 */
	uint64_t	vg_r9;			/* 0x40 */
	uint64_t	vg_r10;			/* 0x48 */
	uint64_t	vg_r11;			/* 0x50 */
	uint64_t	vg_r12;			/* 0x58 */
	uint64_t	vg_r13;			/* 0x60 */
	uint64_t	vg_r14;			/* 0x68 */
	uint64_t	vg_r15;			/* 0x70 */
	uint64_t	vg_cr2;			/* 0x78 */
	uint64_t	vg_rip;			/* 0x80 */
	uint32_t	vg_exit_reason;		/* 0x88 */
	uint64_t	vg_rflags;		/* 0x90 */
	uint64_t	vg_xcr0;		/* 0x98 */
	/*
	 * Debug registers
	 * - %dr4/%dr5 are aliased to %dr6/%dr7 (or cause #DE)
	 * - %dr7 is saved automatically in the VMCS
	 */
	uint64_t	vg_dr0;			/* 0xa0 */
	uint64_t	vg_dr1;			/* 0xa8 */
	uint64_t	vg_dr2;			/* 0xb0 */
	uint64_t	vg_dr3;			/* 0xb8 */
	uint64_t	vg_dr6;			/* 0xc0 */
};

/*
 * Virtual Machine
 */
struct vm;

/*
 * Virtual CPU
 */
struct vcpu {
	/*
	 * Guest FPU state - this must remain as the first member of the struct
	 * to ensure 64-byte alignment (set up during vcpu_pool init)
	 */
	struct savefpu vc_g_fpu;

	/* VMCS / VMCB pointer */
	vaddr_t vc_control_va;
	uint64_t vc_control_pa;

	/* VLAPIC pointer */
	vaddr_t vc_vlapic_va;
	uint64_t vc_vlapic_pa;

	/* MSR bitmap address */
	vaddr_t vc_msr_bitmap_va;
	uint64_t vc_msr_bitmap_pa;

	struct vm *vc_parent;
	uint32_t vc_id;
	uint16_t vc_vpid;
	u_int vc_state;
	SLIST_ENTRY(vcpu) vc_vcpu_link;

	uint8_t vc_virt_mode;

	struct cpu_info *vc_last_pcpu;
	struct vm_exit vc_exit;

	uint16_t vc_intr;
	uint8_t vc_irqready;

	uint8_t vc_fpuinited;

	uint64_t vc_h_xcr0;

	struct vcpu_gueststate vc_gueststate;

	uint8_t vc_event;

	uint32_t vc_pvclock_version;
	paddr_t vc_pvclock_system_gpa;
	uint32_t vc_pvclock_system_tsc_mul;

	/* Shadowed MSRs */
	uint64_t vc_shadow_pat;

	/* VMX only */
	uint64_t vc_vmx_basic;
	uint64_t vc_vmx_entry_ctls;
	uint64_t vc_vmx_true_entry_ctls;
	uint64_t vc_vmx_exit_ctls;
	uint64_t vc_vmx_true_exit_ctls;
	uint64_t vc_vmx_pinbased_ctls;
	uint64_t vc_vmx_true_pinbased_ctls;
	uint64_t vc_vmx_procbased_ctls;
	uint64_t vc_vmx_true_procbased_ctls;
	uint64_t vc_vmx_procbased2_ctls;
	vaddr_t vc_vmx_msr_exit_save_va;
	paddr_t vc_vmx_msr_exit_save_pa;
	vaddr_t vc_vmx_msr_exit_load_va;
	paddr_t vc_vmx_msr_exit_load_pa;
	vaddr_t vc_vmx_msr_entry_load_va;
	paddr_t vc_vmx_msr_entry_load_pa;
	uint8_t vc_vmx_vpid_enabled;
	uint64_t vc_vmx_cr0_fixed1;
	uint64_t vc_vmx_cr0_fixed0;

	/* SVM only */
	vaddr_t vc_svm_hsa_va;
	paddr_t vc_svm_hsa_pa;
	vaddr_t vc_svm_ioio_va;
	paddr_t vc_svm_ioio_pa;
};

SLIST_HEAD(vcpu_head, vcpu);

void	vmm_dispatch_intr(vaddr_t);
int	vmxon(uint64_t *);
int	vmxoff(void);
int	vmclear(uint64_t *);
int	vmptrld(uint64_t *);
int	vmptrst(uint64_t *);
int	vmwrite(uint64_t, uint64_t);
int	vmread(uint64_t, uint64_t *);
void	invvpid(uint64_t, struct vmx_invvpid_descriptor *);
void	invept(uint64_t, struct vmx_invept_descriptor *);
int	vmx_enter_guest(uint64_t *, struct vcpu_gueststate *, int, uint8_t);
int	svm_enter_guest(uint64_t, struct vcpu_gueststate *,
    struct region_descriptor *);
void	start_vmm_on_cpu(struct cpu_info *);
void	stop_vmm_on_cpu(struct cpu_info *);
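
/*
 * Illustrative sketch: vmread()/vmwrite() return 0 on success and
 * non-zero if the underlying VMREAD/VMWRITE instruction failed, so
 * callers typically check the return value.  VMCS_GUEST_IA32_RIP is the
 * guest-RIP field encoding defined with the other VMCS field encodings
 * outside this header (name assumed here).
 *
 *	uint64_t rip;
 *
 *	if (vmread(VMCS_GUEST_IA32_RIP, &rip)) {
 *		printf("%s: can't read guest RIP\n", __func__);
 *		return (EINVAL);
 *	}
 */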

#endif /* _KERNEL */

#endif /* ! _MACHINE_VMMVAR_H_ */