/*	$NetBSD: nvmm_x86_vmx.c,v 1.86 2023/11/06 17:02:17 rin Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.86 2023/11/06 17:02:17 rin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>
#include <sys/bitops.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>

#include <machine/cpuvar.h>
#include <machine/pmap_private.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int _vmx_vmxon(paddr_t *pa);
int _vmx_vmxoff(void);
int vmx_vmlaunch(uint64_t *gprs);
int vmx_vmresume(uint64_t *gprs);

#define vmx_vmxon(a) \
	if (__predict_false(_vmx_vmxon(a) != 0)) { \
		panic("%s: VMXON failed", __func__); \
	}
#define vmx_vmxoff() \
	if (__predict_false(_vmx_vmxoff() != 0)) { \
		panic("%s: VMXOFF failed", __func__); \
	}

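/*
 * Operand descriptors for the INVEPT and INVVPID instructions (Intel SDM).
 * For INVEPT, the first quadword holds the EPTP and the second must be
 * zero. For INVVPID, the low 16 bits of the first quadword hold the VPID,
 * and the second quadword holds a linear address, used only by the
 * address-specific invalidation type.
 */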
struct ept_desc {
	uint64_t eptp;
	uint64_t mbz;
} __packed;

struct vpid_desc {
	uint64_t vpid;
	uint64_t addr;
} __packed;

static inline void
vmx_invept(uint64_t op, struct ept_desc *desc)
{
	asm volatile (
		"invept %[desc],%[op];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline void
vmx_invvpid(uint64_t op, struct vpid_desc *desc)
{
	asm volatile (
		"invvpid %[desc],%[op];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline uint64_t
vmx_vmread(uint64_t field)
{
	uint64_t value;

	asm volatile (
		"vmread %[field],%[value];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		: [value] "=r" (value)
		: [field] "r" (field)
		: "cc"
	);

	return value;
}

static inline void
vmx_vmwrite(uint64_t field, uint64_t value)
{
	asm volatile (
		"vmwrite %[value],%[field];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [field] "r" (field), [value] "r" (value)
		: "cc"
	);
}

static inline paddr_t __diagused
vmx_vmptrst(void)
{
	paddr_t pa;

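	/*
	 * VMPTRST stores into 'pa'. The operand is passed as an "m" input
	 * constraint with a "memory" clobber, so the compiler still assumes
	 * the memory gets written.
	 */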
	asm volatile (
		"vmptrst %[pa];"
		:
		: [pa] "m" (*(paddr_t *)&pa)
		: "memory"
	);

	return pa;
}

static inline void
vmx_vmptrld(paddr_t *pa)
{
	asm volatile (
		"vmptrld %[pa];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_vmclear(paddr_t *pa)
{
	asm volatile (
		"vmclear %[pa];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_cli(void)
{
	asm volatile ("cli" ::: "memory");
}

static inline void
vmx_sti(void)
{
	asm volatile ("sti" ::: "memory");
}

#define MSR_IA32_FEATURE_CONTROL	0x003A
#define		IA32_FEATURE_CONTROL_LOCK	__BIT(0)
#define		IA32_FEATURE_CONTROL_IN_SMX	__BIT(1)
#define		IA32_FEATURE_CONTROL_OUT_SMX	__BIT(2)

#define MSR_IA32_VMX_BASIC	0x0480
#define		IA32_VMX_BASIC_IDENT		__BITS(30,0)
#define		IA32_VMX_BASIC_DATA_SIZE	__BITS(44,32)
#define		IA32_VMX_BASIC_MEM_WIDTH	__BIT(48)
#define		IA32_VMX_BASIC_DUAL		__BIT(49)
#define		IA32_VMX_BASIC_MEM_TYPE		__BITS(53,50)
#define			MEM_TYPE_UC		0
#define			MEM_TYPE_WB		6
#define		IA32_VMX_BASIC_IO_REPORT	__BIT(54)
#define		IA32_VMX_BASIC_TRUE_CTLS	__BIT(55)

#define MSR_IA32_VMX_PINBASED_CTLS	0x0481
#define MSR_IA32_VMX_PROCBASED_CTLS	0x0482
#define MSR_IA32_VMX_EXIT_CTLS		0x0483
#define MSR_IA32_VMX_ENTRY_CTLS		0x0484
#define MSR_IA32_VMX_PROCBASED_CTLS2	0x048B

#define MSR_IA32_VMX_TRUE_PINBASED_CTLS		0x048D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x048E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS		0x048F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS		0x0490

#define MSR_IA32_VMX_CR0_FIXED0		0x0486
#define MSR_IA32_VMX_CR0_FIXED1		0x0487
#define MSR_IA32_VMX_CR4_FIXED0		0x0488
#define MSR_IA32_VMX_CR4_FIXED1		0x0489

#define MSR_IA32_VMX_EPT_VPID_CAP	0x048C
#define		IA32_VMX_EPT_VPID_XO			__BIT(0)
#define		IA32_VMX_EPT_VPID_WALKLENGTH_4		__BIT(6)
#define		IA32_VMX_EPT_VPID_UC			__BIT(8)
#define		IA32_VMX_EPT_VPID_WB			__BIT(14)
#define		IA32_VMX_EPT_VPID_2MB			__BIT(16)
#define		IA32_VMX_EPT_VPID_1GB			__BIT(17)
#define		IA32_VMX_EPT_VPID_INVEPT		__BIT(20)
#define		IA32_VMX_EPT_VPID_FLAGS_AD		__BIT(21)
#define		IA32_VMX_EPT_VPID_ADVANCED_VMEXIT_INFO	__BIT(22)
#define		IA32_VMX_EPT_VPID_SHSTK			__BIT(23)
#define		IA32_VMX_EPT_VPID_INVEPT_CONTEXT	__BIT(25)
#define		IA32_VMX_EPT_VPID_INVEPT_ALL		__BIT(26)
#define		IA32_VMX_EPT_VPID_INVVPID		__BIT(32)
#define		IA32_VMX_EPT_VPID_INVVPID_ADDR		__BIT(40)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT	__BIT(41)
#define		IA32_VMX_EPT_VPID_INVVPID_ALL		__BIT(42)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG	__BIT(43)

/* -------------------------------------------------------------------------- */

/* 16-bit control fields */
#define VMCS_VPID				0x00000000
#define VMCS_PIR_VECTOR				0x00000002
#define VMCS_EPTP_INDEX				0x00000004
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR			0x00000800
#define VMCS_GUEST_CS_SELECTOR			0x00000802
#define VMCS_GUEST_SS_SELECTOR			0x00000804
#define VMCS_GUEST_DS_SELECTOR			0x00000806
#define VMCS_GUEST_FS_SELECTOR			0x00000808
#define VMCS_GUEST_GS_SELECTOR			0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR		0x0000080C
#define VMCS_GUEST_TR_SELECTOR			0x0000080E
#define VMCS_GUEST_INTR_STATUS			0x00000810
#define VMCS_PML_INDEX				0x00000812
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR			0x00000C00
#define VMCS_HOST_CS_SELECTOR			0x00000C02
#define VMCS_HOST_SS_SELECTOR			0x00000C04
#define VMCS_HOST_DS_SELECTOR			0x00000C06
#define VMCS_HOST_FS_SELECTOR			0x00000C08
#define VMCS_HOST_GS_SELECTOR			0x00000C0A
#define VMCS_HOST_TR_SELECTOR			0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A			0x00002000
#define VMCS_IO_BITMAP_B			0x00002002
#define VMCS_MSR_BITMAP				0x00002004
#define VMCS_EXIT_MSR_STORE_ADDRESS		0x00002006
#define VMCS_EXIT_MSR_LOAD_ADDRESS		0x00002008
#define VMCS_ENTRY_MSR_LOAD_ADDRESS		0x0000200A
#define VMCS_EXECUTIVE_VMCS			0x0000200C
#define VMCS_PML_ADDRESS			0x0000200E
#define VMCS_TSC_OFFSET				0x00002010
#define VMCS_VIRTUAL_APIC			0x00002012
#define VMCS_APIC_ACCESS			0x00002014
#define VMCS_PIR_DESC				0x00002016
#define VMCS_VM_CONTROL				0x00002018
#define VMCS_EPTP				0x0000201A
#define		EPTP_TYPE			__BITS(2,0)
#define			EPTP_TYPE_UC		0
#define			EPTP_TYPE_WB		6
#define		EPTP_WALKLEN			__BITS(5,3)
#define		EPTP_FLAGS_AD			__BIT(6)
#define		EPTP_SSS			__BIT(7)
#define		EPTP_PHYSADDR			__BITS(63,12)
#define VMCS_EOI_EXIT0				0x0000201C
#define VMCS_EOI_EXIT1				0x0000201E
#define VMCS_EOI_EXIT2				0x00002020
#define VMCS_EOI_EXIT3				0x00002022
#define VMCS_EPTP_LIST				0x00002024
#define VMCS_VMREAD_BITMAP			0x00002026
#define VMCS_VMWRITE_BITMAP			0x00002028
#define VMCS_VIRTUAL_EXCEPTION			0x0000202A
#define VMCS_XSS_EXIT_BITMAP			0x0000202C
#define VMCS_ENCLS_EXIT_BITMAP			0x0000202E
#define VMCS_SUBPAGE_PERM_TABLE_PTR		0x00002030
#define VMCS_TSC_MULTIPLIER			0x00002032
#define VMCS_ENCLV_EXIT_BITMAP			0x00002036
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS		0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER			0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL		0x00002802
#define VMCS_GUEST_IA32_PAT			0x00002804
#define VMCS_GUEST_IA32_EFER			0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL	0x00002808
#define VMCS_GUEST_PDPTE0			0x0000280A
#define VMCS_GUEST_PDPTE1			0x0000280C
#define VMCS_GUEST_PDPTE2			0x0000280E
#define VMCS_GUEST_PDPTE3			0x00002810
#define VMCS_GUEST_BNDCFGS			0x00002812
#define VMCS_GUEST_RTIT_CTL			0x00002814
#define VMCS_GUEST_PKRS				0x00002818
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT			0x00002C00
#define VMCS_HOST_IA32_EFER			0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL		0x00002C04
#define VMCS_HOST_IA32_PKRS			0x00002C06
/* 32-bit control fields */
#define VMCS_PINBASED_CTLS			0x00004000
#define		PIN_CTLS_INT_EXITING		__BIT(0)
#define		PIN_CTLS_NMI_EXITING		__BIT(3)
#define		PIN_CTLS_VIRTUAL_NMIS		__BIT(5)
#define		PIN_CTLS_ACTIVATE_PREEMPT_TIMER	__BIT(6)
#define		PIN_CTLS_PROCESS_POSTED_INTS	__BIT(7)
#define VMCS_PROCBASED_CTLS			0x00004002
#define		PROC_CTLS_INT_WINDOW_EXITING	__BIT(2)
#define		PROC_CTLS_USE_TSC_OFFSETTING	__BIT(3)
#define		PROC_CTLS_HLT_EXITING		__BIT(7)
#define		PROC_CTLS_INVLPG_EXITING	__BIT(9)
#define		PROC_CTLS_MWAIT_EXITING		__BIT(10)
#define		PROC_CTLS_RDPMC_EXITING		__BIT(11)
#define		PROC_CTLS_RDTSC_EXITING		__BIT(12)
#define		PROC_CTLS_RCR3_EXITING		__BIT(15)
#define		PROC_CTLS_LCR3_EXITING		__BIT(16)
#define		PROC_CTLS_RCR8_EXITING		__BIT(19)
#define		PROC_CTLS_LCR8_EXITING		__BIT(20)
#define		PROC_CTLS_USE_TPR_SHADOW	__BIT(21)
#define		PROC_CTLS_NMI_WINDOW_EXITING	__BIT(22)
#define		PROC_CTLS_DR_EXITING		__BIT(23)
#define		PROC_CTLS_UNCOND_IO_EXITING	__BIT(24)
#define		PROC_CTLS_USE_IO_BITMAPS	__BIT(25)
#define		PROC_CTLS_MONITOR_TRAP_FLAG	__BIT(27)
#define		PROC_CTLS_USE_MSR_BITMAPS	__BIT(28)
#define		PROC_CTLS_MONITOR_EXITING	__BIT(29)
#define		PROC_CTLS_PAUSE_EXITING		__BIT(30)
#define		PROC_CTLS_ACTIVATE_CTLS2	__BIT(31)
#define VMCS_EXCEPTION_BITMAP			0x00004004
#define VMCS_PF_ERROR_MASK			0x00004006
#define VMCS_PF_ERROR_MATCH			0x00004008
#define VMCS_CR3_TARGET_COUNT			0x0000400A
#define VMCS_EXIT_CTLS				0x0000400C
#define		EXIT_CTLS_SAVE_DEBUG_CONTROLS	__BIT(2)
#define		EXIT_CTLS_HOST_LONG_MODE	__BIT(9)
#define		EXIT_CTLS_LOAD_PERFGLOBALCTRL	__BIT(12)
#define		EXIT_CTLS_ACK_INTERRUPT		__BIT(15)
#define		EXIT_CTLS_SAVE_PAT		__BIT(18)
#define		EXIT_CTLS_LOAD_PAT		__BIT(19)
#define		EXIT_CTLS_SAVE_EFER		__BIT(20)
#define		EXIT_CTLS_LOAD_EFER		__BIT(21)
#define		EXIT_CTLS_SAVE_PREEMPT_TIMER	__BIT(22)
#define		EXIT_CTLS_CLEAR_BNDCFGS		__BIT(23)
#define		EXIT_CTLS_CONCEAL_PT		__BIT(24)
#define		EXIT_CTLS_CLEAR_RTIT_CTL	__BIT(25)
#define		EXIT_CTLS_LOAD_CET		__BIT(28)
#define		EXIT_CTLS_LOAD_PKRS		__BIT(29)
#define VMCS_EXIT_MSR_STORE_COUNT		0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT		0x00004010
#define VMCS_ENTRY_CTLS				0x00004012
#define		ENTRY_CTLS_LOAD_DEBUG_CONTROLS	__BIT(2)
#define		ENTRY_CTLS_LONG_MODE		__BIT(9)
#define		ENTRY_CTLS_SMM			__BIT(10)
#define		ENTRY_CTLS_DISABLE_DUAL		__BIT(11)
#define		ENTRY_CTLS_LOAD_PERFGLOBALCTRL	__BIT(13)
#define		ENTRY_CTLS_LOAD_PAT		__BIT(14)
#define		ENTRY_CTLS_LOAD_EFER		__BIT(15)
#define		ENTRY_CTLS_LOAD_BNDCFGS		__BIT(16)
#define		ENTRY_CTLS_CONCEAL_PT		__BIT(17)
#define		ENTRY_CTLS_LOAD_RTIT_CTL	__BIT(18)
#define		ENTRY_CTLS_LOAD_CET		__BIT(20)
#define		ENTRY_CTLS_LOAD_PKRS		__BIT(22)
#define VMCS_ENTRY_MSR_LOAD_COUNT		0x00004014
#define VMCS_ENTRY_INTR_INFO			0x00004016
#define		INTR_INFO_VECTOR		__BITS(7,0)
#define		INTR_INFO_TYPE			__BITS(10,8)
#define			INTR_TYPE_EXT_INT	0
#define			INTR_TYPE_NMI		2
#define			INTR_TYPE_HW_EXC	3
#define			INTR_TYPE_SW_INT	4
#define			INTR_TYPE_PRIV_SW_EXC	5
#define			INTR_TYPE_SW_EXC	6
#define			INTR_TYPE_OTHER		7
#define		INTR_INFO_ERROR			__BIT(11)
#define		INTR_INFO_VALID			__BIT(31)
#define VMCS_ENTRY_EXCEPTION_ERROR		0x00004018
#define VMCS_ENTRY_INSTRUCTION_LENGTH		0x0000401A
#define VMCS_TPR_THRESHOLD			0x0000401C
#define VMCS_PROCBASED_CTLS2			0x0000401E
#define		PROC_CTLS2_VIRT_APIC_ACCESSES	__BIT(0)
#define		PROC_CTLS2_ENABLE_EPT		__BIT(1)
#define		PROC_CTLS2_DESC_TABLE_EXITING	__BIT(2)
#define		PROC_CTLS2_ENABLE_RDTSCP	__BIT(3)
#define		PROC_CTLS2_VIRT_X2APIC		__BIT(4)
#define		PROC_CTLS2_ENABLE_VPID		__BIT(5)
#define		PROC_CTLS2_WBINVD_EXITING	__BIT(6)
#define		PROC_CTLS2_UNRESTRICTED_GUEST	__BIT(7)
#define		PROC_CTLS2_APIC_REG_VIRT	__BIT(8)
#define		PROC_CTLS2_VIRT_INT_DELIVERY	__BIT(9)
#define		PROC_CTLS2_PAUSE_LOOP_EXITING	__BIT(10)
#define		PROC_CTLS2_RDRAND_EXITING	__BIT(11)
#define		PROC_CTLS2_INVPCID_ENABLE	__BIT(12)
#define		PROC_CTLS2_VMFUNC_ENABLE	__BIT(13)
#define		PROC_CTLS2_VMCS_SHADOWING	__BIT(14)
#define		PROC_CTLS2_ENCLS_EXITING	__BIT(15)
#define		PROC_CTLS2_RDSEED_EXITING	__BIT(16)
#define		PROC_CTLS2_PML_ENABLE		__BIT(17)
#define		PROC_CTLS2_EPT_VIOLATION	__BIT(18)
#define		PROC_CTLS2_CONCEAL_VMX_FROM_PT	__BIT(19)
#define		PROC_CTLS2_XSAVES_ENABLE	__BIT(20)
#define		PROC_CTLS2_MODE_BASED_EXEC_EPT	__BIT(22)
#define		PROC_CTLS2_SUBPAGE_PERMISSIONS	__BIT(23)
#define		PROC_CTLS2_PT_USES_GPA		__BIT(24)
#define		PROC_CTLS2_USE_TSC_SCALING	__BIT(25)
#define		PROC_CTLS2_WAIT_PAUSE_ENABLE	__BIT(26)
#define		PROC_CTLS2_ENCLV_EXITING	__BIT(28)
#define VMCS_PLE_GAP				0x00004020
#define VMCS_PLE_WINDOW				0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR			0x00004400
#define VMCS_EXIT_REASON			0x00004402
#define VMCS_EXIT_INTR_INFO			0x00004404
#define VMCS_EXIT_INTR_ERRCODE			0x00004406
#define VMCS_IDT_VECTORING_INFO			0x00004408
#define VMCS_IDT_VECTORING_ERROR		0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH		0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO		0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT			0x00004800
#define VMCS_GUEST_CS_LIMIT			0x00004802
#define VMCS_GUEST_SS_LIMIT			0x00004804
#define VMCS_GUEST_DS_LIMIT			0x00004806
#define VMCS_GUEST_FS_LIMIT			0x00004808
#define VMCS_GUEST_GS_LIMIT			0x0000480A
#define VMCS_GUEST_LDTR_LIMIT			0x0000480C
#define VMCS_GUEST_TR_LIMIT			0x0000480E
#define VMCS_GUEST_GDTR_LIMIT			0x00004810
#define VMCS_GUEST_IDTR_LIMIT			0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS		0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS		0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS		0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS		0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS		0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS		0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS		0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS		0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY		0x00004824
#define		INT_STATE_STI			__BIT(0)
#define		INT_STATE_MOVSS			__BIT(1)
#define		INT_STATE_SMI			__BIT(2)
#define		INT_STATE_NMI			__BIT(3)
#define		INT_STATE_ENCLAVE		__BIT(4)
#define VMCS_GUEST_ACTIVITY			0x00004826
#define VMCS_GUEST_SMBASE			0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS		0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE		0x0000482E
/* 32-bit host state fields */
#define VMCS_HOST_IA32_SYSENTER_CS		0x00004C00
/* Natural-Width control fields */
#define VMCS_CR0_MASK				0x00006000
#define VMCS_CR4_MASK				0x00006002
#define VMCS_CR0_SHADOW				0x00006004
#define VMCS_CR4_SHADOW				0x00006006
#define VMCS_CR3_TARGET0			0x00006008
#define VMCS_CR3_TARGET1			0x0000600A
#define VMCS_CR3_TARGET2			0x0000600C
#define VMCS_CR3_TARGET3			0x0000600E
/* Natural-Width read-only fields */
#define VMCS_EXIT_QUALIFICATION			0x00006400
#define VMCS_IO_RCX				0x00006402
#define VMCS_IO_RSI				0x00006404
#define VMCS_IO_RDI				0x00006406
#define VMCS_IO_RIP				0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS		0x0000640A
/* Natural-Width guest-state fields */
#define VMCS_GUEST_CR0				0x00006800
#define VMCS_GUEST_CR3				0x00006802
#define VMCS_GUEST_CR4				0x00006804
#define VMCS_GUEST_ES_BASE			0x00006806
#define VMCS_GUEST_CS_BASE			0x00006808
#define VMCS_GUEST_SS_BASE			0x0000680A
#define VMCS_GUEST_DS_BASE			0x0000680C
#define VMCS_GUEST_FS_BASE			0x0000680E
#define VMCS_GUEST_GS_BASE			0x00006810
#define VMCS_GUEST_LDTR_BASE			0x00006812
#define VMCS_GUEST_TR_BASE			0x00006814
#define VMCS_GUEST_GDTR_BASE			0x00006816
#define VMCS_GUEST_IDTR_BASE			0x00006818
#define VMCS_GUEST_DR7				0x0000681A
#define VMCS_GUEST_RSP				0x0000681C
#define VMCS_GUEST_RIP				0x0000681E
#define VMCS_GUEST_RFLAGS			0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS	0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP		0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP		0x00006826
#define VMCS_GUEST_IA32_S_CET			0x00006828
#define VMCS_GUEST_SSP				0x0000682A
#define VMCS_GUEST_IA32_INTR_SSP_TABLE		0x0000682C
/* Natural-Width host-state fields */
#define VMCS_HOST_CR0				0x00006C00
#define VMCS_HOST_CR3				0x00006C02
#define VMCS_HOST_CR4				0x00006C04
#define VMCS_HOST_FS_BASE			0x00006C06
#define VMCS_HOST_GS_BASE			0x00006C08
#define VMCS_HOST_TR_BASE			0x00006C0A
#define VMCS_HOST_GDTR_BASE			0x00006C0C
#define VMCS_HOST_IDTR_BASE			0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP		0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP		0x00006C12
#define VMCS_HOST_RSP				0x00006C14
#define VMCS_HOST_RIP				0x00006C16
#define VMCS_HOST_IA32_S_CET			0x00006C18
#define VMCS_HOST_SSP				0x00006C1A
#define VMCS_HOST_IA32_INTR_SSP_TABLE		0x00006C1C

/* VMX basic exit reasons. */
#define VMCS_EXITCODE_EXC_NMI			0
#define VMCS_EXITCODE_EXT_INT			1
#define VMCS_EXITCODE_SHUTDOWN			2
#define VMCS_EXITCODE_INIT			3
#define VMCS_EXITCODE_SIPI			4
#define VMCS_EXITCODE_SMI			5
#define VMCS_EXITCODE_OTHER_SMI			6
#define VMCS_EXITCODE_INT_WINDOW		7
#define VMCS_EXITCODE_NMI_WINDOW		8
#define VMCS_EXITCODE_TASK_SWITCH		9
#define VMCS_EXITCODE_CPUID			10
#define VMCS_EXITCODE_GETSEC			11
#define VMCS_EXITCODE_HLT			12
#define VMCS_EXITCODE_INVD			13
#define VMCS_EXITCODE_INVLPG			14
#define VMCS_EXITCODE_RDPMC			15
#define VMCS_EXITCODE_RDTSC			16
#define VMCS_EXITCODE_RSM			17
#define VMCS_EXITCODE_VMCALL			18
#define VMCS_EXITCODE_VMCLEAR			19
#define VMCS_EXITCODE_VMLAUNCH			20
#define VMCS_EXITCODE_VMPTRLD			21
#define VMCS_EXITCODE_VMPTRST			22
#define VMCS_EXITCODE_VMREAD			23
#define VMCS_EXITCODE_VMRESUME			24
#define VMCS_EXITCODE_VMWRITE			25
#define VMCS_EXITCODE_VMXOFF			26
#define VMCS_EXITCODE_VMXON			27
#define VMCS_EXITCODE_CR			28
#define VMCS_EXITCODE_DR			29
#define VMCS_EXITCODE_IO			30
#define VMCS_EXITCODE_RDMSR			31
#define VMCS_EXITCODE_WRMSR			32
#define VMCS_EXITCODE_FAIL_GUEST_INVALID	33
#define VMCS_EXITCODE_FAIL_MSR_INVALID		34
#define VMCS_EXITCODE_MWAIT			36
#define VMCS_EXITCODE_TRAP_FLAG			37
#define VMCS_EXITCODE_MONITOR			39
#define VMCS_EXITCODE_PAUSE			40
#define VMCS_EXITCODE_FAIL_MACHINE_CHECK	41
#define VMCS_EXITCODE_TPR_BELOW			43
#define VMCS_EXITCODE_APIC_ACCESS		44
#define VMCS_EXITCODE_VEOI			45
#define VMCS_EXITCODE_GDTR_IDTR			46
#define VMCS_EXITCODE_LDTR_TR			47
#define VMCS_EXITCODE_EPT_VIOLATION		48
#define VMCS_EXITCODE_EPT_MISCONFIG		49
#define VMCS_EXITCODE_INVEPT			50
#define VMCS_EXITCODE_RDTSCP			51
#define VMCS_EXITCODE_PREEMPT_TIMEOUT		52
#define VMCS_EXITCODE_INVVPID			53
#define VMCS_EXITCODE_WBINVD			54
#define VMCS_EXITCODE_XSETBV			55
#define VMCS_EXITCODE_APIC_WRITE		56
#define VMCS_EXITCODE_RDRAND			57
#define VMCS_EXITCODE_INVPCID			58
#define VMCS_EXITCODE_VMFUNC			59
#define VMCS_EXITCODE_ENCLS			60
#define VMCS_EXITCODE_RDSEED			61
#define VMCS_EXITCODE_PAGE_LOG_FULL		62
#define VMCS_EXITCODE_XSAVES			63
#define VMCS_EXITCODE_XRSTORS			64
#define VMCS_EXITCODE_SPP			66
#define VMCS_EXITCODE_UMWAIT			67
#define VMCS_EXITCODE_TPAUSE			68

/* -------------------------------------------------------------------------- */

static void vmx_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
static void vmx_vcpu_state_commit(struct nvmm_cpu *);

#define VMX_MSRLIST_STAR		0
#define VMX_MSRLIST_LSTAR		1
#define VMX_MSRLIST_CSTAR		2
#define VMX_MSRLIST_SFMASK		3
#define VMX_MSRLIST_KERNELGSBASE	4
#define VMX_MSRLIST_EXIT_NMSR		5
#define VMX_MSRLIST_L1DFLUSH		5

/* On entry, we may do +1 to include L1DFLUSH. */
static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;

struct vmxon {
	uint32_t ident;
#define VMXON_IDENT_REVISION	__BITS(30,0)

	uint8_t data[PAGE_SIZE - 4];
} __packed;

CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);

struct vmxoncpu {
	vaddr_t va;
	paddr_t pa;
};

static struct vmxoncpu vmxoncpu[MAXCPUS];

struct vmcs {
	uint32_t ident;
#define VMCS_IDENT_REVISION	__BITS(30,0)
#define VMCS_IDENT_SHADOW	__BIT(31)

	uint32_t abort;
	uint8_t data[PAGE_SIZE - 8];
} __packed;

CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);

struct msr_entry {
	uint32_t msr;
	uint32_t rsvd;
	uint64_t val;
} __packed;

#define VPID_MAX	0xFFFF

/* Make sure we never run out of VPIDs. */
CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);

static uint64_t vmx_tlb_flush_op __read_mostly;
static uint64_t vmx_ept_flush_op __read_mostly;
static uint64_t vmx_eptp_type __read_mostly;

static uint64_t vmx_pinbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls2 __read_mostly;
static uint64_t vmx_entry_ctls __read_mostly;
static uint64_t vmx_exit_ctls __read_mostly;

static uint64_t vmx_cr0_fixed0 __read_mostly;
static uint64_t vmx_cr0_fixed1 __read_mostly;
static uint64_t vmx_cr4_fixed0 __read_mostly;
static uint64_t vmx_cr4_fixed1 __read_mostly;

extern bool pmap_ept_has_ad;

#define VMX_PINBASED_CTLS_ONE	\
	(PIN_CTLS_INT_EXITING| \
	 PIN_CTLS_NMI_EXITING| \
	 PIN_CTLS_VIRTUAL_NMIS)

#define VMX_PINBASED_CTLS_ZERO	0

#define VMX_PROCBASED_CTLS_ONE	\
	(PROC_CTLS_USE_TSC_OFFSETTING| \
	 PROC_CTLS_HLT_EXITING| \
	 PROC_CTLS_MWAIT_EXITING | \
	 PROC_CTLS_RDPMC_EXITING | \
	 PROC_CTLS_RCR8_EXITING | \
	 PROC_CTLS_LCR8_EXITING | \
	 PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
	 PROC_CTLS_USE_MSR_BITMAPS | \
	 PROC_CTLS_MONITOR_EXITING | \
	 PROC_CTLS_ACTIVATE_CTLS2)

#define VMX_PROCBASED_CTLS_ZERO	\
	(PROC_CTLS_RCR3_EXITING| \
	 PROC_CTLS_LCR3_EXITING)

#define VMX_PROCBASED_CTLS2_ONE	\
	(PROC_CTLS2_ENABLE_EPT| \
	 PROC_CTLS2_ENABLE_VPID| \
	 PROC_CTLS2_UNRESTRICTED_GUEST)

#define VMX_PROCBASED_CTLS2_ZERO	0

#define VMX_ENTRY_CTLS_ONE	\
	(ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
	 ENTRY_CTLS_LOAD_EFER| \
	 ENTRY_CTLS_LOAD_PAT)

#define VMX_ENTRY_CTLS_ZERO	\
	(ENTRY_CTLS_SMM| \
	 ENTRY_CTLS_DISABLE_DUAL)

#define VMX_EXIT_CTLS_ONE	\
	(EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
	 EXIT_CTLS_HOST_LONG_MODE| \
	 EXIT_CTLS_SAVE_PAT| \
	 EXIT_CTLS_LOAD_PAT| \
	 EXIT_CTLS_SAVE_EFER| \
	 EXIT_CTLS_LOAD_EFER)

#define VMX_EXIT_CTLS_ZERO	0

static uint8_t *vmx_asidmap __read_mostly;
static uint32_t vmx_maxasid __read_mostly;
static kmutex_t vmx_asidlock __cacheline_aligned;

#define VMX_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t vmx_xcr0_mask __read_mostly;

#define VMX_NCPUIDS	32

#define VMCS_NPAGES	1
#define VMCS_SIZE	(VMCS_NPAGES * PAGE_SIZE)

#define MSRBM_NPAGES	1
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define CR0_STATIC_MASK \
	(CR0_ET | CR0_NW | CR0_CD)

#define CR4_VALID \
	(CR4_VME | \
	 CR4_PVI | \
	 CR4_TSD | \
	 CR4_DE | \
	 CR4_PSE | \
	 CR4_PAE | \
	 CR4_MCE | \
	 CR4_PGE | \
	 CR4_PCE | \
	 CR4_OSFXSR | \
	 CR4_OSXMMEXCPT | \
	 CR4_UMIP | \
	 /* CR4_LA57 excluded */ \
	 /* CR4_VMXE excluded */ \
	 /* CR4_SMXE excluded */ \
	 CR4_FSGSBASE | \
	 CR4_PCIDE | \
	 CR4_OSXSAVE | \
	 CR4_SMEP | \
	 CR4_SMAP \
	 /* CR4_PKE excluded */ \
	 /* CR4_CET excluded */ \
	 /* CR4_PKS excluded */)
#define CR4_INVALID \
	(0xFFFFFFFFFFFFFFFFULL & ~CR4_VALID)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PSE|CR4_PAE|CR4_PGE|CR4_PCIDE|CR4_SMEP)

/* -------------------------------------------------------------------------- */

struct vmx_machdata {
	volatile uint64_t mach_htlb_gen;
};

static const size_t vmx_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
	    sizeof(struct nvmm_vcpu_conf_cpuid),
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
	    sizeof(struct nvmm_vcpu_conf_tpr)
};

struct vmx_cpudata {
	/* General */
	uint64_t asid;
	bool gtlb_want_flush;
	bool gtsc_want_update;
	uint64_t vcpu_htlb_gen;
	kcpuset_t *htlb_want_flush;

	/* VMCS */
	struct vmcs *vmcs;
	paddr_t vmcs_pa;
	size_t vmcs_refcnt;
	struct cpu_info *vmcs_ci;
	bool vmcs_launched;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;

	/* Intr state */
	bool int_window_exit;
	bool nmi_window_exit;
	bool evt_pending;

	/* Guest state */
	struct msr_entry *gmsr;
	paddr_t gmsr_pa;
	uint64_t gmsr_misc_enable;
	uint64_t gcr2;
	uint64_t gcr8;
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t gtsc;
	struct xsave_header gfpu __aligned(64);

	/* VCPU configuration. */
	bool cpuidpresent[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_cpuid cpuid[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_tpr tpr;
};

static const struct {
	uint64_t selector;
	uint64_t attrib;
	uint64_t limit;
	uint64_t base;
} vmx_guest_segs[NVMM_X64_NSEG] = {
	[NVMM_X64_SEG_ES] = {
		VMCS_GUEST_ES_SELECTOR,
		VMCS_GUEST_ES_ACCESS_RIGHTS,
		VMCS_GUEST_ES_LIMIT,
		VMCS_GUEST_ES_BASE
	},
	[NVMM_X64_SEG_CS] = {
		VMCS_GUEST_CS_SELECTOR,
		VMCS_GUEST_CS_ACCESS_RIGHTS,
		VMCS_GUEST_CS_LIMIT,
		VMCS_GUEST_CS_BASE
	},
	[NVMM_X64_SEG_SS] = {
		VMCS_GUEST_SS_SELECTOR,
		VMCS_GUEST_SS_ACCESS_RIGHTS,
		VMCS_GUEST_SS_LIMIT,
		VMCS_GUEST_SS_BASE
	},
	[NVMM_X64_SEG_DS] = {
		VMCS_GUEST_DS_SELECTOR,
		VMCS_GUEST_DS_ACCESS_RIGHTS,
		VMCS_GUEST_DS_LIMIT,
		VMCS_GUEST_DS_BASE
	},
	[NVMM_X64_SEG_FS] = {
		VMCS_GUEST_FS_SELECTOR,
		VMCS_GUEST_FS_ACCESS_RIGHTS,
		VMCS_GUEST_FS_LIMIT,
		VMCS_GUEST_FS_BASE
	},
	[NVMM_X64_SEG_GS] = {
		VMCS_GUEST_GS_SELECTOR,
		VMCS_GUEST_GS_ACCESS_RIGHTS,
		VMCS_GUEST_GS_LIMIT,
		VMCS_GUEST_GS_BASE
	},
	[NVMM_X64_SEG_GDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_GDTR_LIMIT,
		VMCS_GUEST_GDTR_BASE
	},
	[NVMM_X64_SEG_IDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_IDTR_LIMIT,
		VMCS_GUEST_IDTR_BASE
	},
	[NVMM_X64_SEG_LDT] = {
		VMCS_GUEST_LDTR_SELECTOR,
		VMCS_GUEST_LDTR_ACCESS_RIGHTS,
		VMCS_GUEST_LDTR_LIMIT,
		VMCS_GUEST_LDTR_BASE
	},
	[NVMM_X64_SEG_TR] = {
		VMCS_GUEST_TR_SELECTOR,
		VMCS_GUEST_TR_ACCESS_RIGHTS,
		VMCS_GUEST_TR_LIMIT,
		VMCS_GUEST_TR_BASE
	}
};

/* -------------------------------------------------------------------------- */

static uint64_t
vmx_get_revision(void)
{
	uint64_t msr;

	msr = rdmsr(MSR_IA32_VMX_BASIC);
	msr &= IA32_VMX_BASIC_IDENT;

	return msr;
}

static void
vmx_vmclear_ipi(void *arg1, void *arg2)
{
	paddr_t vmcs_pa = (paddr_t)arg1;
	vmx_vmclear(&vmcs_pa);
}

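/*
 * Execute VMCLEAR on the remote CPU where the VMCS is currently active.
 * Preemption is re-enabled to wait for the cross-call, but the LWP stays
 * bound to the current CPU so that we come back where we left off.
 */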
static void
vmx_vmclear_remote(struct cpu_info *ci, paddr_t vmcs_pa)
{
	uint64_t xc;
	int bound;

	KASSERT(kpreempt_disabled());

	bound = curlwp_bind();
	kpreempt_enable();

	xc = xc_unicast(XC_HIGHPRI, vmx_vmclear_ipi, (void *)vmcs_pa, NULL, ci);
	xc_wait(xc);

	kpreempt_disable();
	curlwp_bindx(bound);
}

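/*
 * Load the VCPU's VMCS on the current CPU, and keep preemption disabled
 * for as long as it is active. Calls may nest; only the outermost enter
 * loads the VMCS. If the VMCS was last active on another CPU, it must be
 * VMCLEARed there before VMPTRLD here, and the launched state is reset.
 */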
static void
vmx_vmcs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct cpu_info *vmcs_ci;

	cpudata->vmcs_refcnt++;
	if (cpudata->vmcs_refcnt > 1) {
		KASSERT(kpreempt_disabled());
		KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
		return;
	}

	vmcs_ci = cpudata->vmcs_ci;
	cpudata->vmcs_ci = (void *)0x00FFFFFFFFFFFFFF; /* clobber */

	kpreempt_disable();

	if (vmcs_ci == NULL) {
		/* This VMCS is loaded for the first time. */
		vmx_vmclear(&cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else if (vmcs_ci != curcpu()) {
		/* This VMCS is active on a remote CPU. */
		vmx_vmclear_remote(vmcs_ci, cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else {
		/* This VMCS is active on curcpu, nothing to do. */
	}

	vmx_vmptrld(&cpudata->vmcs_pa);
}

static void
vmx_vmcs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	KASSERT(cpudata->vmcs_refcnt > 0);
	cpudata->vmcs_refcnt--;

	if (cpudata->vmcs_refcnt > 0) {
		return;
	}

	cpudata->vmcs_ci = curcpu();
	kpreempt_enable();
}

static void
vmx_vmcs_destroy(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	KASSERT(cpudata->vmcs_refcnt == 1);
	cpudata->vmcs_refcnt--;

	vmx_vmclear(&cpudata->vmcs_pa);
	kpreempt_enable();
}

/* -------------------------------------------------------------------------- */

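/*
 * Ask for a VMEXIT as soon as the guest becomes able to take an interrupt
 * or an NMI, by setting the corresponding window-exiting control. Used to
 * defer event injection until the guest is interruptible.
 */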
static void
vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		// XXX INT_STATE_NMI?
		ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = true;
	} else {
		ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = true;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static void
vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = false;
	} else {
		ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = false;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static inline bool
vmx_excp_has_rf(uint8_t vector)
{
	switch (vector) {
	case 1:		/* #DB */
	case 4:		/* #OF */
	case 8:		/* #DF */
	case 18:	/* #MC */
		return false;
	default:
		return true;
	}
}

static inline int
vmx_excp_has_error(uint8_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

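/*
 * Inject the event described in the comm page, via the VM-entry
 * interruption-information field. NMIs must be injected as INTR events
 * with the NMI type; vectors 0, 2 and 3 cannot be injected as exceptions.
 */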
static int
vmx_vcpu_inject(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	int type = 0, err = 0, ret = EINVAL;
	uint64_t rflags, info, error;
	u_int evtype;
	uint8_t vector;

	evtype = comm->event.type;
	vector = comm->event.vector;
	error = comm->event.u.excp.error;
	__insn_barrier();

	vmx_vmcs_enter(vcpu);

	switch (evtype) {
	case NVMM_VCPU_EVENT_EXCP:
		if (vector == 2 || vector >= 32)
			goto out;
		if (vector == 3 || vector == 0)
			goto out;
		if (vmx_excp_has_rf(vector)) {
			rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
			vmx_vmwrite(VMCS_GUEST_RFLAGS, rflags | PSL_RF);
		}
		type = INTR_TYPE_HW_EXC;
		err = vmx_excp_has_error(vector);
		break;
	case NVMM_VCPU_EVENT_INTR:
		type = INTR_TYPE_EXT_INT;
		if (vector == 2) {
			type = INTR_TYPE_NMI;
			vmx_event_waitexit_enable(vcpu, true);
		}
		err = 0;
		break;
	default:
		goto out;
	}

	info =
	    __SHIFTIN((uint64_t)vector, INTR_INFO_VECTOR) |
	    __SHIFTIN((uint64_t)type, INTR_INFO_TYPE) |
	    __SHIFTIN((uint64_t)err, INTR_INFO_ERROR) |
	    __SHIFTIN((uint64_t)1, INTR_INFO_VALID);
	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, error);

	cpudata->evt_pending = true;
	ret = 0;

out:
	vmx_vmcs_leave(vcpu);
	return ret;
}

static void
vmx_inject_ud(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 6;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static void
vmx_inject_gp(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 13;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static inline int
vmx_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
	if (__predict_true(!vcpu->comm->event_commit)) {
		return 0;
	}
	vcpu->comm->event_commit = false;
	return vmx_vcpu_inject(vcpu);
}

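/*
 * Skip the current instruction: advance RIP by the exit instruction
 * length, clear RF, and drop any STI/MOV-SS interruptibility blocking.
 */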
static inline void
vmx_inkernel_advance(void)
{
	uint64_t rip, inslen, intstate, rflags;

	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);

	rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
	vmx_vmwrite(VMCS_GUEST_RFLAGS, rflags & ~PSL_RF);

	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
	vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
	    intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
}

static void
vmx_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
{
	exit->u.inv.hwcode = code;
	exit->reason = NVMM_VCPU_EXIT_INVALID;
}

static void
vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual;

	qual = vmx_vmread(VMCS_EXIT_INTR_INFO);

	if ((qual & INTR_INFO_VALID) == 0) {
		goto error;
	}
	if (__SHIFTOUT(qual, INTR_INFO_TYPE) != INTR_TYPE_NMI) {
		goto error;
	}

	exit->reason = NVMM_VCPU_EXIT_NONE;
	return;

error:
	vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI);
}

#define VMX_CPUID_MAX_BASIC		0x16
#define VMX_CPUID_MAX_HYPERVISOR	0x40000000
#define VMX_CPUID_MAX_EXTENDED		0x80000008
static uint32_t vmx_cpuid_max_basic __read_mostly;
static uint32_t vmx_cpuid_max_extended __read_mostly;

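/* Execute CPUID on the host, and copy the result into the guest GPRs. */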
static void
vmx_inkernel_exec_cpuid(struct vmx_cpudata *cpudata, uint64_t eax, uint64_t ecx)
{
	u_int descs[4];

	x86_cpuid2(eax, ecx, descs);
	cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
}

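/*
 * Filter the CPUID results. Out-of-range leaves are clamped to the highest
 * supported basic leaf, as real hardware does, and the feature leaves are
 * masked to advertise only what NVMM emulates.
 */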
static void
vmx_inkernel_handle_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t eax, uint64_t ecx)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	unsigned int ncpus;
	uint64_t cr4;

	if (eax < 0x40000000) {
		if (__predict_false(eax > vmx_cpuid_max_basic)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else if (eax < 0x80000000) {
		if (__predict_false(eax > VMX_CPUID_MAX_HYPERVISOR)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else {
		if (__predict_false(eax > vmx_cpuid_max_extended)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	}

	switch (eax) {
	case 0x00000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_basic;
		break;
	case 0x00000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;

		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
		if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
			cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_PCID;
		}

		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

		/* CPUID2_OSXSAVE depends on CR4. */
		cr4 = vmx_vmread(VMCS_GUEST_CR4);
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
		}
		break;
	case 0x00000002:
		break;
	case 0x00000003:
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000004: /* Deterministic Cache Parameters */
		break; /* TODO? */
	case 0x00000005: /* MONITOR/MWAIT */
	case 0x00000006: /* Thermal and Power Management */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007: /* Structured Extended Feature Flags Enumeration */
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
			cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
			cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
			if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] |= CPUID_SEF_INVPCID;
			}
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x00000008: /* Empty */
	case 0x00000009: /* Direct Cache Access Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000A: /* Architectural Performance Monitoring */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000B: /* Extended Topology Enumeration */
		switch (ecx) {
		case 0: /* Threads */
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
			    __SHIFTIN(CPUID_TOP_LVLTYPE_SMT, CPUID_TOP_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		case 1: /* Cores */
			ncpus = atomic_load_relaxed(&mach->ncpus);
			cpudata->gprs[NVMM_X64_GPR_RAX] = ilog2(ncpus);
			cpudata->gprs[NVMM_X64_GPR_RBX] = ncpus;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
			    __SHIFTIN(CPUID_TOP_LVLTYPE_CORE, CPUID_TOP_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0; /* LVLTYPE_INVAL */
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000C: /* Empty */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000D: /* Processor Extended State Enumeration */
		if (vmx_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
			if (cpudata->gxcr0 & XCR0_SSE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
			} else {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
			}
			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
			cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
			break;
		case 1:
			cpudata->gprs[NVMM_X64_GPR_RAX] &=
			    (CPUID_PES1_XSAVEOPT | CPUID_PES1_XSAVEC |
			     CPUID_PES1_XGETBV);
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000E: /* Empty */
	case 0x0000000F: /* Intel RDT Monitoring Enumeration */
	case 0x00000010: /* Intel RDT Allocation Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000011: /* Empty */
	case 0x00000012: /* Intel SGX Capability Enumeration */
	case 0x00000013: /* Empty */
	case 0x00000014: /* Intel Processor Trace Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000015: /* TSC and Nominal Core Crystal Clock Information */
	case 0x00000016: /* Processor Frequency Information */
		break;

	case 0x40000000: /* Hypervisor Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = VMX_CPUID_MAX_HYPERVISOR;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;

	case 0x80000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_extended;
		break;
	case 0x80000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
		break;
	case 0x80000002: /* Processor Brand String */
	case 0x80000003: /* Processor Brand String */
	case 0x80000004: /* Processor Brand String */
	case 0x80000005: /* Reserved Zero */
	case 0x80000006: /* Cache Information */
		break;
	case 0x80000007: /* TSC Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000007.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000007.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000007.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000007.edx;
		break;
	case 0x80000008: /* Address Sizes */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000008.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000008.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000008.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000008.edx;
		break;

	default:
		break;
	}
}

static void
vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason)
{
	uint64_t inslen, rip;

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.insn.npc = rip + inslen;
	exit->reason = reason;
}

static void
vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_vcpu_conf_cpuid *cpuid;
	uint64_t eax, ecx;
	size_t i;

	eax = cpudata->gprs[NVMM_X64_GPR_RAX];
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
	vmx_inkernel_handle_cpuid(mach, vcpu, eax, ecx);

	for (i = 0; i < VMX_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			continue;
		}
		cpuid = &cpudata->cpuid[i];
		if (cpuid->leaf != eax) {
			continue;
		}

		if (cpuid->exit) {
			vmx_exit_insn(exit, NVMM_VCPU_EXIT_CPUID);
			return;
		}
		KASSERT(cpuid->mask);

		/* del */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->u.mask.del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;

		/* set */
		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->u.mask.set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;

		break;
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_NONE;
}

static void
vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rflags;

	if (cpudata->int_window_exit) {
		rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
		if (rflags & PSL_I) {
			vmx_event_waitexit_disable(vcpu, false);
		}
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_HALTED;
}

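/* Fields of the exit qualification for control-register accesses. */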
#define VMX_QUAL_CR_NUM		__BITS(3,0)
#define VMX_QUAL_CR_TYPE	__BITS(5,4)
#define		CR_TYPE_WRITE	0
#define		CR_TYPE_READ	1
#define		CR_TYPE_CLTS	2
#define		CR_TYPE_LMSW	3
#define VMX_QUAL_CR_LMSW_OPMEM	__BIT(6)
#define VMX_QUAL_CR_GPR		__BITS(11,8)
#define VMX_QUAL_CR_LMSW_SRC	__BITS(31,16)

static inline int
vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
{
	/* Bits set to 1 in fixed0 are fixed to 1. */
	if ((crval & fixed0) != fixed0) {
		return -1;
	}
	/* Bits set to 0 in fixed1 are fixed to 0. */
	if (crval & ~fixed1) {
		return -1;
	}
	return 0;
}

static int
vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, oldcr0, realcr0, fakecr0;
	uint64_t efer, ctls1;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		fakecr0 = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		fakecr0 = cpudata->gprs[gpr];
	}

	/*
	 * fakecr0 is the value the guest believes is in %cr0. realcr0 is the
	 * actual value in %cr0.
	 *
	 * In fakecr0 we must force CR0_ET to 1.
	 *
	 * In realcr0 we must force CR0_NW and CR0_CD to 0, and CR0_ET and
	 * CR0_NE to 1.
	 */
	fakecr0 |= CR0_ET;
	realcr0 = (fakecr0 & ~CR0_STATIC_MASK) | CR0_ET | CR0_NE;

	if (vmx_check_cr(realcr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
		return -1;
	}

	/*
	 * XXX Handle 32bit PAE paging, need to set PDPTEs, fetched manually
	 * from CR3.
	 */

	if (realcr0 & CR0_PG) {
		ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
		efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			ctls1 |= ENTRY_CTLS_LONG_MODE;
			efer |= EFER_LMA;
		} else {
			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
			efer &= ~EFER_LMA;
		}
		vmx_vmwrite(VMCS_GUEST_IA32_EFER, efer);
		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
	}

	oldcr0 = (vmx_vmread(VMCS_CR0_SHADOW) & CR0_STATIC_MASK) |
	    (vmx_vmread(VMCS_GUEST_CR0) & ~CR0_STATIC_MASK);
	if ((oldcr0 ^ fakecr0) & CR0_TLB_FLUSH) {
		cpudata->gtlb_want_flush = true;
	}

	vmx_vmwrite(VMCS_CR0_SHADOW, fakecr0);
	vmx_vmwrite(VMCS_GUEST_CR0, realcr0);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, oldcr4, cr4;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		gpr = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	if (gpr & CR4_INVALID) {
		return -1;
	}
	cr4 = gpr | CR4_VMXE;
	if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
		return -1;
	}

	oldcr4 = vmx_vmread(VMCS_GUEST_CR4);
	if ((oldcr4 ^ gpr) & CR4_TLB_FLUSH) {
		cpudata->gtlb_want_flush = true;
	}

	vmx_vmwrite(VMCS_GUEST_CR4, cr4);
	vmx_inkernel_advance();
	return 0;
}

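/*
 * CR8 is the task-priority register. It is not handled by the VMCS here,
 * so reads and writes are emulated against the cached gcr8 value.
 */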
static int
vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual, struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr;
	bool write;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type == CR_TYPE_WRITE) {
		write = true;
	} else if (type == CR_TYPE_READ) {
		write = false;
	} else {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (write) {
		if (gpr == NVMM_X64_GPR_RSP) {
			cpudata->gcr8 = vmx_vmread(VMCS_GUEST_RSP);
		} else {
			cpudata->gcr8 = cpudata->gprs[gpr];
		}
		if (cpudata->tpr.exit_changed) {
			exit->reason = NVMM_VCPU_EXIT_TPR_CHANGED;
		}
	} else {
		if (gpr == NVMM_X64_GPR_RSP) {
			vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
		} else {
			cpudata->gprs[gpr] = cpudata->gcr8;
		}
	}

	vmx_inkernel_advance();
	return 0;
}

static void
vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual;
	int ret;

	exit->reason = NVMM_VCPU_EXIT_NONE;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);

	switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
	case 0:
		ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
		break;
	case 4:
		ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
		break;
	case 8:
		ret = vmx_inkernel_handle_cr8(mach, vcpu, qual, exit);
		break;
	default:
		ret = -1;
		break;
	}

	if (ret == -1) {
		vmx_inject_gp(vcpu);
	}
}

#define VMX_QUAL_IO_SIZE	__BITS(2,0)
#define		IO_SIZE_8	0
#define		IO_SIZE_16	1
#define		IO_SIZE_32	3
#define VMX_QUAL_IO_IN		__BIT(3)
#define VMX_QUAL_IO_STR		__BIT(4)
#define VMX_QUAL_IO_REP		__BIT(5)
#define VMX_QUAL_IO_DX		__BIT(6)
#define VMX_QUAL_IO_PORT	__BITS(31,16)

#define VMX_INFO_IO_ADRSIZE	__BITS(9,7)
#define		IO_ADRSIZE_16	0
#define		IO_ADRSIZE_32	1
#define		IO_ADRSIZE_64	2
#define VMX_INFO_IO_SEG		__BITS(17,15)

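/*
 * Decode an I/O instruction exit from the exit qualification and the
 * instruction information, and forward it to userland for emulation.
 */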
1765 static void
vmx_exit_io(struct nvmm_machine * mach,struct nvmm_cpu * vcpu,struct nvmm_vcpu_exit * exit)1766 vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1767 struct nvmm_vcpu_exit *exit)
1768 {
1769 uint64_t qual, info, inslen, rip;
1770
1771 qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
1772 info = vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO);
1773
1774 exit->reason = NVMM_VCPU_EXIT_IO;
1775
1776 exit->u.io.in = (qual & VMX_QUAL_IO_IN) != 0;
1777 exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);
1778
1779 KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
1780 exit->u.io.seg = __SHIFTOUT(info, VMX_INFO_IO_SEG);
1781
1782 if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
1783 exit->u.io.address_size = 8;
1784 } else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
1785 exit->u.io.address_size = 4;
1786 } else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
1787 exit->u.io.address_size = 2;
1788 }
1789
1790 if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
1791 exit->u.io.operand_size = 4;
1792 } else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
1793 exit->u.io.operand_size = 2;
1794 } else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
1795 exit->u.io.operand_size = 1;
1796 }
1797
1798 exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
1799 exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;
1800
1801 if (exit->u.io.in && exit->u.io.str) {
1802 exit->u.io.seg = NVMM_X64_SEG_ES;
1803 }
1804
1805 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
1806 rip = vmx_vmread(VMCS_GUEST_RIP);
1807 exit->u.io.npc = rip + inslen;
1808
1809 vmx_vcpu_state_provide(vcpu,
1810 NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
1811 NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
1812 }
1813
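/*
 * MSRs that are silently ignored in the kernel: reads return zero,
 * writes are discarded.
 */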
1814 static const uint64_t msr_ignore_list[] = {
1815 MSR_BIOS_SIGN,
1816 MSR_IA32_PLATFORM_ID
1817 };
1818
1819 static bool
1820 vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1821 struct nvmm_vcpu_exit *exit)
1822 {
1823 struct vmx_cpudata *cpudata = vcpu->cpudata;
1824 uint64_t val;
1825 size_t i;
1826
1827 if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
1828 if (exit->u.rdmsr.msr == MSR_CR_PAT) {
1829 val = vmx_vmread(VMCS_GUEST_IA32_PAT);
1830 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1831 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1832 goto handled;
1833 }
1834 if (exit->u.rdmsr.msr == MSR_MISC_ENABLE) {
1835 val = cpudata->gmsr_misc_enable;
1836 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1837 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1838 goto handled;
1839 }
1840 if (exit->u.rdmsr.msr == MSR_IA32_ARCH_CAPABILITIES) {
1841 u_int descs[4];
1842 if (cpuid_level < 7) {
1843 goto error;
1844 }
1845 x86_cpuid(7, descs);
1846 if (!(descs[3] & CPUID_SEF_ARCH_CAP)) {
1847 goto error;
1848 }
1849 val = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
1850 val &= (IA32_ARCH_RDCL_NO |
1851 IA32_ARCH_SSB_NO |
1852 IA32_ARCH_MDS_NO |
1853 IA32_ARCH_TAA_NO);
1854 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1855 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1856 goto handled;
1857 }
1858 for (i = 0; i < __arraycount(msr_ignore_list); i++) {
1859 if (msr_ignore_list[i] != exit->u.rdmsr.msr)
1860 continue;
1861 val = 0;
1862 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1863 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1864 goto handled;
1865 }
1866 } else {
1867 if (exit->u.wrmsr.msr == MSR_TSC) {
1868 cpudata->gtsc = exit->u.wrmsr.val;
1869 cpudata->gtsc_want_update = true;
1870 goto handled;
1871 }
1872 if (exit->u.wrmsr.msr == MSR_CR_PAT) {
1873 val = exit->u.wrmsr.val;
1874 if (__predict_false(!nvmm_x86_pat_validate(val))) {
1875 goto error;
1876 }
1877 vmx_vmwrite(VMCS_GUEST_IA32_PAT, val);
1878 goto handled;
1879 }
1880 if (exit->u.wrmsr.msr == MSR_MISC_ENABLE) {
1881 /* Don't care. */
1882 goto handled;
1883 }
1884 for (i = 0; i < __arraycount(msr_ignore_list); i++) {
1885 if (msr_ignore_list[i] != exit->u.wrmsr.msr)
1886 continue;
1887 goto handled;
1888 }
1889 }
1890
1891 return false;
1892
1893 handled:
1894 vmx_inkernel_advance();
1895 return true;
1896
1897 error:
1898 vmx_inject_gp(vcpu);
1899 return true;
1900 }
1901
1902 static void
1903 vmx_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1904 struct nvmm_vcpu_exit *exit)
1905 {
1906 struct vmx_cpudata *cpudata = vcpu->cpudata;
1907 uint64_t inslen, rip;
1908
1909 exit->reason = NVMM_VCPU_EXIT_RDMSR;
1910 exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
1911
1912 if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
1913 exit->reason = NVMM_VCPU_EXIT_NONE;
1914 return;
1915 }
1916
1917 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
1918 rip = vmx_vmread(VMCS_GUEST_RIP);
1919 exit->u.rdmsr.npc = rip + inslen;
1920
1921 vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
1922 }
1923
1924 static void
1925 vmx_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1926 struct nvmm_vcpu_exit *exit)
1927 {
1928 struct vmx_cpudata *cpudata = vcpu->cpudata;
1929 uint64_t rdx, rax, inslen, rip;
1930
1931 rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
1932 rax = cpudata->gprs[NVMM_X64_GPR_RAX];
1933
1934 exit->reason = NVMM_VCPU_EXIT_WRMSR;
1935 exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
1936 exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
1937
1938 if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
1939 exit->reason = NVMM_VCPU_EXIT_NONE;
1940 return;
1941 }
1942
1943 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
1944 rip = vmx_vmread(VMCS_GUEST_RIP);
1945 exit->u.wrmsr.npc = rip + inslen;
1946
1947 vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
1948 }
1949
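/*
 * XSETBV is validated in the kernel: only XCR0 (%ecx == 0) may be set,
 * the value must fit within the host-supported mask, and the x87 bit
 * is mandatory. Anything else gets #GP.
 */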
1950 static void
1951 vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1952 struct nvmm_vcpu_exit *exit)
1953 {
1954 struct vmx_cpudata *cpudata = vcpu->cpudata;
1955 uint64_t val;
1956
1957 exit->reason = NVMM_VCPU_EXIT_NONE;
1958
1959 val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
1960 (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);
1961
1962 if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
1963 goto error;
1964 } else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
1965 goto error;
1966 } else if (__predict_false((val & XCR0_X87) == 0)) {
1967 goto error;
1968 }
1969
1970 cpudata->gxcr0 = val;
1971
1972 vmx_inkernel_advance();
1973 return;
1974
1975 error:
1976 vmx_inject_gp(vcpu);
1977 }
1978
1979 #define VMX_EPT_VIOLATION_READ __BIT(0)
1980 #define VMX_EPT_VIOLATION_WRITE __BIT(1)
1981 #define VMX_EPT_VIOLATION_EXECUTE __BIT(2)
1982
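/*
 * EPT violations become NVMM_VCPU_EXIT_MEMORY exits. The faulting
 * access type is derived from the exit qualification, and userland is
 * expected to resolve the fault (e.g. emulate an MMIO access).
 */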
1983 static void
1984 vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1985 struct nvmm_vcpu_exit *exit)
1986 {
1987 uint64_t perm;
1988 gpaddr_t gpa;
1989
1990 gpa = vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS);
1991
1992 exit->reason = NVMM_VCPU_EXIT_MEMORY;
1993 perm = vmx_vmread(VMCS_EXIT_QUALIFICATION);
1994 if (perm & VMX_EPT_VIOLATION_WRITE)
1995 exit->u.mem.prot = PROT_WRITE;
1996 else if (perm & VMX_EPT_VIOLATION_EXECUTE)
1997 exit->u.mem.prot = PROT_EXEC;
1998 else
1999 exit->u.mem.prot = PROT_READ;
2000 exit->u.mem.gpa = gpa;
2001 exit->u.mem.inst_len = 0;
2002
2003 vmx_vcpu_state_provide(vcpu,
2004 NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
2005 NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
2006 }
2007
2008 /* -------------------------------------------------------------------------- */
2009
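/*
 * Guest FPU context switching. fpu_kern_enter/leave keep the kernel
 * from touching the FPU concurrently; the guest XCR0 is installed
 * while the guest runs, with the host value saved and restored around
 * it.
 */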
2010 static void
2011 vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
2012 {
2013 struct vmx_cpudata *cpudata = vcpu->cpudata;
2014
2015 fpu_kern_enter();
2016 /* TODO: should we use *XSAVE64 here? */
2017 fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask, false);
2018
2019 if (vmx_xcr0_mask != 0) {
2020 cpudata->hxcr0 = rdxcr(0);
2021 wrxcr(0, cpudata->gxcr0);
2022 }
2023 }
2024
2025 static void
2026 vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
2027 {
2028 struct vmx_cpudata *cpudata = vcpu->cpudata;
2029
2030 if (vmx_xcr0_mask != 0) {
2031 cpudata->gxcr0 = rdxcr(0);
2032 wrxcr(0, cpudata->hxcr0);
2033 }
2034
2035 /* TODO: should we use *XSAVE64 here? */
2036 fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask, false);
2037 fpu_kern_leave();
2038 }
2039
2040 static void
2041 vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
2042 {
2043 struct vmx_cpudata *cpudata = vcpu->cpudata;
2044
2045 x86_dbregs_save(curlwp);
2046
2047 ldr7(0);
2048
2049 ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
2050 ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
2051 ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
2052 ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
2053 ldr6(cpudata->drs[NVMM_X64_DR_DR6]);
2054 }
2055
2056 static void
2057 vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
2058 {
2059 struct vmx_cpudata *cpudata = vcpu->cpudata;
2060
2061 cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
2062 cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
2063 cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
2064 cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
2065 cpudata->drs[NVMM_X64_DR_DR6] = rdr6();
2066
2067 x86_dbregs_restore(curlwp);
2068 }
2069
2070 static void
2071 vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
2072 {
2073 struct vmx_cpudata *cpudata = vcpu->cpudata;
2074
2075 /* This gets restored automatically by the CPU. */
2076 vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)curcpu()->ci_idtvec.iv_idt);
2077 vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE));
2078 vmx_vmwrite(VMCS_HOST_CR3, rcr3());
2079 vmx_vmwrite(VMCS_HOST_CR4, rcr4());
2080
2081 cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
2082 }
2083
2084 static void
2085 vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
2086 {
2087 struct vmx_cpudata *cpudata = vcpu->cpudata;
2088
2089 wrmsr(MSR_STAR, cpudata->star);
2090 wrmsr(MSR_LSTAR, cpudata->lstar);
2091 wrmsr(MSR_CSTAR, cpudata->cstar);
2092 wrmsr(MSR_SFMASK, cpudata->sfmask);
2093 wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
2094 }
2095
2096 /* -------------------------------------------------------------------------- */
2097
2098 #define VMX_INVVPID_ADDRESS 0
2099 #define VMX_INVVPID_CONTEXT 1
2100 #define VMX_INVVPID_ALL 2
2101 #define VMX_INVVPID_CONTEXT_NOGLOBAL 3
2102
2103 #define VMX_INVEPT_CONTEXT 1
2104 #define VMX_INVEPT_ALL 2
2105
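/*
 * Two TLB domains are tracked. The guest TLB (gTLB) is tagged with the
 * VPID and flushed with INVVPID, typically after the VCPU migrated to
 * another host CPU. The host-side EPT TLB (hTLB) is tracked with a
 * per-machine generation number and flushed with INVEPT on each CPU
 * that may hold stale EPT translations.
 */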
2106 static inline void
2107 vmx_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
2108 {
2109 struct vmx_cpudata *cpudata = vcpu->cpudata;
2110
2111 if (vcpu->hcpu_last != hcpu) {
2112 cpudata->gtlb_want_flush = true;
2113 }
2114 }
2115
2116 static inline void
2117 vmx_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
2118 {
2119 struct vmx_cpudata *cpudata = vcpu->cpudata;
2120 struct ept_desc ept_desc;
2121
2122 if (__predict_true(!kcpuset_isset(cpudata->htlb_want_flush, hcpu))) {
2123 return;
2124 }
2125
2126 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
2127 ept_desc.mbz = 0;
2128 vmx_invept(vmx_ept_flush_op, &ept_desc);
2129 kcpuset_clear(cpudata->htlb_want_flush, hcpu);
2130 }
2131
2132 static inline uint64_t
2133 vmx_htlb_flush(struct vmx_machdata *machdata, struct vmx_cpudata *cpudata)
2134 {
2135 struct ept_desc ept_desc;
2136 uint64_t machgen;
2137
2138 machgen = machdata->mach_htlb_gen;
2139 if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
2140 return machgen;
2141 }
2142
2143 kcpuset_copy(cpudata->htlb_want_flush, kcpuset_running);
2144
2145 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
2146 ept_desc.mbz = 0;
2147 vmx_invept(vmx_ept_flush_op, &ept_desc);
2148
2149 return machgen;
2150 }
2151
2152 static inline void
2153 vmx_htlb_flush_ack(struct vmx_cpudata *cpudata, uint64_t machgen)
2154 {
2155 cpudata->vcpu_htlb_gen = machgen;
2156 kcpuset_clear(cpudata->htlb_want_flush, cpu_number());
2157 }
2158
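/*
 * If the VMEXIT interrupted the delivery of an event, re-queue that
 * event for the next VMENTRY by copying the IDT-vectoring fields into
 * the entry fields. Software interrupts and exceptions additionally
 * need the instruction length to be replayed correctly.
 */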
2159 static inline void
2160 vmx_exit_evt(struct vmx_cpudata *cpudata)
2161 {
2162 uint64_t info, err, inslen;
2163
2164 cpudata->evt_pending = false;
2165
2166 info = vmx_vmread(VMCS_IDT_VECTORING_INFO);
2167 if (__predict_true((info & INTR_INFO_VALID) == 0)) {
2168 return;
2169 }
2170 err = vmx_vmread(VMCS_IDT_VECTORING_ERROR);
2171
2172 vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
2173 vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, err);
2174
2175 switch (__SHIFTOUT(info, INTR_INFO_TYPE)) {
2176 case INTR_TYPE_SW_INT:
2177 case INTR_TYPE_PRIV_SW_EXC:
2178 case INTR_TYPE_SW_EXC:
2179 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
2180 vmx_vmwrite(VMCS_ENTRY_INSTRUCTION_LENGTH, inslen);
2181 }
2182
2183 cpudata->evt_pending = true;
2184 }
2185
2186 static int
2187 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
2188 struct nvmm_vcpu_exit *exit)
2189 {
2190 struct nvmm_comm_page *comm = vcpu->comm;
2191 struct vmx_machdata *machdata = mach->machdata;
2192 struct vmx_cpudata *cpudata = vcpu->cpudata;
2193 struct vpid_desc vpid_desc;
2194 struct cpu_info *ci;
2195 uint64_t exitcode;
2196 uint64_t intstate;
2197 uint64_t machgen;
2198 int hcpu, ret;
2199 bool launched;
2200
2201 vmx_vmcs_enter(vcpu);
2202
2203 vmx_vcpu_state_commit(vcpu);
2204 comm->state_cached = 0;
2205
2206 if (__predict_false(vmx_vcpu_event_commit(vcpu) != 0)) {
2207 vmx_vmcs_leave(vcpu);
2208 return EINVAL;
2209 }
2210
2211 ci = curcpu();
2212 hcpu = cpu_number();
2213 launched = cpudata->vmcs_launched;
2214
2215 vmx_gtlb_catchup(vcpu, hcpu);
2216 vmx_htlb_catchup(vcpu, hcpu);
2217
2218 if (vcpu->hcpu_last != hcpu) {
2219 vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel);
2220 vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss);
2221 vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt);
2222 vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
2223 cpudata->gtsc_want_update = true;
2224 vcpu->hcpu_last = hcpu;
2225 }
2226
2227 vmx_vcpu_guest_dbregs_enter(vcpu);
2228 vmx_vcpu_guest_misc_enter(vcpu);
2229
2230 while (1) {
2231 if (cpudata->gtlb_want_flush) {
2232 vpid_desc.vpid = cpudata->asid;
2233 vpid_desc.addr = 0;
2234 vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
2235 cpudata->gtlb_want_flush = false;
2236 }
2237
2238 if (__predict_false(cpudata->gtsc_want_update)) {
2239 vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->gtsc - rdtsc());
2240 cpudata->gtsc_want_update = false;
2241 }
2242
2243 vmx_vcpu_guest_fpu_enter(vcpu);
2244 vmx_cli();
2245 machgen = vmx_htlb_flush(machdata, cpudata);
2246 lcr2(cpudata->gcr2);
2247 if (launched) {
2248 ret = vmx_vmresume(cpudata->gprs);
2249 } else {
2250 ret = vmx_vmlaunch(cpudata->gprs);
2251 }
2252 cpudata->gcr2 = rcr2();
2253 vmx_htlb_flush_ack(cpudata, machgen);
2254 vmx_sti();
2255 vmx_vcpu_guest_fpu_leave(vcpu);
2256
2257 if (__predict_false(ret != 0)) {
2258 vmx_exit_invalid(exit, -1);
2259 break;
2260 }
2261 vmx_exit_evt(cpudata);
2262
2263 launched = true;
2264
2265 exitcode = vmx_vmread(VMCS_EXIT_REASON);
2266 exitcode &= __BITS(15,0);
2267
2268 switch (exitcode) {
2269 case VMCS_EXITCODE_EXC_NMI:
2270 vmx_exit_exc_nmi(mach, vcpu, exit);
2271 break;
2272 case VMCS_EXITCODE_EXT_INT:
2273 exit->reason = NVMM_VCPU_EXIT_NONE;
2274 break;
2275 case VMCS_EXITCODE_CPUID:
2276 vmx_exit_cpuid(mach, vcpu, exit);
2277 break;
2278 case VMCS_EXITCODE_HLT:
2279 vmx_exit_hlt(mach, vcpu, exit);
2280 break;
2281 case VMCS_EXITCODE_CR:
2282 vmx_exit_cr(mach, vcpu, exit);
2283 break;
2284 case VMCS_EXITCODE_IO:
2285 vmx_exit_io(mach, vcpu, exit);
2286 break;
2287 case VMCS_EXITCODE_RDMSR:
2288 vmx_exit_rdmsr(mach, vcpu, exit);
2289 break;
2290 case VMCS_EXITCODE_WRMSR:
2291 vmx_exit_wrmsr(mach, vcpu, exit);
2292 break;
2293 case VMCS_EXITCODE_SHUTDOWN:
2294 exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
2295 break;
2296 case VMCS_EXITCODE_MONITOR:
2297 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MONITOR);
2298 break;
2299 case VMCS_EXITCODE_MWAIT:
2300 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MWAIT);
2301 break;
2302 case VMCS_EXITCODE_XSETBV:
2303 vmx_exit_xsetbv(mach, vcpu, exit);
2304 break;
2305 case VMCS_EXITCODE_RDPMC:
2306 case VMCS_EXITCODE_RDTSCP:
2307 case VMCS_EXITCODE_INVVPID:
2308 case VMCS_EXITCODE_INVEPT:
2309 case VMCS_EXITCODE_VMCALL:
2310 case VMCS_EXITCODE_VMCLEAR:
2311 case VMCS_EXITCODE_VMLAUNCH:
2312 case VMCS_EXITCODE_VMPTRLD:
2313 case VMCS_EXITCODE_VMPTRST:
2314 case VMCS_EXITCODE_VMREAD:
2315 case VMCS_EXITCODE_VMRESUME:
2316 case VMCS_EXITCODE_VMWRITE:
2317 case VMCS_EXITCODE_VMXOFF:
2318 case VMCS_EXITCODE_VMXON:
2319 vmx_inject_ud(vcpu);
2320 exit->reason = NVMM_VCPU_EXIT_NONE;
2321 break;
2322 case VMCS_EXITCODE_EPT_VIOLATION:
2323 vmx_exit_epf(mach, vcpu, exit);
2324 break;
2325 case VMCS_EXITCODE_INT_WINDOW:
2326 vmx_event_waitexit_disable(vcpu, false);
2327 exit->reason = NVMM_VCPU_EXIT_INT_READY;
2328 break;
2329 case VMCS_EXITCODE_NMI_WINDOW:
2330 vmx_event_waitexit_disable(vcpu, true);
2331 exit->reason = NVMM_VCPU_EXIT_NMI_READY;
2332 break;
2333 default:
2334 vmx_exit_invalid(exit, exitcode);
2335 break;
2336 }
2337
2338 /* If no reason to return to userland, keep rolling. */
2339 if (nvmm_return_needed(vcpu, exit)) {
2340 break;
2341 }
2342 if (exit->reason != NVMM_VCPU_EXIT_NONE) {
2343 break;
2344 }
2345 }
2346
2347 cpudata->vmcs_launched = launched;
2348
2349 cpudata->gtsc = vmx_vmread(VMCS_TSC_OFFSET) + rdtsc();
2350
2351 vmx_vcpu_guest_misc_leave(vcpu);
2352 vmx_vcpu_guest_dbregs_leave(vcpu);
2353
2354 exit->exitstate.rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
2355 exit->exitstate.cr8 = cpudata->gcr8;
2356 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2357 exit->exitstate.int_shadow =
2358 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2359 exit->exitstate.int_window_exiting = cpudata->int_window_exit;
2360 exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit;
2361 exit->exitstate.evt_pending = cpudata->evt_pending;
2362
2363 vmx_vmcs_leave(vcpu);
2364
2365 return 0;
2366 }
2367
2368 /* -------------------------------------------------------------------------- */
2369
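/*
 * Allocate physically contiguous, zeroed pages and map them into
 * kernel VA. Needed for the structures the CPU references by physical
 * address (VMCS, VMXON region, MSR bitmap, MSR lists).
 */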
2370 static int
2371 vmx_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
2372 {
2373 struct pglist pglist;
2374 paddr_t _pa;
2375 vaddr_t _va;
2376 size_t i;
2377 int ret;
2378
2379 ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
2380 &pglist, 1, 0);
2381 if (ret != 0)
2382 return ENOMEM;
2383 _pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
2384 _va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
2385 UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
2386 if (_va == 0)
2387 goto error;
2388
2389 for (i = 0; i < npages; i++) {
2390 pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
2391 VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
2392 }
2393 pmap_update(pmap_kernel());
2394
2395 memset((void *)_va, 0, npages * PAGE_SIZE);
2396
2397 *pa = _pa;
2398 *va = _va;
2399 return 0;
2400
2401 error:
2402 for (i = 0; i < npages; i++) {
2403 uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
2404 }
2405 return ENOMEM;
2406 }
2407
2408 static void
2409 vmx_memfree(paddr_t pa, vaddr_t va, size_t npages)
2410 {
2411 size_t i;
2412
2413 pmap_kremove(va, npages * PAGE_SIZE);
2414 pmap_update(pmap_kernel());
2415 uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
2416 for (i = 0; i < npages; i++) {
2417 uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
2418 }
2419 }
2420
2421 /* -------------------------------------------------------------------------- */
2422
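/*
 * The 4KB MSR bitmap has four 1KB regions: read bitmaps for MSRs
 * 0x00000000-0x00001FFF and 0xC0000000-0xC0001FFF, then the write
 * bitmaps for the same two ranges at offset 2048. A clear bit means
 * the access does not cause a VMEXIT. For example, MSR_LSTAR
 * (0xC0000082) lands at byte (0x82 / 8) + 1024 = 1040, bit 2.
 */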
2423 static void
2424 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
2425 {
2426 uint64_t byte;
2427 uint8_t bitoff;
2428
2429 if (msr < 0x00002000) {
2430 /* Range 1 */
2431 byte = ((msr - 0x00000000) / 8) + 0;
2432 } else if (msr >= 0xC0000000 && msr < 0xC0002000) {
2433 /* Range 2 */
2434 byte = ((msr - 0xC0000000) / 8) + 1024;
2435 } else {
2436 panic("%s: wrong range", __func__);
2437 }
2438
2439 bitoff = (msr & 0x7);
2440
2441 if (read) {
2442 bitmap[byte] &= ~__BIT(bitoff);
2443 }
2444 if (write) {
2445 bitmap[2048 + byte] &= ~__BIT(bitoff);
2446 }
2447 }
2448
2449 #define VMX_SEG_ATTRIB_TYPE __BITS(3,0)
2450 #define VMX_SEG_ATTRIB_S __BIT(4)
2451 #define VMX_SEG_ATTRIB_DPL __BITS(6,5)
2452 #define VMX_SEG_ATTRIB_P __BIT(7)
2453 #define VMX_SEG_ATTRIB_AVL __BIT(12)
2454 #define VMX_SEG_ATTRIB_L __BIT(13)
2455 #define VMX_SEG_ATTRIB_DEF __BIT(14)
2456 #define VMX_SEG_ATTRIB_G __BIT(15)
2457 #define VMX_SEG_ATTRIB_UNUSABLE __BIT(16)
2458
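/*
 * The VMCS segment attribute layout differs from the packed descriptor
 * format used in nvmm_x64_state: the bits are spread out, and a
 * dedicated "unusable" bit marks segments that are not present.
 */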
2459 static void
2460 vmx_vcpu_setstate_seg(const struct nvmm_x64_state_seg *segs, int idx)
2461 {
2462 uint64_t attrib;
2463
2464 attrib =
2465 __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) |
2466 __SHIFTIN(segs[idx].attrib.s, VMX_SEG_ATTRIB_S) |
2467 __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) |
2468 __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) |
2469 __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) |
2470 __SHIFTIN(segs[idx].attrib.l, VMX_SEG_ATTRIB_L) |
2471 __SHIFTIN(segs[idx].attrib.def, VMX_SEG_ATTRIB_DEF) |
2472 __SHIFTIN(segs[idx].attrib.g, VMX_SEG_ATTRIB_G) |
2473 (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0);
2474
2475 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2476 vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector);
2477 vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib);
2478 }
2479 vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit);
2480 vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base);
2481 }
2482
2483 static void
2484 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx)
2485 {
2486 uint64_t selector = 0, attrib = 0, base, limit;
2487
2488 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2489 selector = vmx_vmread(vmx_guest_segs[idx].selector);
2490 attrib = vmx_vmread(vmx_guest_segs[idx].attrib);
2491 }
2492 limit = vmx_vmread(vmx_guest_segs[idx].limit);
2493 base = vmx_vmread(vmx_guest_segs[idx].base);
2494
2495 segs[idx].selector = selector;
2496 segs[idx].limit = limit;
2497 segs[idx].base = base;
2498 segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE);
2499 segs[idx].attrib.s = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_S);
2500 segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL);
2501 segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P);
2502 segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL);
2503 segs[idx].attrib.l = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_L);
2504 segs[idx].attrib.def = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF);
2505 segs[idx].attrib.g = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_G);
2506 if (attrib & VMX_SEG_ATTRIB_UNUSABLE) {
2507 segs[idx].attrib.p = 0;
2508 }
2509 }
2510
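/*
 * Decide whether the state being pushed requires a guest TLB flush:
 * any change to the translation-affecting bits of CR0/CR4/EFER, or a
 * new CR3 value.
 */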
2511 static inline bool
2512 vmx_state_tlb_flush(const struct nvmm_x64_state *state, uint64_t flags)
2513 {
2514 uint64_t cr0, cr3, cr4, efer;
2515
2516 if (flags & NVMM_X64_STATE_CRS) {
2517 cr0 = vmx_vmread(VMCS_GUEST_CR0);
2518 if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
2519 return true;
2520 }
2521 cr3 = vmx_vmread(VMCS_GUEST_CR3);
2522 if (cr3 != state->crs[NVMM_X64_CR_CR3]) {
2523 return true;
2524 }
2525 cr4 = vmx_vmread(VMCS_GUEST_CR4);
2526 if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
2527 return true;
2528 }
2529 }
2530
2531 if (flags & NVMM_X64_STATE_MSRS) {
2532 efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
2533 if ((efer ^
2534 state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
2535 return true;
2536 }
2537 }
2538
2539 return false;
2540 }
2541
2542 static void
2543 vmx_vcpu_setstate(struct nvmm_cpu *vcpu)
2544 {
2545 struct nvmm_comm_page *comm = vcpu->comm;
2546 const struct nvmm_x64_state *state = &comm->state;
2547 struct vmx_cpudata *cpudata = vcpu->cpudata;
2548 struct fxsave *fpustate;
2549 uint64_t ctls1, intstate;
2550 uint64_t flags;
2551
2552 flags = comm->state_wanted;
2553
2554 vmx_vmcs_enter(vcpu);
2555
2556 if (vmx_state_tlb_flush(state, flags)) {
2557 cpudata->gtlb_want_flush = true;
2558 }
2559
2560 if (flags & NVMM_X64_STATE_SEGS) {
2561 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS);
2562 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS);
2563 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES);
2564 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS);
2565 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS);
2566 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS);
2567 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT);
2568 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT);
2569 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT);
2570 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR);
2571 }
2572
2573 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2574 if (flags & NVMM_X64_STATE_GPRS) {
2575 memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
2576
2577 vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]);
2578 vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]);
2579 vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]);
2580 }
2581
2582 if (flags & NVMM_X64_STATE_CRS) {
2583 /*
2584 * CR0_ET must be 1 both in the shadow and the real register.
2585 * CR0_NE must be 1 in the real register.
2586 * CR0_NW and CR0_CD must be 0 in the real register.
2587 */
2588 vmx_vmwrite(VMCS_CR0_SHADOW,
2589 (state->crs[NVMM_X64_CR_CR0] & CR0_STATIC_MASK) |
2590 CR0_ET);
2591 vmx_vmwrite(VMCS_GUEST_CR0,
2592 (state->crs[NVMM_X64_CR_CR0] & ~CR0_STATIC_MASK) |
2593 CR0_ET | CR0_NE);
2594
2595 cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2];
2596
2597 /* XXX We are not handling PDPTE here. */
2598 vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]);
2599
2600 /* CR4_VMXE is mandatory. */
2601 vmx_vmwrite(VMCS_GUEST_CR4,
2602 (state->crs[NVMM_X64_CR_CR4] & CR4_VALID) | CR4_VMXE);
2603
2604 cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8];
2605
2606 if (vmx_xcr0_mask != 0) {
2607 /* Clear illegal XCR0 bits, set mandatory X87 bit. */
2608 cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
2609 cpudata->gxcr0 &= vmx_xcr0_mask;
2610 cpudata->gxcr0 |= XCR0_X87;
2611 }
2612 }
2613
2614 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2615 if (flags & NVMM_X64_STATE_DRS) {
2616 memcpy(cpudata->drs, state->drs, sizeof(state->drs));
2617
2618 cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF;
2619 vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]);
2620 }
2621
2622 if (flags & NVMM_X64_STATE_MSRS) {
2623 cpudata->gmsr[VMX_MSRLIST_STAR].val =
2624 state->msrs[NVMM_X64_MSR_STAR];
2625 cpudata->gmsr[VMX_MSRLIST_LSTAR].val =
2626 state->msrs[NVMM_X64_MSR_LSTAR];
2627 cpudata->gmsr[VMX_MSRLIST_CSTAR].val =
2628 state->msrs[NVMM_X64_MSR_CSTAR];
2629 cpudata->gmsr[VMX_MSRLIST_SFMASK].val =
2630 state->msrs[NVMM_X64_MSR_SFMASK];
2631 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val =
2632 state->msrs[NVMM_X64_MSR_KERNELGSBASE];
2633
2634 vmx_vmwrite(VMCS_GUEST_IA32_EFER,
2635 state->msrs[NVMM_X64_MSR_EFER]);
2636 vmx_vmwrite(VMCS_GUEST_IA32_PAT,
2637 state->msrs[NVMM_X64_MSR_PAT]);
2638 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS,
2639 state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
2640 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP,
2641 state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
2642 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP,
2643 state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
2644
2645 cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
2646 cpudata->gtsc_want_update = true;
2647
2648 /* ENTRY_CTLS_LONG_MODE must match EFER_LMA. */
2649 ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
2650 if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) {
2651 ctls1 |= ENTRY_CTLS_LONG_MODE;
2652 } else {
2653 ctls1 &= ~ENTRY_CTLS_LONG_MODE;
2654 }
2655 vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
2656 }
2657
2658 if (flags & NVMM_X64_STATE_INTR) {
2659 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2660 intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS);
2661 if (state->intr.int_shadow) {
2662 intstate |= INT_STATE_MOVSS;
2663 }
2664 vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate);
2665
2666 if (state->intr.int_window_exiting) {
2667 vmx_event_waitexit_enable(vcpu, false);
2668 } else {
2669 vmx_event_waitexit_disable(vcpu, false);
2670 }
2671
2672 if (state->intr.nmi_window_exiting) {
2673 vmx_event_waitexit_enable(vcpu, true);
2674 } else {
2675 vmx_event_waitexit_disable(vcpu, true);
2676 }
2677 }
2678
2679 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
2680 if (flags & NVMM_X64_STATE_FPU) {
2681 memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
2682 sizeof(state->fpu));
2683
2684 fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
2685 fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
2686 fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
2687
2688 if (vmx_xcr0_mask != 0) {
2689 /* Reset XSTATE_BV, to force a reload. */
2690 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2691 }
2692 }
2693
2694 vmx_vmcs_leave(vcpu);
2695
2696 comm->state_wanted = 0;
2697 comm->state_cached |= flags;
2698 }
2699
2700 static void
2701 vmx_vcpu_getstate(struct nvmm_cpu *vcpu)
2702 {
2703 struct nvmm_comm_page *comm = vcpu->comm;
2704 struct nvmm_x64_state *state = &comm->state;
2705 struct vmx_cpudata *cpudata = vcpu->cpudata;
2706 uint64_t intstate, flags;
2707
2708 flags = comm->state_wanted;
2709
2710 vmx_vmcs_enter(vcpu);
2711
2712 if (flags & NVMM_X64_STATE_SEGS) {
2713 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS);
2714 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS);
2715 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES);
2716 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS);
2717 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS);
2718 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS);
2719 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT);
2720 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT);
2721 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT);
2722 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR);
2723 }
2724
2725 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2726 if (flags & NVMM_X64_STATE_GPRS) {
2727 memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
2728
2729 state->gprs[NVMM_X64_GPR_RIP] = vmx_vmread(VMCS_GUEST_RIP);
2730 state->gprs[NVMM_X64_GPR_RSP] = vmx_vmread(VMCS_GUEST_RSP);
2731 state->gprs[NVMM_X64_GPR_RFLAGS] = vmx_vmread(VMCS_GUEST_RFLAGS);
2732 }
2733
2734 if (flags & NVMM_X64_STATE_CRS) {
2735 state->crs[NVMM_X64_CR_CR0] =
2736 (vmx_vmread(VMCS_CR0_SHADOW) & CR0_STATIC_MASK) |
2737 (vmx_vmread(VMCS_GUEST_CR0) & ~CR0_STATIC_MASK);
2738 state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2;
2739 state->crs[NVMM_X64_CR_CR3] = vmx_vmread(VMCS_GUEST_CR3);
2740 state->crs[NVMM_X64_CR_CR4] = vmx_vmread(VMCS_GUEST_CR4);
2741 state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8;
2742 state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
2743
2744 /* Hide VMXE. */
2745 state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE;
2746 }
2747
2748 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2749 if (flags & NVMM_X64_STATE_DRS) {
2750 memcpy(state->drs, cpudata->drs, sizeof(state->drs));
2751
2752 state->drs[NVMM_X64_DR_DR7] = vmx_vmread(VMCS_GUEST_DR7);
2753 }
2754
2755 if (flags & NVMM_X64_STATE_MSRS) {
2756 state->msrs[NVMM_X64_MSR_STAR] =
2757 cpudata->gmsr[VMX_MSRLIST_STAR].val;
2758 state->msrs[NVMM_X64_MSR_LSTAR] =
2759 cpudata->gmsr[VMX_MSRLIST_LSTAR].val;
2760 state->msrs[NVMM_X64_MSR_CSTAR] =
2761 cpudata->gmsr[VMX_MSRLIST_CSTAR].val;
2762 state->msrs[NVMM_X64_MSR_SFMASK] =
2763 cpudata->gmsr[VMX_MSRLIST_SFMASK].val;
2764 state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
2765 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val;
2766 state->msrs[NVMM_X64_MSR_EFER] =
2767 vmx_vmread(VMCS_GUEST_IA32_EFER);
2768 state->msrs[NVMM_X64_MSR_PAT] =
2769 vmx_vmread(VMCS_GUEST_IA32_PAT);
2770 state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
2771 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS);
2772 state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
2773 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP);
2774 state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
2775 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP);
2776 state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;
2777 }
2778
2779 if (flags & NVMM_X64_STATE_INTR) {
2780 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2781 state->intr.int_shadow =
2782 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2783 state->intr.int_window_exiting = cpudata->int_window_exit;
2784 state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
2785 state->intr.evt_pending = cpudata->evt_pending;
2786 }
2787
2788 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
2789 if (flags & NVMM_X64_STATE_FPU) {
2790 memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
2791 sizeof(state->fpu));
2792 }
2793
2794 vmx_vmcs_leave(vcpu);
2795
2796 comm->state_wanted = 0;
2797 comm->state_cached |= flags;
2798 }
2799
2800 static void
2801 vmx_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
2802 {
2803 vcpu->comm->state_wanted = flags;
2804 vmx_vcpu_getstate(vcpu);
2805 }
2806
2807 static void
2808 vmx_vcpu_state_commit(struct nvmm_cpu *vcpu)
2809 {
2810 vcpu->comm->state_wanted = vcpu->comm->state_commit;
2811 vcpu->comm->state_commit = 0;
2812 vmx_vcpu_setstate(vcpu);
2813 }
2814
2815 /* -------------------------------------------------------------------------- */
2816
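/*
 * ASID (VPID) allocation, from a simple global bitmap protected by
 * vmx_asidlock. Each VCPU gets a unique VPID so that its TLB entries
 * are tagged separately from the host and from other VCPUs.
 */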
2817 static void
2818 vmx_asid_alloc(struct nvmm_cpu *vcpu)
2819 {
2820 struct vmx_cpudata *cpudata = vcpu->cpudata;
2821 size_t i, oct, bit;
2822
2823 mutex_enter(&vmx_asidlock);
2824
2825 for (i = 0; i < vmx_maxasid; i++) {
2826 oct = i / 8;
2827 bit = i % 8;
2828
2829 if (vmx_asidmap[oct] & __BIT(bit)) {
2830 continue;
2831 }
2832
2833 cpudata->asid = i;
2834
2835 vmx_asidmap[oct] |= __BIT(bit);
2836 vmx_vmwrite(VMCS_VPID, i);
2837 mutex_exit(&vmx_asidlock);
2838 return;
2839 }
2840
2841 mutex_exit(&vmx_asidlock);
2842
2843 panic("%s: impossible", __func__);
2844 }
2845
2846 static void
2847 vmx_asid_free(struct nvmm_cpu *vcpu)
2848 {
2849 size_t oct, bit;
2850 uint64_t asid;
2851
2852 asid = vmx_vmread(VMCS_VPID);
2853
2854 oct = asid / 8;
2855 bit = asid % 8;
2856
2857 mutex_enter(&vmx_asidlock);
2858 vmx_asidmap[oct] &= ~__BIT(bit);
2859 mutex_exit(&vmx_asidlock);
2860 }
2861
2862 static void
2863 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2864 {
2865 struct vmx_cpudata *cpudata = vcpu->cpudata;
2866 struct vmcs *vmcs = cpudata->vmcs;
2867 struct msr_entry *gmsr = cpudata->gmsr;
2868 extern uint8_t vmx_resume_rip;
2869 uint64_t rev, eptp;
2870
2871 rev = vmx_get_revision();
2872
2873 memset(vmcs, 0, VMCS_SIZE);
2874 vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION);
2875 vmcs->abort = 0;
2876
2877 vmx_vmcs_enter(vcpu);
2878
2879 /* No link pointer. */
2880 vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF);
2881
2882 /* Install the CTLSs. */
2883 vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls);
2884 vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls);
2885 vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2);
2886 vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls);
2887 vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls);
2888
2889 /* Allow direct access to certain MSRs. */
2890 memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
2891 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true);
2892 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
2893 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
2894 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
2895 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
2896 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
2897 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
2898 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
2899 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
2900 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
2901 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
2902 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
2903 vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa);
2904
2905 /*
2906 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. This
2907 * includes the L1D_FLUSH MSR, to mitigate L1TF.
2908 */
2909 gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
2910 gmsr[VMX_MSRLIST_STAR].val = 0;
2911 gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR;
2912 gmsr[VMX_MSRLIST_LSTAR].val = 0;
2913 gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR;
2914 gmsr[VMX_MSRLIST_CSTAR].val = 0;
2915 gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK;
2916 gmsr[VMX_MSRLIST_SFMASK].val = 0;
2917 gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE;
2918 gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0;
2919 gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD;
2920 gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH;
2921 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa);
2922 vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa);
2923 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr);
2924 vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR);
2925
2926 /* Set the CR0 mask. Any change of these bits causes a VMEXIT. */
2927 vmx_vmwrite(VMCS_CR0_MASK, CR0_STATIC_MASK);
2928
2929 /* Force unsupported CR4 fields to zero. */
2930 vmx_vmwrite(VMCS_CR4_MASK, CR4_INVALID);
2931 vmx_vmwrite(VMCS_CR4_SHADOW, 0);
2932
2933 /* Set the Host state for resuming. */
2934 vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)&vmx_resume_rip);
2935 vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL));
2936 vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2937 vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2938 vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2939 vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0);
2940 vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0);
2941 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0);
2942 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0);
2943 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0);
2944 vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT));
2945 vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER));
2946 vmx_vmwrite(VMCS_HOST_CR0, rcr0() & ~CR0_TS);
2947
2948 /* Generate ASID. */
2949 vmx_asid_alloc(vcpu);
2950
2951 /* Enable Extended Paging, 4-Level. */
2952 eptp =
2953 __SHIFTIN(vmx_eptp_type, EPTP_TYPE) |
2954 __SHIFTIN(4-1, EPTP_WALKLEN) |
2955 (pmap_ept_has_ad ? EPTP_FLAGS_AD : 0) |
2956 mach->vm->vm_map.pmap->pm_pdirpa[0];
2957 vmx_vmwrite(VMCS_EPTP, eptp);
2958
2959 /* Init IA32_MISC_ENABLE. */
2960 cpudata->gmsr_misc_enable = rdmsr(MSR_MISC_ENABLE);
2961 cpudata->gmsr_misc_enable &=
2962 ~(IA32_MISC_PERFMON_EN|IA32_MISC_EISST_EN|IA32_MISC_MWAIT_EN);
2963 cpudata->gmsr_misc_enable |=
2964 (IA32_MISC_BTS_UNAVAIL|IA32_MISC_PEBS_UNAVAIL);
2965
2966 /* Init XSAVE header. */
2967 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2968 cpudata->gfpu.xsh_xcomp_bv = 0;
2969
2970 /* These MSRs are static. */
2971 cpudata->star = rdmsr(MSR_STAR);
2972 cpudata->lstar = rdmsr(MSR_LSTAR);
2973 cpudata->cstar = rdmsr(MSR_CSTAR);
2974 cpudata->sfmask = rdmsr(MSR_SFMASK);
2975
2976 /* Install the RESET state. */
2977 memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
2978 sizeof(nvmm_x86_reset_state));
2979 vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
2980 vcpu->comm->state_cached = 0;
2981 vmx_vcpu_setstate(vcpu);
2982
2983 vmx_vmcs_leave(vcpu);
2984 }
2985
2986 static int
2987 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2988 {
2989 struct vmx_cpudata *cpudata;
2990 int error;
2991
2992 /* Allocate the VMX cpudata. */
2993 cpudata = (struct vmx_cpudata *)uvm_km_alloc(kernel_map,
2994 roundup(sizeof(*cpudata), PAGE_SIZE), 0,
2995 UVM_KMF_WIRED|UVM_KMF_ZERO);
2996 vcpu->cpudata = cpudata;
2997
2998 /* VMCS */
2999 error = vmx_memalloc(&cpudata->vmcs_pa, (vaddr_t *)&cpudata->vmcs,
3000 VMCS_NPAGES);
3001 if (error)
3002 goto error;
3003
3004 /* MSR Bitmap */
3005 error = vmx_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
3006 MSRBM_NPAGES);
3007 if (error)
3008 goto error;
3009
3010 /* Guest MSR List */
3011 error = vmx_memalloc(&cpudata->gmsr_pa, (vaddr_t *)&cpudata->gmsr, 1);
3012 if (error)
3013 goto error;
3014
3015 kcpuset_create(&cpudata->htlb_want_flush, true);
3016
3017 /* Init the VCPU info. */
3018 vmx_vcpu_init(mach, vcpu);
3019
3020 return 0;
3021
3022 error:
3023 if (cpudata->vmcs_pa) {
3024 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
3025 VMCS_NPAGES);
3026 }
3027 if (cpudata->msrbm_pa) {
3028 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
3029 MSRBM_NPAGES);
3030 }
3031 if (cpudata->gmsr_pa) {
3032 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
3033 }
3034
3035 	/* Must match the uvm_km_alloc() above. */
	uvm_km_free(kernel_map, (vaddr_t)cpudata,
	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
3036 return error;
3037 }
3038
3039 static void
3040 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
3041 {
3042 struct vmx_cpudata *cpudata = vcpu->cpudata;
3043
3044 vmx_vmcs_enter(vcpu);
3045 vmx_asid_free(vcpu);
3046 vmx_vmcs_destroy(vcpu);
3047
3048 kcpuset_destroy(cpudata->htlb_want_flush);
3049
3050 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, VMCS_NPAGES);
3051 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
3052 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
3053 uvm_km_free(kernel_map, (vaddr_t)cpudata,
3054 roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
3055 }
3056
3057 /* -------------------------------------------------------------------------- */
3058
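/*
 * VCPU configuration, reached from userland through libnvmm. As a
 * rough sketch of the calling side (assuming the nvmm_vcpu_configure()
 * API of libnvmm; error handling omitted):
 *
 *	struct nvmm_vcpu_conf_cpuid cpuid;
 *
 *	memset(&cpuid, 0, sizeof(cpuid));
 *	cpuid.mask = 1;
 *	cpuid.leaf = 0x00000001;
 *	cpuid.u.mask.del.ecx = CPUID2_VMX;	<- hide VMX from the guest
 *	nvmm_vcpu_configure(&mach, &vcpu, NVMM_VCPU_CONF_CPUID, &cpuid);
 */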
3059 static int
3060 vmx_vcpu_configure_cpuid(struct vmx_cpudata *cpudata, void *data)
3061 {
3062 struct nvmm_vcpu_conf_cpuid *cpuid = data;
3063 size_t i;
3064
3065 if (__predict_false(cpuid->mask && cpuid->exit)) {
3066 return EINVAL;
3067 }
3068 if (__predict_false(cpuid->mask &&
3069 ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
3070 (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
3071 (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
3072 (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
3073 return EINVAL;
3074 }
3075
3076 /* If unset, delete, to restore the default behavior. */
3077 if (!cpuid->mask && !cpuid->exit) {
3078 for (i = 0; i < VMX_NCPUIDS; i++) {
3079 if (!cpudata->cpuidpresent[i]) {
3080 continue;
3081 }
3082 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
3083 cpudata->cpuidpresent[i] = false;
3084 }
3085 }
3086 return 0;
3087 }
3088
3089 /* If already here, replace. */
3090 for (i = 0; i < VMX_NCPUIDS; i++) {
3091 if (!cpudata->cpuidpresent[i]) {
3092 continue;
3093 }
3094 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
3095 memcpy(&cpudata->cpuid[i], cpuid,
3096 sizeof(struct nvmm_vcpu_conf_cpuid));
3097 return 0;
3098 }
3099 }
3100
3101 /* Not here, insert. */
3102 for (i = 0; i < VMX_NCPUIDS; i++) {
3103 if (!cpudata->cpuidpresent[i]) {
3104 cpudata->cpuidpresent[i] = true;
3105 memcpy(&cpudata->cpuid[i], cpuid,
3106 sizeof(struct nvmm_vcpu_conf_cpuid));
3107 return 0;
3108 }
3109 }
3110
3111 return ENOBUFS;
3112 }
3113
3114 static int
3115 vmx_vcpu_configure_tpr(struct vmx_cpudata *cpudata, void *data)
3116 {
3117 struct nvmm_vcpu_conf_tpr *tpr = data;
3118
3119 memcpy(&cpudata->tpr, tpr, sizeof(*tpr));
3120 return 0;
3121 }
3122
3123 static int
3124 vmx_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
3125 {
3126 struct vmx_cpudata *cpudata = vcpu->cpudata;
3127
3128 switch (op) {
3129 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID):
3130 return vmx_vcpu_configure_cpuid(cpudata, data);
3131 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR):
3132 return vmx_vcpu_configure_tpr(cpudata, data);
3133 default:
3134 return EINVAL;
3135 }
3136 }
3137
3138 static void
3139 vmx_vcpu_suspend(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
3140 {
3141 struct vmx_cpudata *cpudata = vcpu->cpudata;
3142 struct cpu_info *vmcs_ci;
3143
3144 KASSERT(cpudata->vmcs_refcnt == 0);
3145
3146 vmcs_ci = cpudata->vmcs_ci;
3147 cpudata->vmcs_ci = (void *)0x00FFFFFFFFFFFFFF; /* clobber */
3148
3149 kpreempt_disable();
3150 if (vmcs_ci == NULL) {
3151 /* VMCS is inactive, nothing to do. */
3152 } else if (vmcs_ci != curcpu()) {
3153 /* VMCS is active on a remote CPU; clear it there. */
3154 vmx_vmclear_remote(vmcs_ci, cpudata->vmcs_pa);
3155 } else {
3156 /* VMCS is active on this CPU; clear it here. */
3157 vmx_vmclear(&cpudata->vmcs_pa);
3158 }
3159 kpreempt_enable();
3160 }
3161
3162 static void
3163 vmx_vcpu_resume(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
3164 {
3165 struct vmx_cpudata *cpudata = vcpu->cpudata;
3166
3167 KASSERT(cpudata->vmcs_refcnt == 0);
3168
3169 /* Mark VMCS as inactive. */
3170 cpudata->vmcs_ci = NULL;
3171 }
3172
3173 /* -------------------------------------------------------------------------- */
3174
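/*
 * Called by the pmap when EPT mappings change. Bumping the machine
 * generation marks every VCPU as needing an INVEPT; the IPI broadcast
 * forces running VCPUs out of guest mode so they notice.
 */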
3175 static void
3176 vmx_tlb_flush(struct pmap *pm)
3177 {
3178 struct nvmm_machine *mach = pm->pm_data;
3179 struct vmx_machdata *machdata = mach->machdata;
3180
3181 atomic_inc_64(&machdata->mach_htlb_gen);
3182
3183 /* Generates IPIs, which cause #VMEXITs. */
3184 pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
3185 }
3186
3187 static void
3188 vmx_machine_create(struct nvmm_machine *mach)
3189 {
3190 struct pmap *pmap = mach->vm->vm_map.pmap;
3191 struct vmx_machdata *machdata;
3192
3193 /* Convert to EPT. */
3194 pmap_ept_transform(pmap);
3195
3196 /* Fill in pmap info. */
3197 pmap->pm_data = (void *)mach;
3198 pmap->pm_tlb_flush = vmx_tlb_flush;
3199
3200 machdata = kmem_zalloc(sizeof(struct vmx_machdata), KM_SLEEP);
3201 mach->machdata = machdata;
3202
3203 /* Start with an hTLB flush everywhere. */
3204 machdata->mach_htlb_gen = 1;
3205 }
3206
3207 static void
3208 vmx_machine_destroy(struct nvmm_machine *mach)
3209 {
3210 struct vmx_machdata *machdata = mach->machdata;
3211
3212 kmem_free(machdata, sizeof(struct vmx_machdata));
3213 }
3214
3215 static int
3216 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
3217 {
3218 panic("%s: impossible", __func__);
3219 }
3220
3221 /* -------------------------------------------------------------------------- */
3222
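/*
 * The VMX capability MSRs encode, for each control bit, whether it is
 * allowed to be 0 (low 32 bits) and allowed to be 1 (high 32 bits).
 * For example, if control bit 7 must be 1, then bit 7 of the MSR is
 * set (zero not allowed) and bit 39 is set (one allowed).
 */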
3223 #define CTLS_ONE_ALLOWED(msrval, bitoff) \
3224 ((msrval & __BIT(32 + bitoff)) != 0)
3225 #define CTLS_ZERO_ALLOWED(msrval, bitoff) \
3226 ((msrval & __BIT(bitoff)) == 0)
3227
3228 static int
3229 vmx_check_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls, uint64_t set_one)
3230 {
3231 uint64_t basic, val, true_val;
3232 bool has_true;
3233 size_t i;
3234
3235 basic = rdmsr(MSR_IA32_VMX_BASIC);
3236 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
3237
3238 val = rdmsr(msr_ctls);
3239 if (has_true) {
3240 true_val = rdmsr(msr_true_ctls);
3241 } else {
3242 true_val = val;
3243 }
3244
3245 for (i = 0; i < 32; i++) {
3246 if (!(set_one & __BIT(i))) {
3247 continue;
3248 }
3249 if (!CTLS_ONE_ALLOWED(true_val, i)) {
3250 return -1;
3251 }
3252 }
3253
3254 return 0;
3255 }
3256
3257 static int
3258 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls,
3259 uint64_t set_one, uint64_t set_zero, uint64_t *res)
3260 {
3261 uint64_t basic, val, true_val;
3262 bool one_allowed, zero_allowed, has_true;
3263 size_t i;
3264
3265 basic = rdmsr(MSR_IA32_VMX_BASIC);
3266 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
3267
3268 val = rdmsr(msr_ctls);
3269 if (has_true) {
3270 true_val = rdmsr(msr_true_ctls);
3271 } else {
3272 true_val = val;
3273 }
3274
3275 for (i = 0; i < 32; i++) {
3276 one_allowed = CTLS_ONE_ALLOWED(true_val, i);
3277 zero_allowed = CTLS_ZERO_ALLOWED(true_val, i);
3278
3279 if (zero_allowed && !one_allowed) {
3280 if (set_one & __BIT(i))
3281 return -1;
3282 *res &= ~__BIT(i);
3283 } else if (one_allowed && !zero_allowed) {
3284 if (set_zero & __BIT(i))
3285 return -1;
3286 *res |= __BIT(i);
3287 } else {
3288 if (set_zero & __BIT(i)) {
3289 *res &= ~__BIT(i);
3290 } else if (set_one & __BIT(i)) {
3291 *res |= __BIT(i);
3292 } else if (!has_true) {
3293 *res &= ~__BIT(i);
3294 } else if (CTLS_ZERO_ALLOWED(val, i)) {
3295 *res &= ~__BIT(i);
3296 } else if (CTLS_ONE_ALLOWED(val, i)) {
3297 *res |= __BIT(i);
3298 } else {
3299 return -1;
3300 }
3301 }
3302 }
3303
3304 return 0;
3305 }
3306
3307 static bool
3308 vmx_ident(void)
3309 {
3310 uint64_t msr;
3311 int ret;
3312
3313 if (!(cpu_feature[1] & CPUID2_VMX)) {
3314 return false;
3315 }
3316
3317 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
3318 if ((msr & IA32_FEATURE_CONTROL_LOCK) != 0 &&
3319 (msr & IA32_FEATURE_CONTROL_OUT_SMX) == 0) {
3320 printf("NVMM: VMX disabled in BIOS\n");
3321 return false;
3322 }
3323
3324 msr = rdmsr(MSR_IA32_VMX_BASIC);
3325 if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) {
3326 printf("NVMM: I/O reporting not supported\n");
3327 return false;
3328 }
3329 if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) {
3330 printf("NVMM: WB memory not supported\n");
3331 return false;
3332 }
3333
3334 /* PG and PE are reported, even if Unrestricted Guests is supported. */
3335 vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE);
3336 vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE);
3337 ret = vmx_check_cr(rcr0(), vmx_cr0_fixed0, vmx_cr0_fixed1);
3338 if (ret == -1) {
3339 printf("NVMM: CR0 requirements not satisfied\n");
3340 return false;
3341 }
3342
3343 vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
3344 vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
3345 ret = vmx_check_cr(rcr4() | CR4_VMXE, vmx_cr4_fixed0, vmx_cr4_fixed1);
3346 if (ret == -1) {
3347 printf("NVMM: CR4 requirements not satisfied\n");
3348 return false;
3349 }
3350
3351 /* Init the CTLSs right now, and check for errors. */
3352 ret = vmx_init_ctls(
3353 MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3354 VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO,
3355 &vmx_pinbased_ctls);
3356 if (ret == -1) {
3357 printf("NVMM: pin-based-ctls requirements not satisfied\n");
3358 return false;
3359 }
3360 ret = vmx_init_ctls(
3361 MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3362 VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO,
3363 &vmx_procbased_ctls);
3364 if (ret == -1) {
3365 printf("NVMM: proc-based-ctls requirements not satisfied\n");
3366 return false;
3367 }
3368 ret = vmx_init_ctls(
3369 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3370 VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO,
3371 &vmx_procbased_ctls2);
3372 if (ret == -1) {
3373 printf("NVMM: proc-based-ctls2 requirements not satisfied\n");
3374 return false;
3375 }
3376 ret = vmx_check_ctls(
3377 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3378 PROC_CTLS2_INVPCID_ENABLE);
3379 if (ret != -1) {
3380 vmx_procbased_ctls2 |= PROC_CTLS2_INVPCID_ENABLE;
3381 }
3382 ret = vmx_init_ctls(
3383 MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3384 VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO,
3385 &vmx_entry_ctls);
3386 if (ret == -1) {
3387 printf("NVMM: entry-ctls requirements not satisfied\n");
3388 return false;
3389 }
3390 ret = vmx_init_ctls(
3391 MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS,
3392 VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO,
3393 &vmx_exit_ctls);
3394 if (ret == -1) {
3395 printf("NVMM: exit-ctls requirements not satisfied\n");
3396 return false;
3397 }
3398
3399 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3400 if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) {
3401 printf("NVMM: 4-level page tree not supported\n");
3402 return false;
3403 }
3404 if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) {
3405 printf("NVMM: INVEPT not supported\n");
3406 return false;
3407 }
3408 if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) {
3409 printf("NVMM: INVVPID not supported\n");
3410 return false;
3411 }
3412 if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) != 0) {
3413 pmap_ept_has_ad = true;
3414 } else {
3415 pmap_ept_has_ad = false;
3416 }
3417 if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) {
3418 printf("NVMM: EPT UC/WB memory types not supported\n");
3419 return false;
3420 }
3421
3422 return true;
3423 }
3424
3425 static void
3426 vmx_init_asid(uint32_t maxasid)
3427 {
3428 size_t allocsz;
3429
3430 mutex_init(&vmx_asidlock, MUTEX_DEFAULT, IPL_NONE);
3431
3432 vmx_maxasid = maxasid;
3433 allocsz = roundup(maxasid, 8) / 8;
3434 vmx_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
3435
3436 /* ASID 0 is reserved for the host. */
3437 vmx_asidmap[0] |= __BIT(0);
3438 }
3439
3440 static void
3441 vmx_change_cpu(void *arg1, void *arg2)
3442 {
3443 struct cpu_info *ci = curcpu();
3444 bool enable = arg1 != NULL;
3445 uint64_t msr, cr4;
3446
3447 if (enable) {
3448 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
3449 if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) {
3450 /* Lock now, with VMX-outside-SMX enabled. */
3451 wrmsr(MSR_IA32_FEATURE_CONTROL, msr |
3452 IA32_FEATURE_CONTROL_LOCK |
3453 IA32_FEATURE_CONTROL_OUT_SMX);
3454 }
3455 }
3456
3457 if (!enable) {
3458 vmx_vmxoff();
3459 }
3460
3461 cr4 = rcr4();
3462 if (enable) {
3463 cr4 |= CR4_VMXE;
3464 } else {
3465 cr4 &= ~CR4_VMXE;
3466 }
3467 lcr4(cr4);
3468
3469 if (enable) {
3470 vmx_vmxon(&vmxoncpu[cpu_index(ci)].pa);
3471 }
3472 }
3473
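/*
 * L1TF mitigation. If the CPU does not declare SKIP_L1DFL_VMENTRY but
 * supports the L1D_FLUSH command, extend the entry MSR-load list by
 * one entry, so that IA32_FLUSH_CMD flushes the L1D cache on every
 * VMENTRY.
 */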
3474 static void
3475 vmx_init_l1tf(void)
3476 {
3477 u_int descs[4];
3478 uint64_t msr;
3479
3480 if (cpuid_level < 7) {
3481 return;
3482 }
3483
3484 x86_cpuid(7, descs);
3485
3486 if (descs[3] & CPUID_SEF_ARCH_CAP) {
3487 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
3488 if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) {
3489 /* No mitigation needed. */
3490 return;
3491 }
3492 }
3493
3494 if (descs[3] & CPUID_SEF_L1D_FLUSH) {
3495 /* Enable hardware mitigation. */
3496 vmx_msrlist_entry_nmsr += 1;
3497 }
3498 }
3499
3500 static void
3501 vmx_suspend_interrupt(void)
3502 {
3503
3504 /*
3505 * Generates IPIs, which cause #VMEXITs. No other purpose for
3506 * the TLB business; the #VMEXIT triggered by IPI is the only
3507 * effect that matters here.
3508 */
3509 pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
3510 }
3511
3512 static void
3513 vmx_suspend(void)
3514 {
3515 uint64_t xc;
3516
3517 xc = xc_broadcast(0, vmx_change_cpu, (void *)false, NULL);
3518 xc_wait(xc);
3519 }
3520
3521 static void
3522 vmx_resume(void)
3523 {
3524 uint64_t xc;
3525
3526 xc = xc_broadcast(0, vmx_change_cpu, (void *)true, NULL);
3527 xc_wait(xc);
3528 }
3529
3530 static void
3531 vmx_init(void)
3532 {
3533 CPU_INFO_ITERATOR cii;
3534 struct cpu_info *ci;
3535 uint64_t msr;
3536 struct vmxon *vmxon;
3537 uint32_t revision;
3538 u_int descs[4];
3539 paddr_t pa;
3540 vaddr_t va;
3541 int error;
3542
3543 /* Init the ASID bitmap (VPID). */
3544 vmx_init_asid(VPID_MAX);
3545
3546 /* Init the XCR0 mask. */
3547 vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
3548
3549 /* Init the max basic CPUID leaf. */
3550 vmx_cpuid_max_basic = uimin(cpuid_level, VMX_CPUID_MAX_BASIC);
3551
3552 /* Init the max extended CPUID leaf. */
3553 x86_cpuid(0x80000000, descs);
3554 vmx_cpuid_max_extended = uimin(descs[0], VMX_CPUID_MAX_EXTENDED);
3555
3556 /* Init the TLB flush op, the EPT flush op and the EPTP type. */
3557 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3558 if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
3559 vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
3560 } else {
3561 vmx_tlb_flush_op = VMX_INVVPID_ALL;
3562 }
3563 if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
3564 vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
3565 } else {
3566 vmx_ept_flush_op = VMX_INVEPT_ALL;
3567 }
3568 if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {
3569 vmx_eptp_type = EPTP_TYPE_WB;
3570 } else {
3571 vmx_eptp_type = EPTP_TYPE_UC;
3572 }
3573
3574 /* Init the L1TF mitigation. */
3575 vmx_init_l1tf();
3576
3577 memset(vmxoncpu, 0, sizeof(vmxoncpu));
3578 revision = vmx_get_revision();
3579
3580 for (CPU_INFO_FOREACH(cii, ci)) {
3581 error = vmx_memalloc(&pa, &va, 1);
3582 if (error) {
3583 panic("%s: out of memory", __func__);
3584 }
3585 vmxoncpu[cpu_index(ci)].pa = pa;
3586 vmxoncpu[cpu_index(ci)].va = va;
3587
3588 vmxon = (struct vmxon *)vmxoncpu[cpu_index(ci)].va;
3589 vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION);
3590 }
3591
3592 vmx_resume();
3593 }
3594
3595 static void
3596 vmx_fini_asid(void)
3597 {
3598 size_t allocsz;
3599
3600 allocsz = roundup(vmx_maxasid, 8) / 8;
3601 kmem_free(vmx_asidmap, allocsz);
3602
3603 mutex_destroy(&vmx_asidlock);
3604 }
3605
3606 static void
3607 vmx_fini(void)
3608 {
3609 size_t i;
3610
3611 vmx_suspend();
3612
3613 for (i = 0; i < MAXCPUS; i++) {
3614 if (vmxoncpu[i].pa != 0)
3615 vmx_memfree(vmxoncpu[i].pa, vmxoncpu[i].va, 1);
3616 }
3617
3618 vmx_fini_asid();
3619 }
3620
3621 static void
3622 vmx_capability(struct nvmm_capability *cap)
3623 {
3624 cap->arch.mach_conf_support = 0;
3625 cap->arch.vcpu_conf_support =
3626 NVMM_CAP_ARCH_VCPU_CONF_CPUID |
3627 NVMM_CAP_ARCH_VCPU_CONF_TPR;
3628 cap->arch.xcr0_mask = vmx_xcr0_mask;
3629 cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
3630 cap->arch.conf_cpuid_maxops = VMX_NCPUIDS;
3631 }
3632
3633 const struct nvmm_impl nvmm_x86_vmx = {
3634 .name = "x86-vmx",
3635 .ident = vmx_ident,
3636 .init = vmx_init,
3637 .fini = vmx_fini,
3638 .suspend_interrupt = vmx_suspend_interrupt,
3639 .suspend = vmx_suspend,
3640 .resume = vmx_resume,
3641 .capability = vmx_capability,
3642 .mach_conf_max = NVMM_X86_MACH_NCONF,
3643 .mach_conf_sizes = NULL,
3644 .vcpu_conf_max = NVMM_X86_VCPU_NCONF,
3645 .vcpu_conf_sizes = vmx_vcpu_conf_sizes,
3646 .state_size = sizeof(struct nvmm_x64_state),
3647 .machine_create = vmx_machine_create,
3648 .machine_destroy = vmx_machine_destroy,
3649 .machine_configure = vmx_machine_configure,
3650 .vcpu_create = vmx_vcpu_create,
3651 .vcpu_destroy = vmx_vcpu_destroy,
3652 .vcpu_configure = vmx_vcpu_configure,
3653 .vcpu_setstate = vmx_vcpu_setstate,
3654 .vcpu_getstate = vmx_vcpu_getstate,
3655 .vcpu_inject = vmx_vcpu_inject,
3656 .vcpu_run = vmx_vcpu_run,
3657 .vcpu_suspend = vmx_vcpu_suspend,
3658 .vcpu_resume = vmx_vcpu_resume,
3659 };
3660