Lines matching full:vcpu (search hits from FreeBSD's bhyve Intel VMX backend, sys/amd64/vmm/intel/vmx.c; each hit is prefixed with its line number in that file)
816 * bitmap is currently per-VM rather than per-vCPU while the
818 * per-vCPU basis).
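
The two hits above come from a comment noting that the MSR bitmap is kept per-VM rather than per-vCPU. For orientation, here is a minimal user-space sketch of the Intel-defined 4 KB MSR-bitmap layout that such a bitmap follows; the function name and caller-supplied array are illustrative, not from the source.

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Sketch of the VMX MSR bitmap layout (Intel SDM):
     *   0x000-0x3ff  read intercepts,  MSRs 0x00000000-0x00001fff
     *   0x400-0x7ff  read intercepts,  MSRs 0xc0000000-0xc0001fff
     *   0x800-0xbff  write intercepts, low MSR range
     *   0xc00-0xfff  write intercepts, high MSR range
     * A set bit causes a VM exit on the access; a clear bit passes it through.
     */
    static int
    msr_bitmap_set(uint8_t bitmap[4096], uint32_t msr, int write, int intercept)
    {
            size_t base;

            if (msr <= 0x1fff) {
                    base = write ? 0x800 : 0x000;
            } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
                    base = write ? 0xc00 : 0x400;
                    msr &= 0x1fff;
            } else {
                    return (-1);    /* MSR not representable in the bitmap */
            }
            if (intercept)
                    bitmap[base + msr / 8] |= (uint8_t)(1u << (msr % 8));
            else
                    bitmap[base + msr / 8] &= (uint8_t)~(1u << (msr % 8));
            return (0);
    }
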
1130 vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
1134 struct vmx_vcpu *vcpu;
1141 vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO);
1142 vcpu->vmx = vmx;
1143 vcpu->vcpu = vcpu1;
1144 vcpu->vcpuid = vcpuid;
1145 vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX,
1147 vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX,
1149 vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64, M_VMX,
1152 vmcs = vcpu->vmcs;
1156 panic("vmx_init: vmclear error %d on vcpu %d\n",
1160 vmx_msr_guest_init(vmx, vcpu);
1167 error += vmwrite(VMCS_HOST_RSP, (u_long)&vcpu->ctx);
1171 if (vcpu_trap_wbinvd(vcpu->vcpu)) {
1191 if (vcpu_trace_exceptions(vcpu->vcpu))
1197 vcpu->ctx.guest_dr6 = DBREG_DR6_RESERVED1;
1201 error += vmwrite(VMCS_VIRTUAL_APIC, vtophys(vcpu->apic_page));
1213 error += vmwrite(VMCS_PIR_DESC, vtophys(vcpu->pir_desc));
1218 vcpu->cap.set = 0;
1219 vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0;
1220 vcpu->cap.set |= cap_rdtscp != 0 ? 1 << VM_CAP_RDTSCP : 0;
1221 vcpu->cap.proc_ctls = procbased_ctls;
1222 vcpu->cap.proc_ctls2 = procbased_ctls2;
1223 vcpu->cap.exc_bitmap = exc_bitmap;
1225 vcpu->state.nextrip = ~0;
1226 vcpu->state.lastcpu = NOCPU;
1227 vcpu->state.vpid = vpid;
1243 vcpu->ctx.pmap = vmx->pmap;
1245 return (vcpu);
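
The vmx_vcpu_init() hits above show the three per-vCPU allocations and their alignment requirements: the VMCS and the virtual-APIC page must be 4 KB aligned, and the posted-interrupt descriptor 64-byte aligned (both architectural constraints). A stand-alone sketch of the same pattern, with posix_memalign standing in for the kernel's malloc_aligned and deliberately simplified structure layouts:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Simplified stand-ins for the real per-vCPU structures. */
    struct vmcs { uint8_t data[PAGE_SIZE]; };
    struct pir_desc { uint64_t pir[4]; uint64_t pending; uint64_t unused[3]; };

    struct demo_vcpu {
            struct vmcs *vmcs;          /* must be 4 KB aligned */
            void *apic_page;            /* virtual-APIC page, 4 KB aligned */
            struct pir_desc *pir_desc;  /* must be 64-byte aligned */
    };

    static void *
    xmemalign(size_t align, size_t size)
    {
            void *p;

            if (posix_memalign(&p, align, size) != 0)
                    abort();            /* sketch only: no error recovery */
            memset(p, 0, size);         /* M_ZERO equivalent */
            return (p);
    }

    static void
    demo_vcpu_alloc(struct demo_vcpu *vcpu)
    {
            vcpu->vmcs = xmemalign(PAGE_SIZE, sizeof(*vcpu->vmcs));
            vcpu->apic_page = xmemalign(PAGE_SIZE, PAGE_SIZE);
            vcpu->pir_desc = xmemalign(64, sizeof(*vcpu->pir_desc));
    }
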
1249 vmx_handle_cpuid(struct vmx_vcpu *vcpu, struct vmxctx *vmxctx)
1253 handled = x86_emulate_cpuid(vcpu->vcpu, (uint64_t *)&vmxctx->guest_rax,
1260 vmx_run_trace(struct vmx_vcpu *vcpu)
1262 VMX_CTR1(vcpu, "Resume execution at %#lx", vmcs_guest_rip());
1266 vmx_exit_trace(struct vmx_vcpu *vcpu, uint64_t rip, uint32_t exit_reason,
1269 VMX_CTR3(vcpu, "%s %s vmexit at 0x%0lx",
1275 vmx_astpending_trace(struct vmx_vcpu *vcpu, uint64_t rip)
1277 VMX_CTR1(vcpu, "astpending vmexit at 0x%0lx", rip);
1287 vmx_invvpid(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap, int running)
1292 vmxstate = &vcpu->state;
1300 * This will invalidate TLB entries tagged with the vcpu's
1307 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1308 "critical section", __func__, vcpu->vcpuid));
1313 * We do this because this vcpu was executing on a different host
1320 * move the thread associated with this vcpu between host cpus.
1331 vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_DONE, 1);
1339 vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_SAVED, 1);
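
The vmx_invvpid() hits above belong to a comment explaining why a vCPU that last ran on a different host CPU must assume its VPID-tagged TLB entries on the current CPU are stale. A simplified single-threaded model of the decision follows; ept_flush_pending() and invvpid_single_context() are illustrative stand-ins for the EPT-generation check and the INVVPID instruction in the real code.

    #include <stdbool.h>
    #include <stdio.h>

    #define NOCPU (-1)

    struct demo_vcpu_state {
            int lastcpu;            /* host cpu this vcpu last ran on */
            unsigned short vpid;    /* 0 means VPID tagging is unused */
    };

    /* Stand-in: a broader EPT-wide flush is already queued for this cpu. */
    static bool ept_flush_pending(void) { return (false); }

    /* Stand-in for executing INVVPID in single-context mode. */
    static void
    invvpid_single_context(unsigned short vpid)
    {
            printf("flush TLB entries tagged vpid %u\n", vpid);
    }

    static void
    demo_invvpid(struct demo_vcpu_state *st, bool running)
    {
            if (st->vpid == 0)
                    return;
            if (!running) {
                    /*
                     * Defer: forgetting 'lastcpu' forces the migration
                     * path, and thus the flush, the next time the vcpu
                     * runs on any host cpu.
                     */
                    st->lastcpu = NOCPU;
                    return;
            }
            if (!ept_flush_pending())
                    invvpid_single_context(st->vpid);
            /* else the pending EPT flush invalidates these entries too */
    }
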
1344 vmx_set_pcpu_defaults(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap)
1348 vmxstate = &vcpu->state;
1354 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
1359 vmx_invvpid(vmx, vcpu, pmap, 1);
1368 vmx_set_int_window_exiting(struct vmx_vcpu *vcpu)
1371 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1372 vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1373 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
1374 VMX_CTR0(vcpu, "Enabling interrupt window exiting");
1379 vmx_clear_int_window_exiting(struct vmx_vcpu *vcpu)
1382 KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1383 ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls));
1384 vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1385 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
1386 VMX_CTR0(vcpu, "Disabling interrupt window exiting");
1390 vmx_set_nmi_window_exiting(struct vmx_vcpu *vcpu)
1393 if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1394 vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1395 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
1396 VMX_CTR0(vcpu, "Enabling NMI window exiting");
1401 vmx_clear_nmi_window_exiting(struct vmx_vcpu *vcpu)
1404 KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1405 ("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls));
1406 vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1407 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
1408 VMX_CTR0(vcpu, "Disabling NMI window exiting");
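
All four window-exiting helpers above follow one pattern: mutate the software-cached copy of the primary processor-based VM-execution controls, then write the same value to the VMCS so cache and hardware never diverge. A generic sketch of that shadow-field idiom (vmcs_write is stubbed; the bit positions are the architectural ones):

    #include <stdint.h>

    #define PROCBASED_INT_WINDOW_EXITING (1u << 2)   /* Intel SDM bit 2 */
    #define PROCBASED_NMI_WINDOW_EXITING (1u << 22)  /* Intel SDM bit 22 */

    static uint32_t vmcs_field;     /* stand-in for the hardware VMCS field */

    static void
    vmcs_write_stub(uint32_t val)
    {
            vmcs_field = val;
    }

    /* Flip one control bit, keeping the cached copy authoritative. */
    static void
    toggle_window_exiting(uint32_t *cached_ctls, uint32_t bit, int enable)
    {
            uint32_t newval = enable ? (*cached_ctls | bit)
                                     : (*cached_ctls & ~bit);

            if (newval != *cached_ctls) {
                    *cached_ctls = newval;
                    vmcs_write_stub(newval);
            }
    }
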
1412 vmx_set_tsc_offset(struct vmx_vcpu *vcpu, uint64_t offset)
1416 if ((vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
1417 vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET;
1418 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
1419 VMX_CTR0(vcpu, "Enabling TSC offsetting");
1425 vm_set_tsc_offset(vcpu->vcpu, offset);
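
vmx_set_tsc_offset() above enables the "use TSC offsetting" control before programming the offset; with that control active, the guest's RDTSC architecturally returns the host TSC plus the offset, modulo 2^64. A one-line model, plus the usual trick of a "negative" offset:

    #include <stdint.h>

    /* Guest-visible TSC under VMX TSC offsetting (no TSC scaling). */
    static inline uint64_t
    guest_tsc(uint64_t host_tsc, uint64_t tsc_offset)
    {
            return (host_tsc + tsc_offset);     /* wraps mod 2^64 */
    }

    /*
     * Example: to make a guest see TSC near 0 at boot when the host TSC
     * currently reads H, program tsc_offset = (uint64_t)-H.
     */
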
1436 vmx_inject_nmi(struct vmx_vcpu *vcpu)
1455 VMX_CTR0(vcpu, "Injecting vNMI");
1458 vm_nmi_clear(vcpu->vcpu);
1462 vmx_inject_interrupts(struct vmx_vcpu *vcpu, struct vlapic *vlapic,
1469 if (vcpu->cap.set & (1 << VM_CAP_MASK_HWINTR)) {
1473 if (vcpu->state.nextrip != guestrip) {
1476 VMX_CTR2(vcpu, "Guest interrupt blocking "
1478 vcpu->state.nextrip, guestrip);
1484 if (vm_entry_intinfo(vcpu->vcpu, &entryinfo)) {
1509 if (vm_nmi_pending(vcpu->vcpu)) {
1526 vmx_inject_nmi(vcpu);
1529 VMX_CTR1(vcpu, "Cannot inject NMI "
1533 VMX_CTR1(vcpu, "Cannot inject NMI due to "
1538 vmx_set_nmi_window_exiting(vcpu);
1541 extint_pending = vm_extint_pending(vcpu->vcpu);
1553 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1554 VMX_CTR0(vcpu, "Skip interrupt injection due to "
1574 vatpic_pending_intr(vcpu->vmx->vm, &vector);
1589 VMX_CTR2(vcpu, "Cannot inject vector %d due to "
1596 VMX_CTR2(vcpu, "Cannot inject vector %d due to "
1610 VMX_CTR2(vcpu, "Cannot inject vector %d due to "
1624 vm_extint_clear(vcpu->vcpu);
1625 vatpic_intr_accepted(vcpu->vmx->vm, vector);
1638 vmx_set_int_window_exiting(vcpu);
1641 VMX_CTR1(vcpu, "Injecting hwintr at vector %d", vector);
1650 vmx_set_int_window_exiting(vcpu);
1663 vmx_restore_nmi_blocking(struct vmx_vcpu *vcpu)
1667 VMX_CTR0(vcpu, "Restore Virtual-NMI blocking");
1674 vmx_clear_nmi_blocking(struct vmx_vcpu *vcpu)
1678 VMX_CTR0(vcpu, "Clear Virtual-NMI blocking");
1685 vmx_assert_nmi_blocking(struct vmx_vcpu *vcpu)
1695 vmx_emulate_xsetbv(struct vmx *vmx, struct vmx_vcpu *vcpu,
1702 vmxctx = &vcpu->ctx;
1713 vm_inject_gp(vcpu->vcpu);
1719 vm_inject_ud(vcpu->vcpu);
1725 vm_inject_gp(vcpu->vcpu);
1730 vm_inject_gp(vcpu->vcpu);
1737 vm_inject_gp(vcpu->vcpu);
1748 vm_inject_gp(vcpu->vcpu);
1758 vm_inject_gp(vcpu->vcpu);
1772 vmx_get_guest_reg(struct vmx_vcpu *vcpu, int ident)
1776 vmxctx = &vcpu->ctx;
1817 vmx_set_guest_reg(struct vmx_vcpu *vcpu, int ident, uint64_t regval)
1821 vmxctx = &vcpu->ctx;
1878 vmx_emulate_cr0_access(struct vmx_vcpu *vcpu, uint64_t exitqual)
1886 regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf);
1916 vmx_emulate_cr4_access(struct vmx_vcpu *vcpu, uint64_t exitqual)
1924 regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf);
1936 vmx_emulate_cr8_access(struct vmx *vmx, struct vmx_vcpu *vcpu,
1948 vlapic = vm_lapic(vcpu->vcpu);
1952 vmx_set_guest_reg(vcpu, regnum, cr8);
1954 cr8 = vmx_get_guest_reg(vcpu, regnum);
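
The CR8-access hits above route through the vLAPIC because CR8 is architecturally an alias of the local APIC's task-priority register: TPR bits 7:4 hold CR8 bits 3:0. A minimal conversion sketch (the helper names are illustrative):

    #include <stdint.h>

    /* CR8 carries the 4-bit priority class; the TPR stores it in bits 7:4. */
    static inline uint8_t
    cr8_to_tpr(uint64_t cr8)
    {
            return ((uint8_t)((cr8 & 0xf) << 4));
    }

    static inline uint64_t
    tpr_to_cr8(uint8_t tpr)
    {
            return ((uint64_t)(tpr >> 4));
    }
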
2010 inout_str_index(struct vmx_vcpu *vcpu, int in)
2017 error = vmx_getreg(vcpu, reg, &val);
2023 inout_str_count(struct vmx_vcpu *vcpu, int rep)
2029 error = vmx_getreg(vcpu, VM_REG_GUEST_RCX, &val);
2056 inout_str_seginfo(struct vmx_vcpu *vcpu, uint32_t inst_info, int in,
2068 error = vmx_getdesc(vcpu, vis->seg_name, &vis->seg_desc);
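
inout_str_index() and inout_str_count() above fetch RDI/RSI and RCX for string I/O emulation; both values must then be truncated to the instruction's address size. A sketch of that masking, with size2mask as an illustrative helper (the source keeps an equivalent in its instruction-emulation layer):

    #include <stdint.h>

    /* Mask covering an operand of the given size in bytes (2, 4, or 8). */
    static inline uint64_t
    size2mask(int addrsize)
    {
            switch (addrsize) {
            case 2:
                    return (0xffff);
            case 4:
                    return (0xffffffff);
            default:
                    return (~(uint64_t)0);
            }
    }

    /* e.g. a REP OUTS with 32-bit addressing counts only via ECX. */
    static inline uint64_t
    rep_count(uint64_t rcx, int addrsize)
    {
            return (rcx & size2mask(addrsize));
    }
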
2157 apic_access_virtualization(struct vmx_vcpu *vcpu)
2161 proc_ctls2 = vcpu->cap.proc_ctls2;
2166 x2apic_virtualization(struct vmx_vcpu *vcpu)
2170 proc_ctls2 = vcpu->cap.proc_ctls2;
2175 vmx_handle_apic_write(struct vmx_vcpu *vcpu, struct vlapic *vlapic,
2185 if (!apic_access_virtualization(vcpu)) {
2193 if (x2apic_virtualization(vcpu) &&
2243 apic_access_fault(struct vmx_vcpu *vcpu, uint64_t gpa)
2246 if (apic_access_virtualization(vcpu) &&
2254 vmx_handle_apic_access(struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
2259 if (!apic_access_virtualization(vcpu))
2340 emulate_wrmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t val, bool *retu)
2345 error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
2347 error = vmx_wrmsr(vcpu, num, val, retu);
2353 emulate_rdmsr(struct vmx_vcpu *vcpu, u_int num, bool *retu)
2361 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
2363 error = vmx_rdmsr(vcpu, num, &result, retu);
2367 vmxctx = &vcpu->ctx;
2380 vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
2399 vmxctx = &vcpu->ctx;
2401 vcpuid = vcpu->vcpuid;
2408 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);
2418 VMX_CTR0(vcpu, "Handling MCE during VM-entry");
2439 error = vm_exit_intinfo(vcpu->vcpu, exitintinfo);
2457 vmx_clear_nmi_blocking(vcpu);
2459 vmx_assert_nmi_blocking(vcpu);
2513 VMX_CTR4(vcpu, "task switch reason %d, tss 0x%04x, "
2519 vmm_stat_incr(vcpu->vcpu, VMEXIT_CR_ACCESS, 1);
2523 handled = vmx_emulate_cr0_access(vcpu, qual);
2526 handled = vmx_emulate_cr4_access(vcpu, qual);
2529 handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2534 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
2537 VMX_CTR1(vcpu, "rdmsr 0x%08x", ecx);
2539 error = emulate_rdmsr(vcpu, ecx, &retu);
2552 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
2557 VMX_CTR2(vcpu, "wrmsr 0x%08x value 0x%016lx",
2561 error = emulate_wrmsr(vcpu, ecx, (uint64_t)edx << 32 | eax,
2576 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
2587 vmm_stat_incr(vcpu->vcpu, VMEXIT_MTRAP, 1);
2593 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
2598 vmm_stat_incr(vcpu->vcpu, VMEXIT_INTR_WINDOW, 1);
2600 vmx_clear_int_window_exiting(vcpu);
2631 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
2636 if (vm_nmi_pending(vcpu->vcpu))
2637 vmx_inject_nmi(vcpu);
2638 vmx_clear_nmi_window_exiting(vcpu);
2639 vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1);
2642 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
2657 vis->index = inout_str_index(vcpu, in);
2658 vis->count = inout_str_count(vcpu, vis->inout.rep);
2660 inout_str_seginfo(vcpu, inst_info, in, vis);
2665 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
2667 handled = vmx_handle_cpuid(vcpu, vmxctx);
2670 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
2690 vmx_restore_nmi_blocking(vcpu);
2703 VMX_CTR0(vcpu, "Vectoring to MCE handler");
2713 (vcpu->cap.set & (1 << VM_CAP_BPT_EXIT))) {
2741 VMX_CTR2(vcpu, "Reflecting exception %d/%#x into "
2745 error = vm_inject_exception(vcpu->vcpu, intr_vec,
2758 if (vm_mem_allocated(vcpu->vcpu, gpa) ||
2759 apic_access_fault(vcpu, gpa)) {
2764 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
2769 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
2783 vmx_restore_nmi_blocking(vcpu);
2793 handled = vmx_handle_apic_access(vcpu, vmexit);
2801 vlapic = vm_lapic(vcpu->vcpu);
2804 handled = vmx_handle_apic_write(vcpu, vlapic, qual);
2808 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2819 vlapic = vm_lapic(vcpu->vcpu);
2845 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
2920 vmx_exit_handle_nmi(struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
2936 VMX_CTR0(vcpu, "Vectoring to NMI handler");
3038 struct vmx_vcpu *vcpu;
3047 vcpu = vcpui;
3048 vmx = vcpu->vmx;
3049 vmcs = vcpu->vmcs;
3050 vmxctx = &vcpu->ctx;
3051 vlapic = vm_lapic(vcpu->vcpu);
3052 vmexit = vm_exitinfo(vcpu->vcpu);
3058 vmx_msr_guest_enter(vcpu);
3073 vmx_set_pcpu_defaults(vmx, vcpu, pmap);
3098 vmx_inject_interrupts(vcpu, vlapic, rip);
3101 * Check for vcpu suspension after injecting events because
3102 * vmx_inject_interrupts() can suspend the vcpu due to a
3107 vm_exit_suspended(vcpu->vcpu, rip);
3111 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
3113 vm_exit_rendezvous(vcpu->vcpu, rip);
3119 vm_exit_reqidle(vcpu->vcpu, rip);
3123 if (vcpu_should_yield(vcpu->vcpu)) {
3125 vm_exit_astpending(vcpu->vcpu, rip);
3126 vmx_astpending_trace(vcpu, rip);
3131 if (vcpu_debugged(vcpu->vcpu)) {
3133 vm_exit_debug(vcpu->vcpu, rip);
3142 if ((vcpu->cap.proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) {
3178 vmx_msr_guest_enter_tsc_aux(vmx, vcpu);
3188 vmx_run_trace(vcpu);
3193 vmx_msr_guest_exit_tsc_aux(vmx, vcpu);
3206 vcpu->state.nextrip = rip;
3209 vmx_exit_handle_nmi(vcpu, vmexit);
3211 handled = vmx_exit_process(vmx, vcpu, vmexit);
3217 vmx_exit_trace(vcpu, rip, exit_reason, handled);
3231 VMX_CTR1(vcpu, "returning from vmx_run: exitcode %d",
3235 vmx_msr_guest_exit(vcpu);
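
The vmx_run() hits above show the fixed order of pre-entry checks, performed after event injection because, as the quoted comment says, vmx_inject_interrupts() can itself suspend the vCPU: suspension, rendezvous, request-idle, pending AST, and debug, each of which aborts the entry with a synthesized exit. A condensed model of that ordering, with the conditions packed into a plain struct instead of the real per-vCPU queries:

    #include <stdbool.h>

    enum exitcode {
            EXIT_NONE, EXIT_SUSPENDED, EXIT_RENDEZVOUS,
            EXIT_REQIDLE, EXIT_ASTPENDING, EXIT_DEBUG,
    };

    /* Stand-ins for the per-vcpu condition queries in the source. */
    struct pending_events {
            bool suspended, rendezvous, reqidle, astpending, debugged;
    };

    /* Run after event injection; any hit aborts guest entry. */
    static enum exitcode
    pre_entry_checks(const struct pending_events *ev)
    {
            if (ev->suspended)
                    return (EXIT_SUSPENDED);
            if (ev->rendezvous)
                    return (EXIT_RENDEZVOUS);
            if (ev->reqidle)
                    return (EXIT_REQIDLE);
            if (ev->astpending)
                    return (EXIT_ASTPENDING);
            if (ev->debugged)
                    return (EXIT_DEBUG);
            return (EXIT_NONE);     /* safe to resume the guest */
    }
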
3243 struct vmx_vcpu *vcpu = vcpui;
3245 vpid_free(vcpu->state.vpid);
3246 free(vcpu->pir_desc, M_VMX);
3247 free(vcpu->apic_page, M_VMX);
3248 free(vcpu->vmcs, M_VMX);
3249 free(vcpu, M_VMX);
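
vmx_vcpu_cleanup() above releases exactly what vmx_vcpu_init() allocated, after first returning the VPID to its allocator. Continuing the demo_vcpu sketch shown after the init hits, the user-space analogue is just the frees:

    static void
    demo_vcpu_free(struct demo_vcpu *vcpu)
    {
            /* Mirrors the release order in the hits above. */
            free(vcpu->pir_desc);
            free(vcpu->apic_page);
            free(vcpu->vmcs);
    }
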
3344 vmx_get_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t *retval)
3349 error = vmcs_getreg(vcpu->vmcs, running,
3356 vmx_modify_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t val)
3363 * Forcing the vcpu into an interrupt shadow is not supported.
3370 vmcs = vcpu->vmcs;
3378 VMX_CTR2(vcpu, "Setting intr_shadow to %#lx %s", val,
3408 struct vmx_vcpu *vcpu = vcpui;
3409 struct vmx *vmx = vcpu->vmx;
3411 running = vcpu_is_running(vcpu->vcpu, &hostcpu);
3414 vcpu->vcpuid);
3418 return (vmx_get_intr_shadow(vcpu, running, retval));
3420 *retval = vcpu->guest_msrs[IDX_MSR_KGSBASE];
3423 *retval = vlapic_get_cr8(vm_lapic(vcpu->vcpu));
3427 if (vmxctx_getreg(&vcpu->ctx, reg, retval) == 0)
3430 return (vmcs_getreg(vcpu->vmcs, running, reg, retval));
3439 struct vmx_vcpu *vcpu = vcpui;
3440 struct vmx *vmx = vcpu->vmx;
3442 running = vcpu_is_running(vcpu->vcpu, &hostcpu);
3445 vcpu->vcpuid);
3448 return (vmx_modify_intr_shadow(vcpu, running, val));
3450 if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0)
3457 error = vmcs_setreg(vcpu->vmcs, running, reg, val);
3467 vmcs_getreg(vcpu->vmcs, running,
3473 vmcs_setreg(vcpu->vmcs, running,
3482 error = vmcs_setreg(vcpu->vmcs, running,
3488 * Invalidate the guest vcpu's TLB mappings to emulate
3494 pmap = vcpu->ctx.pmap;
3495 vmx_invvpid(vmx, vcpu, pmap, running);
3506 struct vmx_vcpu *vcpu = vcpui;
3507 struct vmx *vmx = vcpu->vmx;
3509 running = vcpu_is_running(vcpu->vcpu, &hostcpu);
3512 vcpu->vcpuid);
3514 return (vmcs_getdesc(vcpu->vmcs, running, reg, desc));
3521 struct vmx_vcpu *vcpu = vcpui;
3522 struct vmx *vmx = vcpu->vmx;
3524 running = vcpu_is_running(vcpu->vcpu, &hostcpu);
3527 vcpu->vcpuid);
3529 return (vmcs_setdesc(vcpu->vmcs, running, reg, desc));
3535 struct vmx_vcpu *vcpu = vcpui;
3541 vcap = vcpu->cap.set;
3589 struct vmx_vcpu *vcpu = vcpui;
3590 struct vmcs *vmcs = vcpu->vmcs;
3606 pptr = &vcpu->cap.proc_ctls;
3615 pptr = &vcpu->cap.proc_ctls;
3624 pptr = &vcpu->cap.proc_ctls;
3644 pptr = &vcpu->cap.proc_ctls2;
3653 pptr = &vcpu->cap.proc_ctls2;
3663 if (vcpu->cap.exc_bitmap != 0xffffffff) {
3664 pptr = &vcpu->cap.exc_bitmap;
3673 vlapic = vm_lapic(vcpu->vcpu);
3707 vcpu->cap.set |= (1 << type);
3709 vcpu->cap.set &= ~(1 << type);
3730 struct vmx_vcpu *vcpu;
3765 * modified if the vcpu is running.
3777 * vCPU is HLTed with a high PPR, a low priority interrupt would cause
3778 * the 0->1 'pending' transition with a notification, but the vCPU
3779 * would ignore the interrupt for the time being. The same vCPU would
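
The hits above come from the comment describing a posted-interrupt corner case: the notification IPI is sent only on the descriptor's 0-to-1 'pending' transition, so a vCPU halted with a high PPR could absorb that one notification for a low-priority vector and then never be re-notified when a higher-priority vector arrives. The driver works around this by tracking which priority classes have been posted and notifying again when a strictly higher class shows up. A simplified single-threaded model of that decision (the source does this with atomics on a field it calls pending_prio):

    #include <stdbool.h>
    #include <stdint.h>

    /* One bit per 16-vector priority class, like VPR_PRIO_BIT in the source. */
    #define PRIO_BIT(vector) (1u << ((vector) >> 4))

    /*
     * Decide whether posting 'vector' should send a notification IPI.
     * first_pending is true when this post made the 0->1 transition.
     */
    static bool
    should_notify(uint32_t *pending_prio, int vector, bool first_pending)
    {
            uint32_t prio_bit = PRIO_BIT(vector);

            if (first_pending) {            /* fresh transition: always notify */
                    *pending_prio = prio_bit;
                    return (true);
            }
            /* Re-notify only for a class above everything recorded so far. */
            if ((*pending_prio & prio_bit) == 0 && prio_bit > *pending_prio) {
                    *pending_prio |= prio_bit;
                    return (true);
            }
            return (false);
    }
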
3835 vmexit = vm_exitinfo(vlapic->vcpu);
3870 * processor priority of this vCPU, ensure that 'pending_prio' does not
3901 KASSERT(!vcpu_is_running(vlapic->vcpu, NULL),
3902 ("vmx_set_tmr: vcpu cannot be running"));
3905 vmcs = vlapic_vtx->vcpu->vmcs;
3922 struct vmx_vcpu *vcpu;
3927 vcpu = vlapic_vtx->vcpu;
3928 vmcs = vcpu->vmcs;
3930 proc_ctls = vcpu->cap.proc_ctls;
3934 vcpu->cap.proc_ctls = proc_ctls;
3946 struct vmx_vcpu *vcpu;
3952 vcpu = vlapic_vtx->vcpu;
3953 vmx = vcpu->vmx;
3954 vmcs = vcpu->vmcs;
3956 proc_ctls2 = vcpu->cap.proc_ctls2;
3962 vcpu->cap.proc_ctls2 = proc_ctls2;
3979 * once in the context of vcpu 0.
4092 struct vmx_vcpu *vcpu;
4096 vcpu = vcpui;
4097 vmx = vcpu->vmx;
4101 vlapic->vcpu = vcpu->vcpu;
4102 vlapic->vcpuid = vcpu->vcpuid;
4103 vlapic->apic_page = (struct LAPIC *)vcpu->apic_page;
4106 vlapic_vtx->pir_desc = vcpu->pir_desc;
4107 vlapic_vtx->vcpu = vcpu;
4143 struct vmx_vcpu *vcpu;
4148 vcpu = vcpui;
4149 vmx = vcpu->vmx;
4150 vmcs = vcpu->vmcs;
4152 run = vcpu_is_running(vcpu->vcpu, &hostcpu);
4155 vcpu->vcpuid);
4214 SNAPSHOT_BUF_OR_LEAVE(vcpu->guest_msrs,
4215 sizeof(vcpu->guest_msrs), meta, err, done);
4217 SNAPSHOT_BUF_OR_LEAVE(vcpu->pir_desc,
4218 sizeof(*vcpu->pir_desc), meta, err, done);
4220 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr,
4221 sizeof(vcpu->mtrr), meta, err, done);
4223 vmxctx = &vcpu->ctx;
4253 struct vmx_vcpu *vcpu = vcpui;
4258 vmx = vcpu->vmx;
4259 vmcs = vcpu->vmcs;
4261 running = vcpu_is_running(vcpu->vcpu, &hostcpu);
4264 vcpu->vcpuid);
4271 error = vmx_set_tsc_offset(vcpu, offset);