Lines matching full:vcpu (each entry gives the source line number, then the matching line)
294 svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset)
298 ctrl = svm_get_vmcb_ctrl(vcpu);
301 svm_set_dirty(vcpu, VMCB_CACHE_I);
302 SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset);
304 vm_set_tsc_offset(vcpu->vcpu, offset);
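
The offset handling above follows the SVM model in which the guest observes the host TSC plus the VMCB TSC offset. A minimal userland sketch of that arithmetic; guest_tsc() is a hypothetical helper and the numbers are made up:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: what the guest observes on rdtsc. */
static uint64_t
guest_tsc(uint64_t host_tsc, uint64_t offset)
{
    return (host_tsc + offset);     /* wraps modulo 2^64 by design */
}

int
main(void)
{
    uint64_t host_tsc = 123456789;
    /* To start the guest TSC at zero, use offset = -host_tsc. */
    uint64_t offset = 0 - host_tsc;

    printf("guest tsc = %llu\n",
        (unsigned long long)guest_tsc(host_tsc, offset));
    return (0);
}
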
354 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
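
The comment at source line 354 refers to the MSR permission bitmap: per the AMD APM, each MSR in three architectural ranges is covered by two consecutive bits (read intercept, then write intercept). A self-contained sketch of the lookup and the passthrough operation; msr_to_bitoff() and msr_allow_passthrough() are hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSRPM_SIZE  (8 * 1024)      /* three 2 KB blocks plus reserved space */

/* Map an MSR number to its bit offset in the bitmap, or -1 if uncovered. */
static int
msr_to_bitoff(uint32_t msr)
{
    uint32_t base;

    if (msr <= 0x1fff) {
        base = 0x0000 * 8;          /* MSRs 0x0 - 0x1fff */
    } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
        base = 0x0800 * 8;          /* MSRs 0xc0000000 - 0xc0001fff */
        msr -= 0xc0000000;
    } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
        base = 0x1000 * 8;          /* MSRs 0xc0010000 - 0xc0011fff */
        msr -= 0xc0010000;
    } else {
        return (-1);
    }
    return (base + msr * 2);        /* 2 bits per MSR: read, then write */
}

/* Clear both intercept bits so the guest accesses 'msr' without exiting. */
static int
msr_allow_passthrough(uint8_t *msrpm, uint32_t msr)
{
    int off = msr_to_bitoff(msr);

    if (off < 0)
        return (-1);
    msrpm[off / 8] &= ~(1 << (off % 8));    /* read intercept */
    off++;
    msrpm[off / 8] &= ~(1 << (off % 8));    /* write intercept */
    return (0);
}

int
main(void)
{
    uint8_t msrpm[MSRPM_SIZE];

    memset(msrpm, 0xff, sizeof(msrpm));         /* start fully intercepted */
    msr_allow_passthrough(msrpm, 0xc0000100);   /* e.g. MSR_FSBASE */
    printf("bitmap byte for 0xc0000100: %#x\n",
        msrpm[(0x800 * 8 + 0x100 * 2) / 8]);
    return (0);
}
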
390 svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask)
396 ctrl = svm_get_vmcb_ctrl(vcpu);
401 svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled)
408 ctrl = svm_get_vmcb_ctrl(vcpu);
417 svm_set_dirty(vcpu, VMCB_CACHE_I);
418 SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx,
424 svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
427 svm_set_intercept(vcpu, off, bitmask, 0);
431 svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
434 svm_set_intercept(vcpu, off, bitmask, 1);
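
The get/set/enable/disable helpers above share one pattern: the VMCB control area holds an array of 32-bit intercept words, and each call tests or toggles a bitmask within one word, marking the VMCB intercept cache field dirty on change. A userland sketch with a stand-in structure (fake_ctrl and VMCB_NUM_INTCPT are not the real VMCB layout):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VMCB_NUM_INTCPT 5           /* stand-in for the real word count */

struct fake_ctrl {
    uint32_t intercept[VMCB_NUM_INTCPT];
};

static int
get_intercept(struct fake_ctrl *ctrl, int idx, uint32_t bitmask)
{
    assert(idx >= 0 && idx < VMCB_NUM_INTCPT);
    return ((ctrl->intercept[idx] & bitmask) == bitmask);
}

static void
set_intercept(struct fake_ctrl *ctrl, int idx, uint32_t bitmask, int enabled)
{
    uint32_t oldval;

    assert(idx >= 0 && idx < VMCB_NUM_INTCPT);
    oldval = ctrl->intercept[idx];
    if (enabled)
        ctrl->intercept[idx] |= bitmask;
    else
        ctrl->intercept[idx] &= ~bitmask;
    if (ctrl->intercept[idx] != oldval) {
        /* The real code marks the VMCB 'I' cache field dirty here. */
        printf("intercept[%d]: %#x -> %#x\n", idx, oldval,
            ctrl->intercept[idx]);
    }
}

int
main(void)
{
    struct fake_ctrl ctrl = { .intercept = { 0 } };

    set_intercept(&ctrl, 1, 1u << 4, 1);    /* enable one intercept bit */
    printf("enabled: %d\n", get_intercept(&ctrl, 1, 1u << 4));
    set_intercept(&ctrl, 1, 1u << 4, 0);    /* and disable it again */
    return (0);
}
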
438 vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
446 ctrl = svm_get_vmcb_ctrl(vcpu);
447 state = svm_get_vmcb_state(vcpu);
463 svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask);
465 svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask);
472 if (vcpu_trace_exceptions(vcpu->vcpu)) {
480 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n));
483 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
487 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
488 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
489 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
490 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
491 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
492 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
493 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
494 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
495 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE);
496 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
497 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
499 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
500 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
506 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
507 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
508 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
509 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
510 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
511 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
512 if (vcpu_trap_wbinvd(vcpu->vcpu)) {
513 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT,
521 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
617 svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
620 struct svm_vcpu *vcpu;
622 vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO);
623 vcpu->sc = sc;
624 vcpu->vcpu = vcpu1;
625 vcpu->vcpuid = vcpuid;
626 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM,
628 vcpu->nextrip = ~0;
629 vcpu->lastcpu = NOCPU;
630 vcpu->vmcb_pa = vtophys(vcpu->vmcb);
631 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap),
633 svm_msr_guest_init(sc, vcpu);
634 return (vcpu);
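
svm_vcpu_init() above allocates the VMCB page-aligned because VMRUN consumes the VMCB by physical address. A userland illustration of the same pattern, assuming C11 aligned_alloc as a stand-in for malloc_aligned and a faked "physical" address in place of vtophys():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct vmcb { uint8_t bytes[PAGE_SIZE]; };

struct my_vcpu {
    struct vmcb *vmcb;
    uint64_t vmcb_pa;       /* physical address handed to VMRUN */
    uint64_t nextrip;
    int lastcpu;
};

static struct my_vcpu *
vcpu_init(void)
{
    struct my_vcpu *v = calloc(1, sizeof(*v));

    if (v == NULL)
        return (NULL);
    v->vmcb = aligned_alloc(PAGE_SIZE, sizeof(struct vmcb));
    if (v->vmcb == NULL) {
        free(v);
        return (NULL);
    }
    memset(v->vmcb, 0, sizeof(struct vmcb));
    v->vmcb_pa = (uint64_t)(uintptr_t)v->vmcb;  /* stand-in for vtophys() */
    v->nextrip = ~(uint64_t)0;                  /* "unknown", as above */
    v->lastcpu = -1;                            /* NOCPU */
    return (v);
}

int
main(void)
{
    struct my_vcpu *v = vcpu_init();

    if (v == NULL)
        return (1);
    printf("vmcb %% PAGE_SIZE = %llu\n",
        (unsigned long long)(v->vmcb_pa % PAGE_SIZE));
    free(v->vmcb);
    free(v);
    return (0);
}
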
729 svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in,
742 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc);
783 svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit)
792 state = svm_get_vmcb_state(vcpu);
793 ctrl = svm_get_vmcb_ctrl(vcpu);
794 regs = svm_get_guest_regctx(vcpu);
820 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging);
826 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis);
937 * Inject an event to vcpu as described in section 15.20, "Event injection".
940 svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector,
945 ctrl = svm_get_vmcb_ctrl(vcpu);
970 SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x",
973 SVM_CTR2(vcpu, "Injecting %s at vector %d",
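
svm_eventinject() fills the VMCB EVENTINJ field, which per the AMD APM's "Event Injection" section packs the vector in bits 7:0, the event type in bits 10:8, error-code-valid in bit 11, valid in bit 31, and the error code in bits 63:32. A sketch of that packing (constant and function names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define EVENTINJ_TYPE_INTR      0ULL    /* external interrupt */
#define EVENTINJ_TYPE_NMI       2ULL
#define EVENTINJ_TYPE_EXCEPTION 3ULL
#define EVENTINJ_EC_VALID       (1ULL << 11)
#define EVENTINJ_VALID          (1ULL << 31)

static uint64_t
pack_eventinj(uint64_t type, uint8_t vector, uint32_t errcode, int ec_valid)
{
    uint64_t v;

    v = EVENTINJ_VALID | (type << 8) | vector;
    if (ec_valid)
        v |= EVENTINJ_EC_VALID | ((uint64_t)errcode << 32);
    return (v);
}

int
main(void)
{
    /* Inject #GP (vector 13) with error code 0. */
    printf("eventinj = %#llx\n", (unsigned long long)
        pack_eventinj(EVENTINJ_TYPE_EXCEPTION, 13, 0, 1));
    return (0);
}
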
979 svm_update_virqinfo(struct svm_vcpu *vcpu)
984 vlapic = vm_lapic(vcpu->vcpu);
985 ctrl = svm_get_vmcb_ctrl(vcpu);
996 svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
1001 ctrl = svm_get_vmcb_ctrl(vcpu);
1012 SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo,
1014 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1);
1015 vm_exit_intinfo(vcpu->vcpu, intinfo);
1020 vintr_intercept_enabled(struct svm_vcpu *vcpu)
1023 return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR));
1028 enable_intr_window_exiting(struct svm_vcpu *vcpu)
1032 ctrl = svm_get_vmcb_ctrl(vcpu);
1036 KASSERT(vintr_intercept_enabled(vcpu),
1041 SVM_CTR0(vcpu, "Enable intr window exiting");
1045 svm_set_dirty(vcpu, VMCB_CACHE_TPR);
1046 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1050 disable_intr_window_exiting(struct svm_vcpu *vcpu)
1054 ctrl = svm_get_vmcb_ctrl(vcpu);
1057 KASSERT(!vintr_intercept_enabled(vcpu),
1062 SVM_CTR0(vcpu, "Disable intr window exiting");
1065 svm_set_dirty(vcpu, VMCB_CACHE_TPR);
1066 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1070 svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val)
1075 ctrl = svm_get_vmcb_ctrl(vcpu);
1080 SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval);
1086 svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val)
1090 ctrl = svm_get_vmcb_ctrl(vcpu);
1098 * to track when the vcpu is done handling the NMI.
1101 nmi_blocked(struct svm_vcpu *vcpu)
1105 blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1110 enable_nmi_blocking(struct svm_vcpu *vcpu)
1113 KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked"));
1114 SVM_CTR0(vcpu, "vNMI blocking enabled");
1115 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1119 clear_nmi_blocking(struct svm_vcpu *vcpu)
1123 KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked"));
1124 SVM_CTR0(vcpu, "vNMI blocking cleared");
1126 * When the IRET intercept is cleared the vcpu will attempt to execute
1128 * another NMI into the vcpu before the "iret" has actually executed.
1132 * the vcpu it will be injected into the guest.
1136 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1142 error = svm_modify_intr_shadow(vcpu, 1);
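
The matches above show how NMI blocking is tracked without dedicated hardware state: blocking is equivalent to the IRET intercept being enabled, and clearing it raises a one-instruction interrupt shadow so a pending NMI cannot be injected before the guest's iret actually retires. A small model of that state machine, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct vnmi_state {
    bool iret_intercept;    /* set => vNMI blocked */
    bool intr_shadow;       /* blocks injection for one instruction */
};

static bool
nmi_blocked(const struct vnmi_state *s)
{
    return (s->iret_intercept);
}

static void
enable_nmi_blocking(struct vnmi_state *s)
{
    s->iret_intercept = true;       /* set when an NMI is injected */
}

static void
clear_nmi_blocking(struct vnmi_state *s)
{
    s->iret_intercept = false;
    /*
     * The guest is about to retire its iret; shadow one instruction
     * so another NMI is not injected before the iret completes.
     */
    s->intr_shadow = true;
}

int
main(void)
{
    struct vnmi_state s = { false, false };

    enable_nmi_blocking(&s);        /* NMI delivered to the guest */
    printf("blocked: %d\n", nmi_blocked(&s));
    clear_nmi_blocking(&s);         /* guest executed iret -> #VMEXIT */
    printf("blocked: %d shadow: %d\n", nmi_blocked(&s), s.intr_shadow);
    return (0);
}
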
1149 svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
1157 state = svm_get_vmcb_state(vcpu);
1160 SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);
1184 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE))
1193 vme = vm_exitinfo(vcpu->vcpu);
1200 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR))
1205 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE))
1209 error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval);
1213 vm_inject_gp(vcpu->vcpu);
1218 emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
1224 error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
1226 error = svm_write_efer(sc, vcpu, val, retu);
1228 error = svm_wrmsr(vcpu, num, val, retu);
1234 emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu)
1242 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
1244 error = svm_rdmsr(vcpu, num, &result, retu);
1247 state = svm_get_vmcb_state(vcpu);
1248 ctx = svm_get_guest_regctx(vcpu);
1331 svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
1343 ctx = svm_get_guest_regctx(vcpu);
1344 vmcb = svm_get_vmcb(vcpu);
1357 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);
1376 svm_update_virqinfo(vcpu);
1377 svm_save_intinfo(svm_sc, vcpu);
1385 clear_nmi_blocking(vcpu);
1389 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1);
1393 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
1400 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
1410 SVM_CTR0(vcpu, "Vectoring to MCE handler");
1414 error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2);
1438 svm_getreg(vcpu, VM_REG_GUEST_DR6, &dr6);
1440 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) {
1445 if (vcpu->dbg.popf_sstep) {
1452 vcpu->dbg.popf_sstep = 0;
1459 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS,
1461 vcpu->dbg.rflags_tf = rflags & PSL_T;
1462 } else if (vcpu->dbg.pushf_sstep) {
1467 vcpu->dbg.pushf_sstep = 0;
1477 vcpu->dbg.rflags_tf;
1478 svm_paging_info(svm_get_vmcb(vcpu),
1484 error = svm_setreg(vcpu, VM_REG_GUEST_DR6, dr6);
1511 SVM_CTR2(vcpu, "Reset inst_length from %d "
1528 SVM_CTR2(vcpu, "Reflecting exception "
1530 error = vm_inject_exception(vcpu->vcpu, idtvec,
1544 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
1546 SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val);
1547 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
1558 SVM_CTR1(vcpu, "rdmsr %#x", ecx);
1559 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
1560 if (emulate_rdmsr(vcpu, ecx, &retu)) {
1572 handled = svm_handle_io(vcpu, vmexit);
1573 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
1576 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
1577 handled = x86_emulate_cpuid(vcpu->vcpu,
1582 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
1588 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
1593 SVM_CTR2(vcpu, "nested page fault with "
1596 } else if (vm_mem_allocated(vcpu->vcpu, info2)) {
1600 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
1601 SVM_CTR3(vcpu, "nested page fault "
1606 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
1607 SVM_CTR3(vcpu, "inst_emul fault "
1619 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
1622 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
1626 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
1629 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T));
1632 vcpu->dbg.pushf_sstep = 1;
1638 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
1641 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
1645 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
1648 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T));
1649 vcpu->dbg.popf_sstep = 1;
1664 vm_inject_ud(vcpu->vcpu);
1673 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
1677 SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d",
1703 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
1707 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo))
1713 svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
1717 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1);
1718 SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo);
1725 svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
1734 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) {
1738 state = svm_get_vmcb_state(vcpu);
1739 ctrl = svm_get_vmcb_ctrl(vcpu);
1743 if (vcpu->nextrip != state->rip) {
1745 SVM_CTR2(vcpu, "Guest interrupt blocking "
1747 vcpu->nextrip, state->rip);
1751 * Inject pending events or exceptions for this vcpu.
1759 svm_inj_intinfo(sc, vcpu);
1762 if (vm_nmi_pending(vcpu->vcpu)) {
1763 if (nmi_blocked(vcpu)) {
1768 SVM_CTR0(vcpu, "Cannot inject NMI due "
1772 * Can't inject an NMI if the vcpu is in an intr_shadow.
1774 SVM_CTR0(vcpu, "Cannot inject NMI due to "
1783 SVM_CTR1(vcpu, "Cannot inject NMI due to "
1798 vm_nmi_clear(vcpu->vcpu);
1801 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI,
1805 enable_nmi_blocking(vcpu);
1807 SVM_CTR0(vcpu, "Injecting vNMI");
1811 extint_pending = vm_extint_pending(vcpu->vcpu);
1829 SVM_CTR2(vcpu, "Cannot inject vector %d due to "
1836 SVM_CTR1(vcpu, "Cannot inject vector %d due to "
1843 SVM_CTR2(vcpu, "Cannot inject vector %d due to "
1849 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
1854 vm_extint_clear(vcpu->vcpu);
1859 * Force a VM-exit as soon as the vcpu is ready to accept another
1880 SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x",
1883 svm_set_dirty(vcpu, VMCB_CACHE_TPR);
1901 enable_intr_window_exiting(vcpu);
1903 disable_intr_window_exiting(vcpu);
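
Condensing the injection logic visible in svm_inj_interrupts(): latched exitintinfo is replayed first, then an NMI if neither blocked nor shadowed, then a maskable interrupt if RFLAGS.IF allows it; when an interrupt is pending but cannot be delivered, interrupt-window exiting is armed so the CPU exits as soon as the window opens. A sketch of that priority order (field names are illustrative, and the real code also weighs V_TPR against the vector's priority):

#include <stdbool.h>
#include <stdio.h>

struct inj_ctx {
    bool intinfo_pending;   /* event latched at the last #VMEXIT */
    bool nmi_pending, nmi_blocked;
    bool intr_pending;      /* vlapic or legacy extint */
    bool intr_shadow;
    bool rflags_if;
};

static const char *
pick_injection(const struct inj_ctx *c)
{
    if (c->intinfo_pending)
        return ("exitintinfo replay");
    if (c->nmi_pending && !c->nmi_blocked && !c->intr_shadow)
        return ("NMI");
    if (c->intr_pending && c->rflags_if && !c->intr_shadow)
        return ("maskable interrupt");
    if (c->intr_pending)
        return ("none: enable interrupt window exiting");
    return ("none");
}

int
main(void)
{
    struct inj_ctx c = { .intr_pending = true, .rflags_if = false };

    /* IF is clear: request a VINTR exit when the window opens instead. */
    printf("%s\n", pick_injection(&c));
    return (0);
}
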
1925 svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap)
1936 ctrl = svm_get_vmcb_ctrl(vcpu);
1939 * The TLB entries associated with the vcpu's ASID are not valid
1942 * 1. The vcpu's ASID generation is different than the host cpu's
1943 * ASID generation. This happens when the vcpu migrates to a new
1961 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
1977 if (vcpu->asid.gen != asid[cpu].gen) {
1979 } else if (vcpu->eptgen != eptgen) {
2007 vcpu->asid.gen = asid[cpu].gen;
2008 vcpu->asid.num = asid[cpu].num;
2010 ctrl->asid = vcpu->asid.num;
2011 svm_set_dirty(vcpu, VMCB_CACHE_ASID);
2020 vcpu->eptgen = eptgen;
2023 KASSERT(ctrl->asid == vcpu->asid.num,
2024 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num));
2103 * Start vcpu with specified RIP.
2110 struct svm_vcpu *vcpu;
2119 vcpu = vcpui;
2120 svm_sc = vcpu->sc;
2121 state = svm_get_vmcb_state(vcpu);
2122 ctrl = svm_get_vmcb_ctrl(vcpu);
2123 vmexit = vm_exitinfo(vcpu->vcpu);
2124 vlapic = vm_lapic(vcpu->vcpu);
2126 gctx = svm_get_guest_regctx(vcpu);
2127 vmcb_pa = vcpu->vmcb_pa;
2129 if (vcpu->lastcpu != curcpu) {
2133 vcpu->asid.gen = 0;
2138 svm_set_dirty(vcpu, 0xffffffff);
2142 * Setting 'vcpu->lastcpu' here is bit premature because
2147 * This works for now but any new side-effects of vcpu
2150 vcpu->lastcpu = curcpu;
2151 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
2154 svm_msr_guest_enter(vcpu);
2171 vm_exit_suspended(vcpu->vcpu, state->rip);
2175 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
2177 vm_exit_rendezvous(vcpu->vcpu, state->rip);
2183 vm_exit_reqidle(vcpu->vcpu, state->rip);
2188 if (vcpu_should_yield(vcpu->vcpu)) {
2190 vm_exit_astpending(vcpu->vcpu, state->rip);
2194 if (vcpu_debugged(vcpu->vcpu)) {
2196 vm_exit_debug(vcpu->vcpu, state->rip);
2209 svm_inj_interrupts(svm_sc, vcpu, vlapic);
2213 * ensure that the vcpu does not use stale TLB mappings.
2215 svm_pmap_activate(vcpu, pmap);
2217 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty;
2218 vcpu->dirty = 0;
2219 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
2222 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip);
2243 vcpu->nextrip = state->rip;
2246 handled = svm_vmexit(svm_sc, vcpu, vmexit);
2249 svm_msr_guest_exit(vcpu);
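
The run path above derives the VMCB clean bits from a software dirty mask: every touched cached field sets a dirty bit, vmcb_clean advertises the complement (masked to what the host supports) right before VMRUN, and the mask then resets to zero; migration and restore set it to 0xffffffff to force a full reload. A sketch of that protocol (the bit assignments here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Illustrative clean-bit positions; the real set comes from the CPU. */
#define VMCB_CACHE_I    (1u << 0)   /* intercept vectors */
#define VMCB_CACHE_ASID (1u << 2)
#define VMCB_CACHE_TPR  (1u << 3)

static const uint32_t vmcb_clean_supported =
    VMCB_CACHE_I | VMCB_CACHE_ASID | VMCB_CACHE_TPR;

struct vcpu_dirty {
    uint32_t dirty;
};

/* A touched cached field must be reloaded by the CPU on the next VMRUN. */
static void
set_dirty(struct vcpu_dirty *v, uint32_t mask)
{
    v->dirty |= mask;
}

/* Compute the clean bits advertised to VMRUN, then reset the dirty mask. */
static uint32_t
clean_bits_for_vmrun(struct vcpu_dirty *v)
{
    uint32_t clean = vmcb_clean_supported & ~v->dirty;

    v->dirty = 0;
    return (clean);
}

int
main(void)
{
    struct vcpu_dirty v = { .dirty = 0xffffffff };  /* first run / migration */

    printf("clean = %#x\n", clean_bits_for_vmrun(&v));  /* 0: reload all */
    set_dirty(&v, VMCB_CACHE_TPR);                      /* e.g. V_TPR changed */
    printf("clean = %#x\n", clean_bits_for_vmrun(&v));  /* I and ASID clean */
    return (0);
}
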
2257 struct svm_vcpu *vcpu = vcpui;
2259 free(vcpu->vmcb, M_SVM);
2260 free(vcpu, M_SVM);
2322 struct svm_vcpu *vcpu;
2325 vcpu = vcpui;
2328 return (svm_get_intr_shadow(vcpu, val));
2331 if (vmcb_read(vcpu, ident, val) == 0) {
2335 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
2342 SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident);
2349 struct svm_vcpu *vcpu;
2352 vcpu = vcpui;
2355 return (svm_modify_intr_shadow(vcpu, val));
2360 if (vmcb_write(vcpu, ident, val) == 0) {
2365 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
2379 * vcpu's ASID. This needs to be treated differently depending on
2383 SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident);
2431 struct svm_vcpu *vcpu;
2435 vcpu = vcpui;
2440 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
2444 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
2453 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP), val);
2456 vlapic = vm_lapic(vcpu->vcpu);
2460 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR);
2461 vcpu->caps |= (val << VM_CAP_MASK_HWINTR);
2467 if (svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags)) {
2473 vcpu->dbg.rflags_tf = rflags & PSL_T;
2475 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS,
2480 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF);
2483 * Restore shadowed RFLAGS.TF only if vCPU was
2486 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
2488 rflags |= vcpu->dbg.rflags_tf;
2489 vcpu->dbg.rflags_tf = 0;
2491 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS,
2496 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF);
2500 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_DB), val);
2501 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_POPF,
2503 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PUSHF,
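
The VM_CAP_RFLAGS_TF handling above borrows the guest's TF bit for single-stepping: the guest's own TF is shadowed in dbg.rflags_tf, #DB plus pushf/popf are intercepted so the guest neither observes nor clobbers the borrowed bit, and the shadow is folded back when the capability is cleared. A sketch of the shadowing (PSL_T is the TF bit, 0x100; structure and function names are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PSL_T 0x100ULL

struct tf_shadow {
    uint64_t rflags;        /* guest-visible RFLAGS */
    uint64_t rflags_tf;     /* guest's own TF, parked while stepping */
    int stepping;
};

static void
sstep_enable(struct tf_shadow *s)
{
    s->rflags_tf = s->rflags & PSL_T;   /* shadow the guest's TF */
    s->rflags |= PSL_T;                 /* borrow TF for single-stepping */
    s->stepping = 1;
    /* The real code also enables the #DB, pushf and popf intercepts. */
}

static void
sstep_disable(struct tf_shadow *s)
{
    if (s->stepping) {
        s->rflags = (s->rflags & ~PSL_T) | s->rflags_tf;
        s->rflags_tf = 0;
        s->stepping = 0;
    }
}

/* On a pushf exit, the value the guest stores must show its own TF. */
static uint64_t
pushf_value(const struct tf_shadow *s)
{
    return ((s->rflags & ~PSL_T) | s->rflags_tf);
}

int
main(void)
{
    struct tf_shadow s = { .rflags = 0x2 };     /* guest TF clear */

    sstep_enable(&s);
    printf("guest sees rflags %#llx\n", (unsigned long long)pushf_value(&s));
    sstep_disable(&s);
    printf("restored rflags %#llx\n", (unsigned long long)s.rflags);
    return (0);
}
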
2517 struct svm_vcpu *vcpu;
2521 vcpu = vcpui;
2526 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
2530 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
2537 *retval = svm_get_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP));
2540 vlapic = vm_lapic(vcpu->vcpu);
2544 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF));
2547 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR));
2571 struct svm_vcpu *vcpu;
2574 vcpu = vcpui;
2576 vlapic->vm = vcpu->sc->vm;
2577 vlapic->vcpu = vcpu->vcpu;
2578 vlapic->vcpuid = vcpu->vcpuid;
2600 struct svm_vcpu *vcpu;
2603 vcpu = vcpui;
2606 running = vcpu_is_running(vcpu->vcpu, &hostcpu);
2608 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm),
2609 vcpu->vcpuid);
2613 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta);
2614 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta);
2615 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta);
2616 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta);
2618 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta);
2619 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta);
2621 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta);
2623 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta);
2624 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta);
2625 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta);
2629 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta);
2630 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta);
2633 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta);
2634 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta);
2637 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta);
2638 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta);
2641 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta);
2642 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta);
2645 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta);
2646 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta);
2649 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta);
2650 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta);
2653 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta);
2654 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta);
2657 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta);
2658 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta);
2661 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta);
2664 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta);
2665 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta);
2668 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta);
2670 err += vmcb_snapshot_any(vcpu,
2672 err += vmcb_snapshot_any(vcpu,
2674 err += vmcb_snapshot_any(vcpu,
2676 err += vmcb_snapshot_any(vcpu,
2678 err += vmcb_snapshot_any(vcpu,
2681 err += vmcb_snapshot_any(vcpu,
2683 err += vmcb_snapshot_any(vcpu,
2686 err += vmcb_snapshot_any(vcpu,
2689 err += vmcb_snapshot_any(vcpu,
2692 err += vmcb_snapshot_any(vcpu,
2695 err += vmcb_snapshot_any(vcpu,
2697 err += vmcb_snapshot_any(vcpu,
2699 err += vmcb_snapshot_any(vcpu,
2701 err += vmcb_snapshot_any(vcpu,
2704 err += vmcb_snapshot_any(vcpu,
2707 err += vmcb_snapshot_any(vcpu,
2709 err += vmcb_snapshot_any(vcpu,
2711 err += vmcb_snapshot_any(vcpu,
2713 err += vmcb_snapshot_any(vcpu,
2716 err += vmcb_snapshot_any(vcpu,
2719 err += vmcb_snapshot_any(vcpu,
2721 err += vmcb_snapshot_any(vcpu,
2723 err += vmcb_snapshot_any(vcpu,
2726 err += vmcb_snapshot_any(vcpu,
2729 err += vmcb_snapshot_any(vcpu,
2732 err += vmcb_snapshot_any(vcpu,
2734 err += vmcb_snapshot_any(vcpu,
2736 err += vmcb_snapshot_any(vcpu,
2739 err += vmcb_snapshot_any(vcpu,
2742 err += vmcb_snapshot_any(vcpu,
2744 err += vmcb_snapshot_any(vcpu,
2746 err += vmcb_snapshot_any(vcpu,
2748 err += vmcb_snapshot_any(vcpu,
2750 err += vmcb_snapshot_any(vcpu,
2756 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done);
2757 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done);
2758 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done);
2759 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done);
2760 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done);
2761 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done);
2762 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done);
2763 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done);
2764 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done);
2765 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done);
2766 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done);
2767 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done);
2768 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done);
2769 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done);
2770 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done);
2771 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done);
2772 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done);
2773 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done);
2778 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done);
2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done);
2782 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done);
2785 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done);
2787 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done);
2788 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done);
2790 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done);
2794 svm_set_dirty(vcpu, 0xffffffff);
2803 struct svm_vcpu *vcpu = vcpui;
2805 svm_set_tsc_offset(vcpu, offset);