Lines Matching +full:fault +full:- +full:inject
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
108 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
267 asid[cpu].num = nasid - 1;
299 ctrl->tsc_offset = offset;
304 vm_set_tsc_offset(vcpu->vcpu, offset);
327 *index = -1;
336 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
338 off = (msr - MSR_AMD6TH_START);
343 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
345 off = (msr - MSR_AMD7TH_START);
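/*
 * A sketch, not verbatim svm.c: how the fragments above map an MSR
 * number onto the MSR permission map.  Per APM vol. 2 the map covers
 * three MSR ranges laid out back to back, with two intercept bits
 * (read, then write) per MSR, so four MSRs share each byte.  Constants
 * mirror the MSR_PENTIUM_*/MSR_AMD6TH_*/MSR_AMD7TH_* macros; the
 * function name is hypothetical.
 */
#include <errno.h>
#include <stdint.h>

#define	MSR_PENTIUM_START	0x00000000u
#define	MSR_PENTIUM_END		0x00001fffu
#define	MSR_AMD6TH_START	0xc0000000u
#define	MSR_AMD6TH_END		0xc0001fffu
#define	MSR_AMD7TH_START	0xc0010000u
#define	MSR_AMD7TH_END		0xc0011fffu

static int
msr_to_permmap_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;	/* two bits per MSR within a byte */
	base = 0;

	if (msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}
	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}
	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}
	return (EINVAL);
}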
397 return (ctrl->intercept[idx] & bitmask ? 1 : 0);
409 oldval = ctrl->intercept[idx];
412 ctrl->intercept[idx] |= bitmask;
414 ctrl->intercept[idx] &= ~bitmask;
416 if (ctrl->intercept[idx] != oldval) {
419 oldval, ctrl->intercept[idx]);
449 ctrl->iopm_base_pa = iopm_base_pa;
450 ctrl->msrpm_base_pa = msrpm_base_pa;
453 ctrl->np_enable = 1;
454 ctrl->n_cr3 = np_pml4;
458 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
472 if (vcpu_trace_exceptions(vcpu->vcpu)) {
504 * Non-intercepted VMMCALL causes #UD, skip it.
512 if (vcpu_trap_wbinvd(vcpu->vcpu)) {
524 * The ASID will be set to a non-zero value just before VMRUN.
526 ctrl->asid = 0;
534 ctrl->v_intr_masking = 1;
537 ctrl->lbr_virt_en = 1;
538 state->dbgctl = BIT(0);
541 state->efer = EFER_SVM;
543 /* Set up the PAT to power-on state */
544 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) |
553 /* Set up DR6/7 to power-on state */
554 state->dr6 = DBREG_DR6_RESERVED1;
555 state->dr7 = DBREG_DR7_RESERVED1;
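/*
 * Note (a sketch, assuming FreeBSD's PAT_VALUE(i, m) packs memory type
 * 'm' into byte 'i' of the PAT MSR): the power-on g_pat above encodes
 * WB/WT/UC-/UC in entries 0-3 and repeats the pattern in entries 4-7,
 * i.e. the architectural reset value 0x0007040600070406.
 */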
568 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
570 if (svm_sc->msr_bitmap == NULL)
572 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
574 if (svm_sc->iopm_bitmap == NULL)
577 svm_sc->vm = vm;
578 svm_sc->nptp = vtophys(pmap->pm_pmltop);
583 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);
590 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
591 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
592 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);
594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
595 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
596 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
597 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
598 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
599 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
600 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
601 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
603 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
608 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);
611 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);
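/*
 * A sketch of what the svm_msr_rw_ok()/svm_msr_rd_ok() calls above
 * accomplish (bit layout assumed from the APM permission-map format;
 * reuses the hypothetical msr_to_permmap_index() sketched earlier):
 * every bit left set keeps the intercept armed, so after the
 * memset(0xFF) only the explicitly cleared MSRs avoid a #VMEXIT.
 */
static void
msr_allow_sketch(uint8_t *perm_bitmap, uint64_t msr, int read, int write)
{
	int index, bit;

	if (msr_to_permmap_index(msr, &index, &bit) != 0)
		return;
	if (read)
		perm_bitmap[index] &= ~(1 << bit);	  /* read intercept */
	if (write)
		perm_bitmap[index] &= ~(1 << (bit + 1));  /* write intercept */
}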
623 vcpu->sc = sc;
624 vcpu->vcpu = vcpu1;
625 vcpu->vcpuid = vcpuid;
626 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM,
628 vcpu->nextrip = ~0;
629 vcpu->lastcpu = NOCPU;
630 vcpu->vmcb_pa = vtophys(vcpu->vmcb);
631 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap),
632 sc->nptp);
638 * Collateral for a generic SVM VM-exit.
644 vme->exitcode = VM_EXITCODE_SVM;
645 vme->u.svm.exitcode = code;
646 vme->u.svm.exitinfo1 = info1;
647 vme->u.svm.exitinfo2 = info2;
659 return (state->cpl);
669 state = &vmcb->state;
671 if (state->efer & EFER_LMA) {
684 } else if (state->cr0 & CR0_PE) {
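/*
 * A sketch of the mode classification above (standard x86 rules; enum
 * values as in vmm.h, function name hypothetical): long mode when
 * EFER.LMA is set, with CS.L picking 64-bit versus compatibility mode;
 * protected mode when CR0.PE is set; real mode otherwise.
 */
static enum vm_cpu_mode
cpu_mode_sketch(uint64_t efer, uint64_t cr0, int cs_long)
{
	if (efer & EFER_LMA)
		return (cs_long ? CPU_MODE_64BIT : CPU_MODE_COMPATIBILITY);
	if (cr0 & CR0_PE)
		return (CPU_MODE_PROTECTED);
	return (CPU_MODE_REAL);
}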
713 val = in ? regs->sctx_rdi : regs->sctx_rsi;
723 val = rep ? regs->sctx_rcx : 1;
735 vis->seg_name = VM_REG_GUEST_ES;
739 vis->seg_name = vm_segment_name(s);
742 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc);
769 state = &vmcb->state;
770 paging->cr3 = state->cr3;
771 paging->cpl = svm_cpl(state);
772 paging->cpu_mode = svm_vcpu_mode(vmcb);
773 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
774 state->efer);
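/*
 * A sketch of svm_paging_mode() (reconstructed from the usual
 * CR0.PG/CR4.PAE/EFER rules, not copied verbatim): flat when paging is
 * off, 2-level 32-bit without PAE, 4-level in long mode, 3-level PAE
 * otherwise.
 */
static enum vm_paging_mode
paging_mode_sketch(uint64_t cr0, uint64_t cr4, uint64_t efer)
{
	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LMA)
		return (PAGING_MODE_64);
	return (PAGING_MODE_PAE);
}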
796 info1 = ctrl->exitinfo1;
809 vmexit->exitcode = VM_EXITCODE_INOUT;
810 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
811 vmexit->u.inout.string = inout_string;
812 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
813 vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
814 vmexit->u.inout.port = (uint16_t)(info1 >> 16);
815 vmexit->u.inout.eax = (uint32_t)(state->rax);
818 vmexit->exitcode = VM_EXITCODE_INOUT_STR;
819 vis = &vmexit->u.inout_str;
820 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging);
821 vis->rflags = state->rflags;
822 vis->cr0 = state->cr0;
823 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
824 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
825 vis->addrsize = svm_inout_str_addrsize(info1);
826 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis);
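/*
 * For reference, the EXITINFO1 layout decoded above (IOIO intercept,
 * bit positions taken from APM vol. 2):
 *   bit 0       TYPE   1 = IN, 0 = OUT
 *   bit 2       STR    string instruction (INS/OUTS)
 *   bit 3       REP    repeat prefix present
 *   bits 6:4    SZ32/SZ16/SZ8, one-hot, so "(info1 >> 4) & 0x7" yields
 *               the operand size in bytes directly
 *   bits 9:7    A64/A32/A16 address size for string operations
 *   bits 12:10  SEG effective segment for string operations
 *   bits 31:16  PORT I/O port number
 */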
872 ctrl = &vmcb->ctrl;
873 paging = &vmexit->u.inst_emul.paging;
875 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
876 vmexit->u.inst_emul.gpa = gpa;
877 vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
883 switch(paging->cpu_mode) {
885 vmexit->u.inst_emul.cs_base = seg.base;
886 vmexit->u.inst_emul.cs_d = 0;
890 vmexit->u.inst_emul.cs_base = seg.base;
895 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
899 vmexit->u.inst_emul.cs_base = 0;
900 vmexit->u.inst_emul.cs_d = 0;
908 inst_len = ctrl->inst_len;
909 inst_bytes = ctrl->inst_bytes;
914 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
937 * Inject an event to vcpu as described in section 15.20, "Event injection".
947 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
948 ("%s: event already pending %#lx", __func__, ctrl->eventinj));
966 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
968 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
969 ctrl->eventinj |= (uint64_t)error << 32;
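/*
 * For reference, the EVENTINJ encoding built above (APM vol. 2,
 * sec. 15.20):
 *   bits 7:0    VECTOR
 *   bits 10:8   TYPE      0 = external INTR, 2 = NMI, 3 = exception,
 *                         4 = software interrupt
 *   bit 11      EV        error code valid
 *   bit 31      V         valid
 *   bits 63:32  ERRORCODE delivered only when EV is set
 */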
984 vlapic = vm_lapic(vcpu->vcpu);
988 vlapic_set_cr8(vlapic, ctrl->v_tpr);
991 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
992 "v_intr_vector %d", __func__, ctrl->v_intr_vector));
1002 intinfo = ctrl->exitintinfo;
1014 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1);
1015 vm_exit_intinfo(vcpu->vcpu, intinfo);
1034 if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
1035 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
1042 ctrl->v_irq = 1;
1043 ctrl->v_ign_tpr = 1;
1044 ctrl->v_intr_vector = 0;
1056 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
1063 ctrl->v_irq = 0;
1064 ctrl->v_intr_vector = 0;
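/*
 * Note: the dummy virtual interrupt programmed above (V_IRQ with
 * vector 0 and TPR ignored) is never actually delivered; it only arms
 * the VINTR intercept so the vcpu exits the moment it can accept an
 * interrupt, at which point the real pending event is injected.
 */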
1076 oldval = ctrl->intr_shadow;
1079 ctrl->intr_shadow = newval;
1091 *val = ctrl->intr_shadow;
1127 * the "iret" when it runs next. However, it is possible to inject
1159 oldval = state->efer;
1162 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */
1168 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
1170 if (state->cr0 & CR0_PG)
1175 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
1184 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE))
1189 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
1193 vme = vm_exitinfo(vcpu->vcpu);
1200 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR))
1205 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE))
1213 vm_inject_gp(vcpu->vcpu);
1224 error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
1242 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
1249 state->rax = result & 0xffffffff;
1250 ctx->sctx_rdx = result >> 32;
1345 state = &vmcb->state;
1346 ctrl = &vmcb->ctrl;
1349 code = ctrl->exitcode;
1350 info1 = ctrl->exitinfo1;
1351 info2 = ctrl->exitinfo2;
1353 vmexit->exitcode = VM_EXITCODE_BOGUS;
1354 vmexit->rip = state->rip;
1355 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
1357 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);
1369 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
1370 "injection valid bit is set %#lx", __func__, ctrl->eventinj));
1372 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
1374 vmexit->inst_length, code, info1, info2));
1384 vmexit->inst_length = 0;
1389 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1);
1393 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
1400 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
1402 idtvec = code - 0x40;
1440 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) {
1441 vmexit->exitcode = VM_EXITCODE_DB;
1442 vmexit->u.dbg.trace_trap = 1;
1443 vmexit->u.dbg.pushf_intercept = 0;
1445 if (vcpu->dbg.popf_sstep) {
1452 vcpu->dbg.popf_sstep = 0;
1461 vcpu->dbg.rflags_tf = rflags & PSL_T;
1462 } else if (vcpu->dbg.pushf_sstep) {
1467 vcpu->dbg.pushf_sstep = 0;
1475 vmexit->u.dbg.pushf_intercept = 1;
1476 vmexit->u.dbg.tf_shadow_val =
1477 vcpu->dbg.rflags_tf;
1479 &vmexit->u.dbg.paging);
1482 /* Clear DR6 "single-step" bit. */
1494 vmexit->exitcode = VM_EXITCODE_BPT;
1495 vmexit->u.bpt.inst_length = vmexit->inst_length;
1496 vmexit->inst_length = 0;
1505 * 'inst_length' is non-zero.
1513 vmexit->inst_length, idtvec);
1514 vmexit->inst_length = 0;
1523 KASSERT(vmexit->inst_length == 0,
1526 vmexit->inst_length, idtvec));
1530 error = vm_inject_exception(vcpu->vcpu, idtvec,
1538 eax = state->rax;
1539 ecx = ctx->sctx_rcx;
1540 edx = ctx->sctx_rdx;
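/*
 * A sketch (the combining line itself is elided above): WRMSR takes
 * its 64-bit operand split across EDX:EAX, so the handler builds
 * something equivalent to
 *
 *	val = (uint64_t)edx << 32 | eax;
 *
 * before passing it to the MSR emulation.
 */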
1544 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
1548 vmexit->exitcode = VM_EXITCODE_WRMSR;
1549 vmexit->u.msr.code = ecx;
1550 vmexit->u.msr.wval = val;
1554 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1559 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
1561 vmexit->exitcode = VM_EXITCODE_RDMSR;
1562 vmexit->u.msr.code = ecx;
1566 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1573 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
1576 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
1577 handled = x86_emulate_cpuid(vcpu->vcpu,
1578 &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx,
1579 &ctx->sctx_rdx);
1582 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
1583 vmexit->exitcode = VM_EXITCODE_HLT;
1584 vmexit->u.hlt.rflags = state->rflags;
1587 vmexit->exitcode = VM_EXITCODE_PAUSE;
1588 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
1593 SVM_CTR2(vcpu, "nested page fault with "
1596 } else if (vm_mem_allocated(vcpu->vcpu, info2)) {
1597 vmexit->exitcode = VM_EXITCODE_PAGING;
1598 vmexit->u.paging.gpa = info2;
1599 vmexit->u.paging.fault_type = npf_fault_type(info1);
1600 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
1601 SVM_CTR3(vcpu, "nested page fault "
1603 info2, info1, state->rip);
1606 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
1607 SVM_CTR3(vcpu, "inst_emul fault "
1609 info2, info1, state->rip);
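/*
 * A sketch of npf_fault_type() (reconstructed from the x86 page-fault
 * error-code bits carried in EXITINFO1; macro names assumed from
 * vmcb.h):
 */
static int
npf_fault_type_sketch(uint64_t exitinfo1)
{
	if (exitinfo1 & VMCB_NPF_INFO1_W)	/* bit 1: write access */
		return (VM_PROT_WRITE);
	if (exitinfo1 & VMCB_NPF_INFO1_ID)	/* bit 4: instruction fetch */
		return (VM_PROT_EXECUTE);
	return (VM_PROT_READ);
}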
1613 vmexit->exitcode = VM_EXITCODE_MONITOR;
1616 vmexit->exitcode = VM_EXITCODE_MWAIT;
1619 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
1624 vmexit->inst_length = 0;
1625 /* Disable PUSHF intercepts - avoid a loop. */
1632 vcpu->dbg.pushf_sstep = 1;
1638 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
1643 vmexit->inst_length = 0;
1644 /* Disable POPF intercepts - avoid a loop. */
1649 vcpu->dbg.popf_sstep = 1;
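/*
 * Note on the VM_CAP_RFLAGS_TF handling above: while bhyve single-steps
 * a guest it owns RFLAGS.TF, so PUSHF/POPF are intercepted, stepped
 * over with TF still set, and the guest-visible TF value is recorded or
 * patched back afterwards; the pushf_sstep/popf_sstep flags mark which
 * fixup is still pending when the next #DB arrives.
 */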
1664 vm_inject_ud(vcpu->vcpu);
1673 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
1679 vmexit->rip, vmexit->inst_length);
1682 vmexit->rip += vmexit->inst_length;
1683 vmexit->inst_length = 0;
1684 state->rip = vmexit->rip;
1686 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
1707 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo))
1717 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1);
1722 * Inject event to virtual cpu.
1734 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) {
1743 if (vcpu->nextrip != state->rip) {
1744 ctrl->intr_shadow = 0;
1747 vcpu->nextrip, state->rip);
1751 * Inject pending events or exceptions for this vcpu.
1754 * during event delivery (i.e. ctrl->exitintinfo).
1762 if (vm_nmi_pending(vcpu->vcpu)) {
1765 * Can't inject another NMI if the guest has not
1768 SVM_CTR0(vcpu, "Cannot inject NMI due "
1769 "to NMI-blocking");
1770 } else if (ctrl->intr_shadow) {
1772 * Can't inject an NMI if the vcpu is in an intr_shadow.
1774 SVM_CTR0(vcpu, "Cannot inject NMI due to "
1778 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
1783 SVM_CTR1(vcpu, "Cannot inject NMI due to "
1784 "eventinj %#lx", ctrl->eventinj);
1787 * Use self-IPI to trigger a VM-exit as soon as
1798 vm_nmi_clear(vcpu->vcpu);
1800 /* Inject NMI, vector number is not used */
1811 extint_pending = vm_extint_pending(vcpu->vcpu);
1818 /* Ask the legacy pic for a vector to inject */
1819 vatpic_pending_intr(sc->vm, &vector);
1826 * then we cannot inject the pending interrupt.
1828 if ((state->rflags & PSL_I) == 0) {
1829 SVM_CTR2(vcpu, "Cannot inject vector %d due to "
1830 "rflags %#lx", vector, state->rflags);
1835 if (ctrl->intr_shadow) {
1836 SVM_CTR1(vcpu, "Cannot inject vector %d due to "
1842 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
1843 SVM_CTR2(vcpu, "Cannot inject vector %d due to "
1844 "eventinj %#lx", vector, ctrl->eventinj);
1854 vm_extint_clear(vcpu->vcpu);
1855 vatpic_intr_accepted(sc->vm, vector);
1859 * Force a VM-exit as soon as the vcpu is ready to accept another
1861 * that it wants to inject. Also, if the APIC has a pending interrupt
1862 * that was preempted by the ExtInt then it allows us to inject the
1879 if (ctrl->v_tpr != v_tpr) {
1881 ctrl->v_tpr, v_tpr);
1882 ctrl->v_tpr = v_tpr;
1896 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
1897 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
1900 ctrl->eventinj, ctrl->intr_shadow, state->rflags));
1917 * type to "64-bit available TSS".
1920 tss_sd->sd_type = SDT_SYSTSS;
1933 CPU_SET_ATOMIC(cpu, &pmap->pm_active);
1934 smr_enter(pmap->pm_eptsmr);
1974 eptgen = atomic_load_long(&pmap->pm_eptgen);
1975 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;
1977 if (vcpu->asid.gen != asid[cpu].gen) {
1979 } else if (vcpu->eptgen != eptgen) {
1981 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */
1989 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
1990 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
1999 * If this cpu does not support "flush-by-asid"
2005 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
2007 vcpu->asid.gen = asid[cpu].gen;
2008 vcpu->asid.num = asid[cpu].num;
2010 ctrl->asid = vcpu->asid.num;
2013 * If this cpu supports "flush-by-asid" then the TLB
2018 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
2020 vcpu->eptgen = eptgen;
2022 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
2023 KASSERT(ctrl->asid == vcpu->asid.num,
2024 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num));
2030 smr_exit(pmap->pm_eptsmr);
2031 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
2053 gctx->host_dr7 = rdr7();
2054 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
2066 gctx->host_dr0 = rdr0();
2067 gctx->host_dr1 = rdr1();
2068 gctx->host_dr2 = rdr2();
2069 gctx->host_dr3 = rdr3();
2070 gctx->host_dr6 = rdr6();
2073 load_dr0(gctx->sctx_dr0);
2074 load_dr1(gctx->sctx_dr1);
2075 load_dr2(gctx->sctx_dr2);
2076 load_dr3(gctx->sctx_dr3);
2084 gctx->sctx_dr0 = rdr0();
2085 gctx->sctx_dr1 = rdr1();
2086 gctx->sctx_dr2 = rdr2();
2087 gctx->sctx_dr3 = rdr3();
2093 load_dr0(gctx->host_dr0);
2094 load_dr1(gctx->host_dr1);
2095 load_dr2(gctx->host_dr2);
2096 load_dr3(gctx->host_dr3);
2097 load_dr6(gctx->host_dr6);
2098 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
2099 load_dr7(gctx->host_dr7);
2120 svm_sc = vcpu->sc;
2123 vmexit = vm_exitinfo(vcpu->vcpu);
2124 vlapic = vm_lapic(vcpu->vcpu);
2127 vmcb_pa = vcpu->vmcb_pa;
2129 if (vcpu->lastcpu != curcpu) {
2133 vcpu->asid.gen = 0;
2142 * Setting 'vcpu->lastcpu' here is a bit premature because
2147 * This works for now but any new side-effects of vcpu
2150 vcpu->lastcpu = curcpu;
2151 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
2157 state->rip = rip;
2171 vm_exit_suspended(vcpu->vcpu, state->rip);
2175 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
2177 vm_exit_rendezvous(vcpu->vcpu, state->rip);
2183 vm_exit_reqidle(vcpu->vcpu, state->rip);
2188 if (vcpu_should_yield(vcpu->vcpu)) {
2190 vm_exit_astpending(vcpu->vcpu, state->rip);
2194 if (vcpu_debugged(vcpu->vcpu)) {
2196 vm_exit_debug(vcpu->vcpu, state->rip);
2217 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty;
2218 vcpu->dirty = 0;
2219 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
2222 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip);
2239 /* #VMEXIT disables interrupts so re-enable them here. */
2243 vcpu->nextrip = state->rip;
2259 free(vcpu->vmcb, M_SVM);
2268 free(sc->iopm_bitmap, M_SVM);
2269 free(sc->msr_bitmap, M_SVM);
2279 return (&regctx->sctx_rbx);
2281 return (&regctx->sctx_rcx);
2283 return (&regctx->sctx_rdx);
2285 return (&regctx->sctx_rdi);
2287 return (&regctx->sctx_rsi);
2289 return (&regctx->sctx_rbp);
2291 return (&regctx->sctx_r8);
2293 return (&regctx->sctx_r9);
2295 return (&regctx->sctx_r10);
2297 return (&regctx->sctx_r11);
2299 return (&regctx->sctx_r12);
2301 return (&regctx->sctx_r13);
2303 return (&regctx->sctx_r14);
2305 return (&regctx->sctx_r15);
2307 return (&regctx->sctx_dr0);
2309 return (&regctx->sctx_dr1);
2311 return (&regctx->sctx_dr2);
2313 return (&regctx->sctx_dr3);
2406 if (meta->op == VM_SNAPSHOT_SAVE) {
2412 } else if (meta->op == VM_SNAPSHOT_RESTORE) {
2456 vlapic = vm_lapic(vcpu->vcpu);
2457 vlapic->ipi_exit = val;
2460 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR);
2461 vcpu->caps |= (val << VM_CAP_MASK_HWINTR);
2473 vcpu->dbg.rflags_tf = rflags & PSL_T;
2480 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF);
2486 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
2488 rflags |= vcpu->dbg.rflags_tf;
2489 vcpu->dbg.rflags_tf = 0;
2496 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF);
2540 vlapic = vm_lapic(vcpu->vcpu);
2541 *retval = vlapic->ipi_exit;
2544 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF));
2547 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR));
2576 vlapic->vm = vcpu->sc->vm;
2577 vlapic->vcpu = vcpu->vcpu;
2578 vlapic->vcpuid = vcpu->vcpuid;
2579 vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC,
2592 free(vlapic->apic_page, M_SVM_VLAPIC);
2606 running = vcpu_is_running(vcpu->vcpu, &hostcpu);
2608 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm),
2609 vcpu->vcpuid);
2756 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done);
2757 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done);
2758 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done);
2759 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done);
2760 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done);
2761 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done);
2762 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done);
2763 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done);
2764 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done);
2765 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done);
2766 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done);
2767 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done);
2768 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done);
2769 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done);
2770 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done);
2771 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done);
2772 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done);
2773 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done);
2778 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done);
2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done);
2782 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done);
2784 /* Restore EPTGEN field - EPT is Extended Page Table */
2785 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done);
2787 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done);
2788 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done);
2790 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done);
2793 if (meta->op == VM_SNAPSHOT_RESTORE)