Lines matching refs:vcpu (source line number, line text, and enclosing function; "argument" marks lines where vcpu is a function parameter):
669 svm_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi) in svm_event_waitexit_enable() argument
671 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_event_waitexit_enable()
688 svm_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi) in svm_event_waitexit_disable() argument
690 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_event_waitexit_disable()
739 svm_vcpu_inject(struct nvmm_cpu *vcpu) in svm_vcpu_inject() argument
741 struct nvmm_comm_page *comm = vcpu->comm; in svm_vcpu_inject()
742 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_inject()
770 svm_event_waitexit_enable(vcpu, true); in svm_vcpu_inject()
791 svm_inject_ud(struct nvmm_cpu *vcpu) in svm_inject_ud() argument
793 struct nvmm_comm_page *comm = vcpu->comm; in svm_inject_ud()
800 ret = svm_vcpu_inject(vcpu); in svm_inject_ud()
805 svm_inject_gp(struct nvmm_cpu *vcpu) in svm_inject_gp() argument
807 struct nvmm_comm_page *comm = vcpu->comm; in svm_inject_gp()
814 ret = svm_vcpu_inject(vcpu); in svm_inject_gp()
819 svm_vcpu_event_commit(struct nvmm_cpu *vcpu) in svm_vcpu_event_commit() argument
821 if (__predict_true(!vcpu->comm->event_commit)) { in svm_vcpu_event_commit()
824 vcpu->comm->event_commit = false; in svm_vcpu_event_commit()
825 return svm_vcpu_inject(vcpu); in svm_vcpu_event_commit()
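
The svm_vcpu_event_commit() lines above show a small handshake with userland: a pending event is described in the shared comm page, an event_commit flag marks it as ready, and the kernel clears the flag and injects exactly once before the next VMRUN. Below is a minimal user-space sketch of that pattern; the struct layouts and the pending_vector field are stand-ins, not NVMM's real nvmm_comm_page.

#include <stdbool.h>
#include <stdio.h>

/* Trimmed, hypothetical stand-ins for NVMM's nvmm_comm_page/nvmm_cpu. */
struct comm_page {
	bool event_commit;	/* userland queued an event for injection */
	int  pending_vector;	/* illustrative: which event to inject */
};

struct cpu {
	struct comm_page *comm;
};

/* Stand-in for svm_vcpu_inject(): the real code programs the VMCB here. */
static int
vcpu_inject(struct cpu *vcpu)
{
	printf("inject vector %d\n", vcpu->comm->pending_vector);
	return 0;
}

/* Same shape as svm_vcpu_event_commit(): the common case is "nothing to do". */
static int
vcpu_event_commit(struct cpu *vcpu)
{
	if (!vcpu->comm->event_commit)
		return 0;
	vcpu->comm->event_commit = false;
	return vcpu_inject(vcpu);
}

int
main(void)
{
	struct comm_page comm = { .event_commit = true, .pending_vector = 6 };
	struct cpu vcpu = { .comm = &comm };

	vcpu_event_commit(&vcpu);	/* injects once */
	vcpu_event_commit(&vcpu);	/* flag already cleared: no-op */
	return 0;
}
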
860 svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx) in svm_inkernel_handle_cpuid() argument
862 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_inkernel_handle_cpuid()
890 cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid, in svm_inkernel_handle_cpuid()
1064 svm_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, in svm_exit_cpuid() argument
1067 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_exit_cpuid()
1075 svm_inkernel_handle_cpuid(vcpu, eax, ecx); in svm_exit_cpuid()
1112 svm_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, in svm_exit_hlt() argument
1115 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_exit_hlt()
1119 svm_event_waitexit_disable(vcpu, false); in svm_exit_hlt()
1139 svm_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, in svm_exit_io() argument
1142 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_exit_io()
1178 svm_vcpu_state_provide(vcpu, in svm_exit_io()
1191 svm_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, in svm_inkernel_handle_msr() argument
1194 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_inkernel_handle_msr()
1252 svm_inject_gp(vcpu); in svm_inkernel_handle_msr()
1257 svm_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, in svm_exit_rdmsr() argument
1260 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_exit_rdmsr()
1266 if (svm_inkernel_handle_msr(mach, vcpu, exit)) { in svm_exit_rdmsr()
1271 svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS); in svm_exit_rdmsr()
1275 svm_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, in svm_exit_wrmsr() argument
1278 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_exit_wrmsr()
1289 if (svm_inkernel_handle_msr(mach, vcpu, exit)) { in svm_exit_wrmsr()
1294 svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS); in svm_exit_wrmsr()
1298 svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, in svm_exit_msr() argument
1301 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_exit_msr()
1305 svm_exit_rdmsr(mach, vcpu, exit); in svm_exit_msr()
1307 svm_exit_wrmsr(mach, vcpu, exit); in svm_exit_msr()
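
The MSR references show a try-in-kernel-first pattern: svm_inkernel_handle_msr() either emulates the access itself (or rejects it with svm_inject_gp()), or declines, in which case svm_exit_rdmsr()/svm_exit_wrmsr() provide the general-purpose registers through the comm page and forward the exit to userland. The sketch below imitates that dispatch in user space; the MSR numbers, types, and helper names are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpu {
	uint64_t rax, rdx, rcx;		/* rcx selects the MSR, rax/rdx carry data */
};

#define MSR_ALLOWED	0x10	/* hypothetical: emulated in the kernel */
#define MSR_FORBIDDEN	0x20	/* hypothetical: rejected with #GP */

static void inject_gp(struct cpu *vcpu) { (void)vcpu; printf("#GP injected\n"); }
static void state_provide_gprs(struct cpu *vcpu) { (void)vcpu; }

/* Returns true when the access was fully handled in the kernel. */
static bool
inkernel_handle_msr(struct cpu *vcpu, bool is_write)
{
	switch ((uint32_t)vcpu->rcx) {
	case MSR_ALLOWED:
		if (!is_write) {
			vcpu->rax = 0;	/* toy value, low 32 bits */
			vcpu->rdx = 0;	/* toy value, high 32 bits */
		}
		return true;
	case MSR_FORBIDDEN:
		inject_gp(vcpu);	/* matches the svm_inject_gp() call above */
		return true;
	default:
		return false;		/* unknown: let userland emulate it */
	}
}

/* Shape of svm_exit_rdmsr()/svm_exit_wrmsr(): kernel first, else forward. */
static void
exit_msr(struct cpu *vcpu, bool is_write)
{
	if (inkernel_handle_msr(vcpu, is_write))
		return;
	state_provide_gprs(vcpu);	/* userland needs the GPRs to emulate */
	printf("forward %s to userland\n", is_write ? "wrmsr" : "rdmsr");
}

int
main(void)
{
	struct cpu vcpu = { .rcx = MSR_ALLOWED };
	exit_msr(&vcpu, false);		/* handled in kernel */
	vcpu.rcx = 0x1234;
	exit_msr(&vcpu, true);		/* forwarded */
	return 0;
}
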
1312 svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, in svm_exit_npf() argument
1315 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_exit_npf()
1330 svm_vcpu_state_provide(vcpu, in svm_exit_npf()
1336 svm_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, in svm_exit_xsetbv() argument
1339 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_exit_xsetbv()
1364 svm_inject_gp(vcpu); in svm_exit_xsetbv()
1377 svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu) in svm_vcpu_guest_fpu_enter() argument
1379 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_guest_fpu_enter()
1392 svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu) in svm_vcpu_guest_fpu_leave() argument
1394 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_guest_fpu_leave()
1407 svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu) in svm_vcpu_guest_dbregs_enter() argument
1409 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_guest_dbregs_enter()
1422 svm_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu) in svm_vcpu_guest_dbregs_leave() argument
1424 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_guest_dbregs_leave()
1435 svm_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu) in svm_vcpu_guest_misc_enter() argument
1437 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_guest_misc_enter()
1444 svm_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu) in svm_vcpu_guest_misc_leave() argument
1446 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_guest_misc_leave()
1459 svm_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu) in svm_gtlb_catchup() argument
1461 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_gtlb_catchup()
1463 if (vcpu->hcpu_last != hcpu || cpudata->shared_asid) { in svm_gtlb_catchup()
1469 svm_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu) in svm_htlb_catchup() argument
1516 svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, in svm_vcpu_run() argument
1519 struct nvmm_comm_page *comm = vcpu->comm; in svm_vcpu_run()
1521 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_run()
1526 svm_vcpu_state_commit(vcpu); in svm_vcpu_run()
1529 if (__predict_false(svm_vcpu_event_commit(vcpu) != 0)) { in svm_vcpu_run()
1536 svm_gtlb_catchup(vcpu, hcpu); in svm_vcpu_run()
1537 svm_htlb_catchup(vcpu, hcpu); in svm_vcpu_run()
1539 if (vcpu->hcpu_last != hcpu) { in svm_vcpu_run()
1544 svm_vcpu_guest_dbregs_enter(vcpu); in svm_vcpu_run()
1545 svm_vcpu_guest_misc_enter(vcpu); in svm_vcpu_run()
1559 svm_vcpu_guest_fpu_enter(vcpu); in svm_vcpu_run()
1565 svm_vcpu_guest_fpu_leave(vcpu); in svm_vcpu_run()
1572 vcpu->hcpu_last = hcpu; in svm_vcpu_run()
1582 svm_event_waitexit_disable(vcpu, false); in svm_vcpu_run()
1586 svm_event_waitexit_disable(vcpu, true); in svm_vcpu_run()
1590 svm_exit_cpuid(mach, vcpu, exit); in svm_vcpu_run()
1593 svm_exit_hlt(mach, vcpu, exit); in svm_vcpu_run()
1596 svm_exit_io(mach, vcpu, exit); in svm_vcpu_run()
1599 svm_exit_msr(mach, vcpu, exit); in svm_vcpu_run()
1620 svm_inject_ud(vcpu); in svm_vcpu_run()
1631 svm_exit_xsetbv(mach, vcpu, exit); in svm_vcpu_run()
1634 svm_exit_npf(mach, vcpu, exit); in svm_vcpu_run()
1643 if (nvmm_return_needed(vcpu, exit)) { in svm_vcpu_run()
1653 svm_vcpu_guest_misc_leave(vcpu); in svm_vcpu_run()
1654 svm_vcpu_guest_dbregs_leave(vcpu); in svm_vcpu_run()
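
Read top to bottom, the svm_vcpu_run() references suggest the per-run sequence: commit any cached state and pending event from the comm page, catch up the guest/host TLBs (svm_gtlb_catchup() flushes when the vCPU moved to another host CPU or its ASID is shared), switch in the guest debug registers and misc state, run the guest with its FPU loaded, then dispatch on the exit code until nvmm_return_needed() says the exit must be handed to userland. The skeleton below is a condensed guess at that shape, with stubbed helpers and invented types; it is not the driver's actual control flow.

#include <stdbool.h>

/* Illustrative exit codes; the real dispatch switches on VMCB exit codes. */
enum exit_reason { EXIT_CPUID, EXIT_HLT, EXIT_IO, EXIT_MSR, EXIT_NPF, EXIT_OTHER };

struct cpu {
	int hcpu_last;		/* host CPU this vCPU last ran on */
};

/* Stubs standing in for the svm_* helpers listed above. */
static void state_commit(struct cpu *v)		{ (void)v; }
static int  event_commit(struct cpu *v)		{ (void)v; return 0; }
static void gtlb_catchup(struct cpu *v, int h)	{ (void)v; (void)h; }
static void htlb_catchup(struct cpu *v, int h)	{ (void)v; (void)h; }
static void dbregs_enter(struct cpu *v)		{ (void)v; }
static void dbregs_leave(struct cpu *v)		{ (void)v; }
static void misc_enter(struct cpu *v)		{ (void)v; }
static void misc_leave(struct cpu *v)		{ (void)v; }
static void fpu_enter(struct cpu *v)		{ (void)v; }
static void fpu_leave(struct cpu *v)		{ (void)v; }
static enum exit_reason vmrun(struct cpu *v)	{ (void)v; return EXIT_HLT; }
static void handle_exit(struct cpu *v, enum exit_reason r) { (void)v; (void)r; }
static bool return_needed(enum exit_reason r)	{ (void)r; return true; }

static int
vcpu_run(struct cpu *vcpu, int hcpu)
{
	enum exit_reason reason;

	state_commit(vcpu);		/* write userland's register edits back */
	if (event_commit(vcpu) != 0)	/* inject a queued event, if any */
		return -1;

	gtlb_catchup(vcpu, hcpu);	/* flush guest TLB if migrated/shared ASID */
	htlb_catchup(vcpu, hcpu);
	dbregs_enter(vcpu);
	misc_enter(vcpu);

	for (;;) {
		fpu_enter(vcpu);	/* guest FPU around the actual VMRUN */
		reason = vmrun(vcpu);
		fpu_leave(vcpu);
		vcpu->hcpu_last = hcpu;

		handle_exit(vcpu, reason);	/* cpuid/hlt/io/msr/npf/... */
		if (return_needed(reason))	/* hand this exit to userland */
			break;
	}

	misc_leave(vcpu);
	dbregs_leave(vcpu);
	return 0;
}

int
main(void)
{
	struct cpu vcpu = { .hcpu_last = -1 };
	return vcpu_run(&vcpu, 0);
}
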
1828 svm_vcpu_setstate(struct nvmm_cpu *vcpu) in svm_vcpu_setstate() argument
1830 struct nvmm_comm_page *comm = vcpu->comm; in svm_vcpu_setstate()
1832 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_setstate()
1936 svm_event_waitexit_enable(vcpu, false); in svm_vcpu_setstate()
1938 svm_event_waitexit_disable(vcpu, false); in svm_vcpu_setstate()
1942 svm_event_waitexit_enable(vcpu, true); in svm_vcpu_setstate()
1944 svm_event_waitexit_disable(vcpu, true); in svm_vcpu_setstate()
1970 svm_vcpu_getstate(struct nvmm_cpu *vcpu) in svm_vcpu_getstate() argument
1972 struct nvmm_comm_page *comm = vcpu->comm; in svm_vcpu_getstate()
1974 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_getstate()
2073 svm_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags) in svm_vcpu_state_provide() argument
2075 vcpu->comm->state_wanted = flags; in svm_vcpu_state_provide()
2076 svm_vcpu_getstate(vcpu); in svm_vcpu_state_provide()
2080 svm_vcpu_state_commit(struct nvmm_cpu *vcpu) in svm_vcpu_state_commit() argument
2082 vcpu->comm->state_wanted = vcpu->comm->state_commit; in svm_vcpu_state_commit()
2083 vcpu->comm->state_commit = 0; in svm_vcpu_state_commit()
2084 svm_vcpu_setstate(vcpu); in svm_vcpu_state_commit()
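
svm_vcpu_state_provide() and svm_vcpu_state_commit() show how register state crosses the comm page: state_wanted selects which state classes the next getstate/setstate should transfer, state_cached tracks what the page currently holds, and state_commit marks the classes userland modified and wants written back before the next run. The sketch below imitates that flag dance with made-up bit values and trivial get/set stubs; the real NVMM_X64_STATE_* bits and copy routines are more involved.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up state-class bits; NVMM defines its own NVMM_X64_STATE_* values. */
#define STATE_GPRS	0x01
#define STATE_CRS	0x02
#define STATE_MSRS	0x04
#define STATE_ALL	(STATE_GPRS | STATE_CRS | STATE_MSRS)

/* Hypothetical comm page layout shared between the kernel backend and userland. */
struct comm_page {
	uint64_t state_wanted;	/* classes to transfer on the next get/set */
	uint64_t state_cached;	/* classes currently valid in the page */
	uint64_t state_commit;	/* classes userland modified */
};

struct cpu {
	struct comm_page *comm;
};

/* Stand-ins for svm_vcpu_getstate()/svm_vcpu_setstate(); bookkeeping guessed. */
static void
vcpu_getstate(struct cpu *vcpu)
{
	printf("VMCB -> comm page: 0x%" PRIx64 "\n", vcpu->comm->state_wanted);
	vcpu->comm->state_cached |= vcpu->comm->state_wanted;
	vcpu->comm->state_wanted = 0;
}

static void
vcpu_setstate(struct cpu *vcpu)
{
	printf("comm page -> VMCB: 0x%" PRIx64 "\n", vcpu->comm->state_wanted);
	vcpu->comm->state_wanted = 0;
}

/* Same shape as svm_vcpu_state_provide(): expose 'flags' to userland. */
static void
vcpu_state_provide(struct cpu *vcpu, uint64_t flags)
{
	vcpu->comm->state_wanted = flags;
	vcpu_getstate(vcpu);
}

/* Same shape as svm_vcpu_state_commit(): flush userland's modifications. */
static void
vcpu_state_commit(struct cpu *vcpu)
{
	vcpu->comm->state_wanted = vcpu->comm->state_commit;
	vcpu->comm->state_commit = 0;
	vcpu_setstate(vcpu);
}

int
main(void)
{
	struct comm_page comm = { 0 };
	struct cpu vcpu = { .comm = &comm };

	vcpu_state_provide(&vcpu, STATE_GPRS);	/* e.g. before an I/O exit */
	comm.state_commit = STATE_GPRS;		/* userland edited the GPRs */
	vcpu_state_commit(&vcpu);		/* written back before VMRUN */
	return 0;
}
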
2090 svm_asid_alloc(struct nvmm_cpu *vcpu) in svm_asid_alloc() argument
2092 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_asid_alloc()
2122 svm_asid_free(struct nvmm_cpu *vcpu) in svm_asid_free() argument
2124 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_asid_free()
2141 svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu) in svm_vcpu_init() argument
2143 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_init()
2246 svm_asid_alloc(vcpu); in svm_vcpu_init()
2266 memcpy(&vcpu->comm->state, &nvmm_x86_reset_state, in svm_vcpu_init()
2268 vcpu->comm->state_wanted = NVMM_X64_STATE_ALL; in svm_vcpu_init()
2269 vcpu->comm->state_cached = 0; in svm_vcpu_init()
2270 svm_vcpu_setstate(vcpu); in svm_vcpu_init()
2274 svm_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu) in svm_vcpu_create() argument
2283 vcpu->cpudata = cpudata; in svm_vcpu_create()
2304 svm_vcpu_init(mach, vcpu); in svm_vcpu_create()
2327 svm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu) in svm_vcpu_destroy() argument
2329 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_destroy()
2331 svm_asid_free(vcpu); in svm_vcpu_destroy()
2399 svm_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data) in svm_vcpu_configure() argument
2401 struct svm_cpudata *cpudata = vcpu->cpudata; in svm_vcpu_configure()
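
Finally, the create/init/destroy references outline the vCPU lifecycle: svm_vcpu_create() allocates the per-vCPU cpudata and calls svm_vcpu_init(), which allocates an ASID and loads the architectural reset state through the comm page, and svm_vcpu_destroy() releases the ASID again. A minimal sketch of that ordering, with invented names and stubbed bodies:

#include <stdlib.h>

struct cpudata {
	int asid;		/* address space ID tagging guest TLB entries */
};

struct cpu {
	struct cpudata *cpudata;
};

/* Stubs for svm_asid_alloc()/svm_asid_free() and the reset-state load. */
static void asid_alloc(struct cpu *v)		{ v->cpudata->asid = 1; }
static void asid_free(struct cpu *v)		{ v->cpudata->asid = 0; }
static void load_reset_state(struct cpu *v)	{ (void)v; }

/* Shape of svm_vcpu_init(): ASID first, then the x86 reset state. */
static void
vcpu_init(struct cpu *vcpu)
{
	asid_alloc(vcpu);
	load_reset_state(vcpu);	/* comm: state_wanted = ALL, then setstate */
}

/* Shape of svm_vcpu_create()/svm_vcpu_destroy(). */
static int
vcpu_create(struct cpu *vcpu)
{
	vcpu->cpudata = calloc(1, sizeof(*vcpu->cpudata));
	if (vcpu->cpudata == NULL)
		return -1;
	vcpu_init(vcpu);
	return 0;
}

static void
vcpu_destroy(struct cpu *vcpu)
{
	asid_free(vcpu);
	free(vcpu->cpudata);
}

int
main(void)
{
	struct cpu vcpu;

	if (vcpu_create(&vcpu) != 0)
		return 1;
	vcpu_destroy(&vcpu);
	return 0;
}
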