/freebsd-src/sys/amd64/vmm/

vmm.c
    Core amd64 VMM. Defines struct vcpu with these locking annotations:
        (a) allocated when vcpu is created
        (i) initialized when vcpu is created and when it is reinitialized
        (o) initialized the first time the vcpu is created
        [v] reads require one frozen vcpu, writes require freezing all vcpus
    Fields include enum vcpu_state state (o), int hostcpu (o), int reqidle (i), and
    tsc_offset; struct vm carries struct vcpu **vcpu, the (o) guest vcpu array.
    #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
    VMM_CTR0..VMM_CTR4(vcpu, format, ...) tracing macros.
    vcpu-related functions: vcpu_alloc, vcpu_init, vcpu_cleanup, vcpu_trace_exceptions,
    vcpu_trap_wbinvd, vm_exitinfo, vm_exitinfo_cpuset, vm_alloc_vcpu, vm_mem_allocated,
    vm_gpa_hold, vm_get_register, vm_set_register, vm_get_seg_desc, vm_set_seg_desc,
    restore_guest_fpustate, save_guest_fpustate, vcpu_set_state_locked,
    vcpu_require_state, vcpu_require_state_locked, vm_handle_rendezvous, vm_handle_hlt,
    vm_handle_paging, vm_handle_inst_emul, vm_handle_suspend, vm_handle_reqidle,
    vm_handle_db, vm_exit_suspended, vm_exit_debug, vm_exit_rendezvous,
    vm_exit_reqidle, vm_exit_astpending, vm_run, vm_restart_instruction,
    vm_exit_intinfo, nested_fault, vcpu_exception_intinfo, vm_entry_intinfo,
    vm_get_intinfo, vm_inject_exception, vm_inject_fault, vm_inject_pf, vm_inject_nmi,
    vm_nmi_pending, vm_nmi_clear, vm_inject_extint, vm_extint_pending, vm_extint_clear,
    vm_get_capability, vm_set_capability, vcpu_vm, vcpu_vcpuid, vm_lapic,
    vcpu_set_state, vcpu_get_state, vm_activate_cpu, vm_suspend_cpu, vm_resume_cpu,
    vcpu_debugged, vcpu_stats, vm_get_x2apic_state, vm_set_x2apic_state,
    vcpu_notify_event_locked, vcpu_notify_event, vm_smp_rendezvous, vm_copy_setup,
    vm_get_rescnt, vm_get_wiredcnt, vm_snapshot_vcpus, vm_snapshot_vcpu,
    vm_set_tsc_offset, vm_restore_time.
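A minimal sketch (not taken from the file) of the freeze/unfreeze pattern the
annotations above imply: code that inspects per-vcpu state from outside the vcpu
thread first moves the vcpu to the frozen state, does its work, then returns it to
idle. The VCPU_FROZEN/VCPU_IDLE enum values are assumptions about enum vcpu_state.

    static int
    inspect_vcpu_rip(struct vcpu *vcpu, uint64_t *rip)
    {
            int error;

            /* Freeze the vcpu so its register state cannot change underneath us. */
            error = vcpu_set_state(vcpu, VCPU_FROZEN, true);
            if (error != 0)
                    return (error);

            error = vm_get_register(vcpu, VM_REG_GUEST_RIP, rip);

            /* Let the vcpu run again regardless of the read result. */
            (void)vcpu_set_state(vcpu, VCPU_IDLE, false);
            return (error);
    }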
vmm_lapic.h
    struct vcpu;
    int lapic_rdmsr(struct vcpu *vcpu, u_int msr, uint64_t *rval, bool *retu);
    int lapic_wrmsr(struct vcpu *vcpu, u_int msr, uint64_t wval, bool *retu);
    int lapic_mmio_read(struct vcpu *vcpu, uint64_t gpa, ...);
    int lapic_mmio_write(struct vcpu *vcpu, uint64_t gpa, ...);
    int lapic_set_intr(struct vcpu *vcpu, int vector, bool trig);

    lapic_intr_level(struct vcpu *vcpu, int vector)
    {
            return (lapic_set_intr(vcpu, vector, LAPIC_TRIG_LEVEL));
    }

    lapic_intr_edge(struct vcpu *vcpu, int vector)
    {
            return (lapic_set_intr(vcpu, vector, LAPIC_TRIG_EDGE));
    }
vmm_instruction_emul.c
    Register access helpers built on vm_get_register()/vm_set_register():
    vie_read_register, vie_read_bytereg, vie_write_bytereg, vie_update_register.
    Instruction emulation entry points, each taking (struct vcpu *vcpu, uint64_t gpa,
    struct vie *vie, ..., mem_region_read_t memread, mem_region_write_t memwrite,
    void *arg): emulate_mov, emulate_movx, emulate_movs, emulate_stos, emulate_and,
    emulate_or, emulate_cmp, emulate_test, emulate_bextr, emulate_add, emulate_sub,
    emulate_stack_op, emulate_push, emulate_pop, emulate_group1, emulate_bittest,
    emulate_twob_group15, plus the dispatcher vmm_emulate_instruction.
    Guest address translation and instruction fetch: get_gla, ptp_hold, _vm_gla2gpa,
    vm_gla2gpa, vm_gla2gpa_nofault, vmm_fetch_instruction, verify_gla,
    vmm_decode_instruction.
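The emulate_*() entry points and vmm_emulate_instruction() are driven by
caller-supplied memread/memwrite callbacks. A minimal sketch of such a pair,
backing a single hypothetical 64-bit register; the callback shape is inferred from
the mem_read()/mem_write() wrappers listed under bhyve's mem.c below.

    /* Hypothetical single-register backing store for an emulated MMIO access. */
    struct dummy_reg {
            uint64_t value;
    };

    static int
    dummy_memread(struct vcpu *vcpu __unused, uint64_t gpa __unused,
        uint64_t *rval, int size __unused, void *arg)
    {
            struct dummy_reg *r = arg;

            *rval = r->value;       /* return the current register contents */
            return (0);
    }

    static int
    dummy_memwrite(struct vcpu *vcpu __unused, uint64_t gpa __unused,
        uint64_t wval, int size __unused, void *arg)
    {
            struct dummy_reg *r = arg;

            r->value = wval;        /* latch the value written by the guest */
            return (0);
    }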
/freebsd-src/sys/amd64/vmm/amd/

svm.c
    AMD SVM backend, operating on struct svm_vcpu. svm_set_tsc_offset() writes the
    offset into the VMCB control area, marks it dirty with svm_set_dirty(vcpu,
    VMCB_CACHE_I), and records it via vm_set_tsc_offset(vcpu->vcpu, offset).
    svm_msr_perm() allows a vcpu to read or write an MSR without trapping into the
    hypervisor. Intercept management: svm_get_intercept, svm_set_intercept,
    svm_enable_intercept, svm_disable_intercept. Other vcpu-related functions:
    vmcb_init, svm_vcpu_init, svm_inout_str_seginfo, svm_handle_io, svm_eventinject,
    svm_update_virqinfo, svm_save_intinfo, vintr_intercept_enabled,
    enable_intr_window_exiting, disable_intr_window_exiting, svm_modify_intr_shadow,
    svm_get_intr_shadow, nmi_blocked, enable_nmi_blocking, clear_nmi_blocking,
    svm_write_efer, emulate_wrmsr, emulate_rdmsr, svm_vmexit, svm_inj_intinfo,
    svm_inj_interrupts, svm_pmap_activate, svm_run, svm_vcpu_cleanup, svm_getreg,
    svm_setreg, svm_setcap, svm_getcap, svm_vlapic_init, svm_vcpu_snapshot,
    svm_restore_tsc.
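A minimal sketch of how the intercept helpers above are typically used, here to make
guest HLT trap into the hypervisor; the VMCB_CTRL1_INTCPT and VMCB_INTCPT_HLT names
are assumptions about the vmcb.h constants, not quoted from this listing.

    static void
    example_trap_hlt(struct svm_vcpu *vcpu, bool trap)
    {
            /*
             * The helpers take an intercept-word index and a bitmask; enabling
             * the HLT intercept forces guest HLT to cause a #VMEXIT.
             */
            if (trap)
                    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT);
            else
                    svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT);
    }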
svm_softc.h
    struct svm_vcpu members: struct vcpu *vcpu; struct vmcb *vmcb (hardware saved
    vcpu context); struct svm_regctx swctx (software saved vcpu context); int lastcpu
    (host cpu that the vcpu last ran on); long eptgen (pmap->pm_eptgen when the vcpu
    last ran); uint32_t rflags_tf (saved RFLAGS.TF value when single-stepping a vcpu).
    SVM_CTR0, SVM_CTR1, ... tracing macros expand to VCPU_CTR*((vcpu)->sc->vm,
    (vcpu)->vcpuid, format, ...).
vmcb.c
    VMCB accessors for struct svm_vcpu: vmcb_access() validates the access size
    ("Invalid size %d for VMCB access") and marks the whole VMCB dirty with
    svm_set_dirty(vcpu, 0xffffffff); vmcb_read() returns CR8 via
    vlapic_get_cr8(vm_lapic(vcpu->vcpu)); plus vmcb_write, vmcb_setdesc, vmcb_getdesc,
    vmcb_getany, vmcb_setany, vmcb_snapshot_desc, vmcb_snapshot_any.
/freebsd-src/sys/arm64/vmm/

vmm.c
    arm64 counterpart of the amd64 vmm.c. struct vcpu carries its state, the host
    cpuid it last ran on, and its vcpuid; struct vm holds struct vcpu **vcpu, the (i)
    guest vcpu array; vcpu_lock_init(v) uses mtx_init(&((v)->mtx), "vcpu lock", 0,
    MTX_SPIN). Defines VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime") and a
    sysctl for the "IPI vector used for vcpu notifications". vcpu-related functions:
    vcpu_alloc, vcpu_init, vcpu_cleanup, vm_exitinfo, vm_alloc_vcpu, vm_mem_allocated,
    vm_gla2gpa_nofault, vmm_reg_raz, vmm_reg_read_arg, vmm_reg_wi, vm_handle_reg_emul,
    vm_handle_inst_emul, vm_handle_smccc_call, vm_handle_wfi, vm_handle_paging,
    vm_handle_suspend, vm_exit_suspended, vm_exit_debug, vm_activate_cpu,
    vm_suspend_cpu, vm_resume_cpu, vcpu_debugged, vcpu_stats,
    vcpu_notify_event_locked, vcpu_notify_event, restore_guest_fpustate,
    save_guest_fpustate, vcpu_set_state_locked, vcpu_require_state,
    vcpu_require_state_locked, vm_get_capability, vm_set_capability, vcpu_vm,
    vcpu_vcpuid, vcpu_get_cookie, vcpu_set_state, vcpu_get_state, vm_gpa_hold,
    vm_get_register, vm_set_register, vm_inject_exception(vcpu, esr, far), vm_run.
/freebsd-src/sys/amd64/include/

vmm.h
    Public amd64 vmm(4) interface. Forward-declares struct vcpu; defines
    vmi_vcpu_init_func_t, a void *(*)(void *vmi, struct vcpu *vcpu, ...); declares
    struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid), vm_gpa_hold,
    vm_mem_allocated, and vm_get_register. Notes that APIs that inspect the guest
    memory map require only a *single* vcpu to be frozen. Inline helpers:
    vcpu_rendezvous_pending(vcpu, info), vcpu_is_running(vcpu, hostcpu),
    vcpu_should_yield(vcpu), vm_inject_ud(vcpu), vm_inject_gp(vcpu),
    vm_inject_ac(vcpu, errcode), vm_inject_ss(vcpu, errcode).
/freebsd-src/lib/libvmmapi/

vmmapi.h
    struct vcpu;
    struct vcpu *vm_vcpu_open(struct vmctx *ctx, int vcpuid);
    void vm_vcpu_close(struct vcpu *vcpu);
    int vcpu_id(struct vcpu *vcpu);
    int vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging, ...);
    int vm_gla2gpa_nofault(struct vcpu *vcpu, ...);

vmmapi.c
    vm_vcpu_open() mallocs a struct vcpu, stores the vmctx pointer and vcpuid in it,
    and returns it; vm_vcpu_close() and vcpu_id() release and query the handle. All
    per-vcpu requests go through vcpu_ioctl(vcpu, cmd, arg). vcpu-related functions:
    vm_set_register, vm_get_register, vm_set_register_set, vm_get_register_set,
    vm_run(vcpu, vmrun), vm_get_capability, vm_set_capability, vm_get_stats,
    vm_gla2gpa, vm_gla2gpa_nofault, vm_copy_setup, vm_activate_cpu, vm_suspend_cpu,
    vm_resume_cpu, vm_get_intinfo, vm_set_intinfo, vm_restart_instruction.
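A minimal userland sketch of the vcpu handle lifecycle exposed by vmmapi.h; the VM
name is hypothetical, and vm_open()/vm_close() and VM_REG_GUEST_RIP come from other
parts of libvmmapi and machine/vmm.h that are not quoted in this listing.

    #include <err.h>
    #include <stdint.h>
    #include <stdio.h>

    #include <machine/vmm.h>
    #include <vmmapi.h>

    int
    main(void)
    {
            struct vmctx *ctx;
            struct vcpu *vcpu;
            uint64_t rip;

            ctx = vm_open("testvm");        /* hypothetical, pre-created VM */
            if (ctx == NULL)
                    err(1, "vm_open");

            vcpu = vm_vcpu_open(ctx, 0);    /* handle for vcpu 0 */
            if (vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip) != 0)
                    err(1, "vm_get_register");
            printf("vcpu %d RIP %#lx\n", vcpu_id(vcpu), rip);

            vm_vcpu_close(vcpu);
            vm_close(ctx);
            return (0);
    }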
/freebsd-src/sys/arm64/include/

vmm.h
    arm64 equivalent of the amd64 header: forward-declares struct vcpu and declares
    vm_alloc_vcpu, vm_gpa_hold, vm_mem_allocated, vm_gla2gpa_nofault, and
    vm_get_register, with the same note that APIs inspecting the guest memory map
    require only a *single* vcpu to be frozen. Inline helpers:
    vcpu_is_running(vcpu, hostcpu) and vcpu_should_yield(vcpu).
/freebsd-src/usr.sbin/bhyve/amd64/

task_switch.c
    Hardware task-switch emulation. GETREG(vcpu, reg) and SETREG(vcpu, reg, val) wrap
    vm_get_register()/vm_set_register(); sel_exception(vcpu, vector, sel, ext)
    injects a selector fault via vm_inject_fault(vcpu, vector, 1, sel);
    desc_table_limit_check(vcpu, sel) and desc_table_rw(vcpu, paging, ...) fetch the
    descriptor table base/limit/access with vm_get_desc().
vmexit.c
    amd64 bhyve VM-exit handlers. vm_inject_fault(vcpu, vector, errcode_valid,
    errcode) wraps vm_inject_exception(); vmexit_inout() forwards to
    emulate_inout(ctx, vcpu, vme); vmexit_rdmsr() calls emulate_rdmsr(vcpu,
    vme->u.msr.code, &val), logs "rdmsr to register %#x on vcpu %d" with
    vcpu_id(vcpu) and injects #GP via vm_inject_gp(vcpu) on failure, otherwise writes
    the result back starting with vm_set_register(vcpu, VM_REG_GUEST_RAX, eax).
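A minimal sketch of the RDMSR completion path suggested by the excerpt above: the
64-bit MSR value is split into EAX/EDX and written back through vm_set_register().
Only the RAX write is visible in this listing; the RDX half is an assumption.

    static int
    rdmsr_writeback(struct vcpu *vcpu, uint64_t val)
    {
            uint32_t eax, edx;
            int error;

            eax = val;              /* low 32 bits -> EAX */
            edx = val >> 32;        /* high 32 bits -> EDX */

            error = vm_set_register(vcpu, VM_REG_GUEST_RAX, eax);
            if (error == 0)
                    error = vm_set_register(vcpu, VM_REG_GUEST_RDX, edx);
            return (error);
    }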
/freebsd-src/lib/libvmmapi/amd64/

vmmapi_machdep.c
    amd64-specific libvmmapi calls, all built on vcpu_ioctl(): vm_set_desc(vcpu, reg,
    ...) and vm_get_desc(vcpu, reg, &base, &limit, &access) issue
    VM_SET_SEGMENT_DESCRIPTOR/VM_GET_SEGMENT_DESCRIPTOR; vm_get_seg_desc(vcpu, reg,
    seg_desc) layers on vm_get_desc(); vm_lapic_irq(vcpu, vector) and
    vm_lapic_local_irq(vcpu, vector) issue VM_LAPIC_IRQ/VM_LAPIC_LOCAL_IRQ.
vmmapi_freebsd_machdep.c
    Sets up a vcpu's register set so that it will begin execution at the FreeBSD
    entry point. vm_setup_freebsd_registers_i386(vcpu, eip, ...) enables
    VM_CAP_UNRESTRICTED_GUEST if it is not already set, programs CR0, clears CR4 and
    EFER, maps the GDT with vm_map_gpa(vcpu->ctx, gdtbase, 0x1000) and loads it via
    vm_set_desc(vcpu, VM_REG_GUEST_GDTR, ...), then sets RFLAGS with
    vm_set_register(vcpu, VM_REG_GUEST_RFLAGS, rflags).
/freebsd-src/sys/amd64/vmm/intel/

vmx.c
    Intel VT-x backend, operating on struct vmx_vcpu. vmx_modinit() notes that the
    MSR bitmap is currently per-VM rather than per-vCPU. vmx_vcpu_init() allocates a
    struct vmx_vcpu with M_WAITOK | M_ZERO and links it to its vmx softc and generic
    struct vcpu. Other vcpu-related functions: vmx_handle_cpuid, vmx_run_trace,
    vmx_exit_trace, vmx_astpending_trace, vmx_invvpid, vmx_set_pcpu_defaults,
    vmx_set_int_window_exiting, vmx_clear_int_window_exiting,
    vmx_set_nmi_window_exiting, vmx_clear_nmi_window_exiting, vmx_set_tsc_offset,
    vmx_inject_nmi, vmx_inject_interrupts, vmx_restore_nmi_blocking,
    vmx_clear_nmi_blocking, vmx_assert_nmi_blocking, vmx_emulate_xsetbv,
    vmx_get_guest_reg, vmx_set_guest_reg, vmx_emulate_cr0_access,
    vmx_emulate_cr4_access, vmx_emulate_cr8_access, inout_str_index, inout_str_count,
    inout_str_seginfo, apic_access_virtualization, x2apic_virtualization,
    vmx_handle_apic_write, apic_access_fault, vmx_handle_apic_access, emulate_wrmsr,
    emulate_rdmsr, vmx_exit_process, vmx_exit_handle_nmi, vmx_run, vmx_vcpu_cleanup,
    vmx_get_intr_shadow, vmx_modify_intr_shadow, vmx_getreg, vmx_setreg, vmx_getdesc,
    vmx_setdesc, vmx_getcap, vmx_setcap, vmx_enable_x2apic_mode_ts,
    vmx_enable_x2apic_mode_vid, vmx_vlapic_init, vmx_vcpu_snapshot, vmx_restore_tsc.
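A minimal sketch of the interrupt-window-exiting toggle that
vmx_set_int_window_exiting()/vmx_clear_int_window_exiting() implement: set or clear
the corresponding primary processor-based control and write it back to the VMCS.
The field and bit names (VMCS_PRI_PROC_BASED_CTLS, PROCBASED_INT_WINDOW_EXITING) and
the cached cap.proc_ctls member are assumptions about the surrounding vmx code.

    static void
    example_set_int_window_exiting(struct vmx_vcpu *vcpu, bool enable)
    {
            /*
             * When a pending interrupt cannot be injected (e.g. RFLAGS.IF is
             * clear), request a VM exit as soon as the guest can accept it.
             */
            if (enable)
                    vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
            else
                    vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
            vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
    }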
vmx.h
    struct vmx_vcpu members: struct vcpu *vcpu; int lastcpu (host cpu that this
    'vcpu' last ran on). VMX_CTR0..VMX_CTR3 tracing macros expand to
    VCPU_CTR*((vcpu)->vmx->vm, (vcpu)->vcpuid, format, ...).
vmx_msr.c
    Guest MSR handling for struct vmx_vcpu. vmx_msr_guest_init(vmx, vcpu) does
    one-time work for vcpuid 0 and seeds guest_msrs[IDX_MSR_PAT] with
    PAT_VALUE(0, PAT_WRITE_BACK) | ...; vmx_msr_guest_enter(vcpu) loads the guest
    copies of MSR_LSTAR, MSR_CSTAR, MSR_STAR, MSR_SF_MASK, and MSR_KGSBASE with
    wrmsr(); vmx_msr_guest_enter_tsc_aux(vmx, vcpu) handles the TSC_AUX MSR on entry.
/freebsd-src/sys/contrib/xen/

vcpu.h
    VCPU initialisation, query, and hotplug hypercall interface. Each operation takes
    @cmd == VCPUOP_??? (the VCPU operation) and @vcpuid == the VCPU to operate on.
    VCPUOP_initialise: initialise a VCPU. Each VCPU can be initialised only once, and
    a newly-initialised VCPU will not run until it is brought up by VCPUOP_up. The
    extra argument is a pointer to a structure containing the initial state for the
    VCPU (the x86-specific details are elided in this listing).
    VCPUOP_up: bring up a VCPU, making it runnable; this operation will fail if the
    VCPU has not been initialised (VCPUOP_initialise).
    VCPUOP_down: bring down a VCPU (i.e., make it non-runnable).
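A minimal sketch of the bring-up sequence these comments describe, assuming the
usual HYPERVISOR_vcpu_op() hypercall wrapper and an already-filled
vcpu_guest_context; both are assumptions about the surrounding Xen support code, not
part of this header excerpt.

    /* Bring a secondary virtual CPU online: initialise once, then make runnable. */
    static int
    example_bring_up_vcpu(int vcpuid, struct vcpu_guest_context *ctxt)
    {
            int error;

            /* Each VCPU can be initialised only once. */
            error = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpuid, ctxt);
            if (error != 0)
                    return (error);

            /* A newly-initialised VCPU will not run until brought up. */
            return (HYPERVISOR_vcpu_op(VCPUOP_up, vcpuid, NULL));
    }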
/freebsd-src/usr.sbin/bhyvectl/amd64/

bhyvectl_machdep.c
    bhyvectl(8) amd64 debug output. bhyvectl_dump_vm_run_exitcode(vmexit, vcpu)
    prints "vm exit[%d]" for the given vcpu; print_msr_pm(msr, vcpu, readable,
    writeable) prints one MSR-permission-bitmap entry as "%-20s[%d]\t\t%c%c" with
    msr_name(msr); dump_amd_msr_pm(bitmap, vcpu) walks the AMD bitmap ranges
    (including MSR_AMD6TH_START and MSR_AMD7TH_START) and dump_intel_msr_pm(bitmap,
    vcpu) does the same for Intel.
/freebsd-src/usr.sbin/bhyve/aarch64/

vmexit.c
    aarch64 bhyve VM-exit handlers: vmexit_inst_emul() forwards to emulate_mem(vcpu,
    vme->u.inst_emul.gpa, vie, ...); vmexit_reg_emul(), vmexit_suspend() (which uses
    vcpu_id(vcpu)), vmexit_debug(), vmexit_smccc() (smccc_affinity_info() iterates
    over all guest vcpus with "for (int vcpu = 0; vcpu < guest_ncpus; vcpu++)"),
    vmexit_brk(), and vmexit_ss().
/freebsd-src/sys/arm64/vmm/io/

vtimer.c
    Generic timer emulation for arm64 vcpus. vtimer_virtual_timer_intr() warns
    "No active vcpu" when none is running; timer interrupts are delivered with
    vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), ...) from the interrupt
    handler, the injection callouts, and vtimer_schedule_irq()/vtimer_remove_irq().
    Register emulation callbacks: vtimer_phys_ctl_read, vtimer_phys_ctl_write,
    vtimer_phys_cnt_read, vtimer_phys_cnt_write, vtimer_phys_cval_read,
    vtimer_phys_cval_write, vtimer_phys_tval_read, vtimer_phys_tval_write.
vtimer.h
    int vtimer_phys_ctl_read(struct vcpu *vcpu, uint64_t *rval, void *arg);
    int vtimer_phys_ctl_write(struct vcpu *vcpu, uint64_t wval, void *arg);
    int vtimer_phys_cnt_read(struct vcpu *vcpu, uint64_t *rval, void *arg);
    int vtimer_phys_cnt_write(struct vcpu *vcpu, uint64_t wval, void *arg);
    int vtimer_phys_cval_read(struct vcpu *vcpu, uint64_t *rval, void *arg);
    int vtimer_phys_cval_write(struct vcpu *vcpu, uint64_t wval, void *arg);
    int vtimer_phys_tval_read(struct vcpu *vcpu, uint64_t *rval, void *arg);
    int vtimer_phys_tval_write(struct vcpu *vcpu, uint64_t wval, void *arg);
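The vtimer accessors above follow the same read/write callback shape as
vmm_reg_raz()/vmm_reg_wi() in the arm64 vmm.c listed earlier. A minimal sketch of a
register that reads back a fixed value and ignores writes; the names are
hypothetical.

    /* Read-as-constant: hand back the value passed as the callback argument. */
    static int
    example_reg_read(struct vcpu *vcpu __unused, uint64_t *rval, void *arg)
    {
            *rval = (uintptr_t)arg;
            return (0);
    }

    /* Write-ignored: accept and discard guest writes to the register. */
    static int
    example_reg_write(struct vcpu *vcpu __unused, uint64_t wval __unused,
        void *arg __unused)
    {
            return (0);
    }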
/freebsd-src/usr.sbin/bhyve/

mem.c
    bhyve MMIO dispatch. Keeps a per-vCPU cache of the most recently used struct
    mem_range, since most accesses from a vCPU will be to the same range. mem_read()
    and mem_write() invoke (*mr->handler)(vcpu, MEM_F_READ/MEM_F_WRITE, gpa, size,
    val, mr->arg1, mr->arg2); access_memory(vcpu, paddr, cb, arg) looks up the range,
    first consulting and then updating the per-vCPU cache keyed by vcpu_id(vcpu),
    before calling the mem_cb_t callback.
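A minimal sketch of a device handler matching the (*mr->handler)() invocation shown
above; the register_mem() call and the struct mem_range field names are assumptions
about bhyve's mem.h, and the device itself is hypothetical.

    static int
    dummy_mmio_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr __unused,
        int size __unused, uint64_t *val, void *arg1, long arg2 __unused)
    {
            uint64_t *reg = arg1;   /* backing storage for one 64-bit register */

            if (dir == MEM_F_READ)
                    *val = *reg;
            else
                    *reg = *val;
            return (0);
    }

    /* Registration, assuming bhyve's register_mem() and struct mem_range layout. */
    static uint64_t dummy_reg;
    static struct mem_range dummy_range = {
            .name = "dummy",
            .flags = MEM_F_RW,
            .handler = dummy_mmio_handler,
            .arg1 = &dummy_reg,
            .base = 0xd0000000,     /* hypothetical guest-physical base */
            .size = 0x1000,
    };
    /* ... register_mem(&dummy_range) would run during device initialization ... */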
bhyverun.c
    bhyve main loop and vcpu setup. Per-vcpu bookkeeping holds a struct vcpu *vcpu;
    a comment notes that, per the manual page syntax specification, the default
    results in a topology of 1 vCPU. bhyve_pincpu_parse() parses "vcpu:pcpu" pairs
    with sscanf(opt, "%d:%d", &vcpu, &pcpu) and rejects a negative vcpu ("invalid
    vcpu '%d'"); parse_cpuset(vcpu, list, set) and build_vcpumaps() build the pinning
    maps; fbsdrun_deletecpu(vcpu), num_vcpus_allowed(ctx, vcpu), and
    vm_loop(ctx, vcpu) manage vcpu lifetime and the run loop.