Lines Matching full:vcpu
97 * (a) allocated when vcpu is created
98 * (i) initialized when vcpu is created and when it is reinitialized
99 * (o) initialized the first time the vcpu is created
102 struct vcpu {
104 enum vcpu_state state; /* (o) vcpu state */
106 int hostcpu; /* (o) vcpu's host cpu */
107 int reqidle; /* (i) request vcpu to idle */
128 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
160 * [v] reads require one frozen vcpu, writes require freezing all vcpus
187 struct vcpu **vcpu; /* (o) guest vcpus */
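
The "[v]" annotation above states the section's central locking rule: reading such a field requires only the reader's own vcpu to be frozen, while writing requires freezing every vcpu. A minimal sketch of a writer honoring that rule; vm_get_maxcpus() is an assumption, while vm_vcpu() and vcpu_set_state() appear later in this listing:

/*
 * Sketch only: freeze every vcpu before mutating a "[v]" field.
 * The from_idle argument makes vcpu_set_state() wait for a running
 * vcpu to go idle before freezing it.
 */
static int
vm_freeze_all_vcpus(struct vm *vm)
{
	struct vcpu *vcpu;
	uint16_t i;
	int error;

	for (i = 0; i < vm_get_maxcpus(vm); i++) {
		vcpu = vm_vcpu(vm, i);
		if (vcpu == NULL)
			continue;
		error = vcpu_set_state(vcpu, VCPU_FROZEN, true);
		if (error != 0)
			return (error);	/* caller must unwind */
	}
	return (0);
}
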
197 #define VMM_CTR0(vcpu, format) \
198 VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)
200 #define VMM_CTR1(vcpu, format, p1) \
201 VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)
203 #define VMM_CTR2(vcpu, format, p1, p2) \
204 VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)
206 #define VMM_CTR3(vcpu, format, p1, p2, p3) \
207 VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)
209 #define VMM_CTR4(vcpu, format, p1, p2, p3, p4) \
210 VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
241 DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
266 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
282 "IPI vector used for vcpu notifications");
299 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
302 VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
353 vcpu_cleanup(struct vcpu *vcpu, bool destroy)
355 vmmops_vlapic_cleanup(vcpu->vlapic);
356 vmmops_vcpu_cleanup(vcpu->cookie);
357 vcpu->cookie = NULL;
359 vmm_stat_free(vcpu->stats);
360 fpu_save_area_free(vcpu->guestfpu);
361 vcpu_lock_destroy(vcpu);
362 free(vcpu, M_VM);
366 static struct vcpu *
369 struct vcpu *vcpu;
372 ("vcpu_init: invalid vcpu %d", vcpu_id));
374 vcpu = malloc(sizeof(*vcpu), M_VM, M_WAITOK | M_ZERO);
375 vcpu_lock_init(vcpu);
376 vcpu->state = VCPU_IDLE;
377 vcpu->hostcpu = NOCPU;
378 vcpu->vcpuid = vcpu_id;
379 vcpu->vm = vm;
380 vcpu->guestfpu = fpu_save_area_alloc();
381 vcpu->stats = vmm_stat_alloc();
382 vcpu->tsc_offset = 0;
383 return (vcpu);
387 vcpu_init(struct vcpu *vcpu)
389 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid);
390 vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
391 vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
392 vcpu->reqidle = 0;
393 vcpu->exitintinfo = 0;
394 vcpu->nmi_pending = 0;
395 vcpu->extint_pending = 0;
396 vcpu->exception_pending = 0;
397 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
398 fpu_save_area_reset(vcpu->guestfpu);
399 vmm_stat_init(vcpu->stats);
403 vcpu_trace_exceptions(struct vcpu *vcpu)
410 vcpu_trap_wbinvd(struct vcpu *vcpu)
416 vm_exitinfo(struct vcpu *vcpu)
418 return (&vcpu->exitinfo);
422 vm_exitinfo_cpuset(struct vcpu *vcpu)
424 return (&vcpu->exitinfo_cpuset);
542 if (vm->vcpu[i] != NULL)
543 vcpu_init(vm->vcpu[i]);
556 struct vcpu *
559 struct vcpu *vcpu;
564 vcpu = (struct vcpu *)
565 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]);
566 if (__predict_true(vcpu != NULL))
567 return (vcpu);
570 vcpu = vm->vcpu[vcpuid];
571 if (vcpu == NULL && !vm->dying) {
572 vcpu = vcpu_alloc(vm, vcpuid);
573 vcpu_init(vcpu);
576 * Ensure vCPU is fully created before updating pointer
579 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
580 (uintptr_t)vcpu);
583 return (vcpu);
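
Lines 556-583 above show a double-checked publication pattern: a lock-free fast path whose acquire load pairs with the release store that publishes a fully constructed vcpu. A filled-in sketch of the slow path; the function name, the bounds check, and the sx(9) interlock vcpus_init_lock are assumptions, the rest is taken from the fragments:

struct vcpu *
vm_alloc_vcpu(struct vm *vm, int vcpuid)	/* name assumed */
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm))
		return (NULL);

	/* Fast path: acquire pairs with the release store below. */
	vcpu = (struct vcpu *)
	    atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]);
	if (__predict_true(vcpu != NULL))
		return (vcpu);

	sx_xlock(&vm->vcpus_init_lock);		/* lock name assumed */
	vcpu = vm->vcpu[vcpuid];
	if (vcpu == NULL && !vm->dying) {
		vcpu = vcpu_alloc(vm, vcpuid);
		vcpu_init(vcpu);
		/*
		 * Publish only after full construction so lock-free
		 * readers never observe a half-initialized vcpu.
		 */
		atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
		    (uintptr_t)vcpu);
	}
	sx_xunlock(&vm->vcpus_init_lock);
	return (vcpu);
}
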
631 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK |
699 if (vm->vcpu[i] != NULL)
700 vcpu_cleanup(vm->vcpu[i], destroy);
727 free(vm->vcpu, M_VM);
806 * This function is called in the context of a running vcpu which acts as
810 vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
812 struct vm *vm = vcpu->vm;
818 state = vcpu_get_state(vcpu, &hostcpu);
820 ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
1218 vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
1223 * The current vcpu should be frozen to ensure 'vm_memmap[]'
1226 int state = vcpu_get_state(vcpu, NULL);
1227 KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
1230 return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie));
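
The KASSERT at line 1227 enforces the "[v]" rule from the struct vm comment block: the calling vcpu must be frozen so the memory map cannot change while the page is held. A hypothetical caller, with vm_gpa_release() assumed as the matching unwind:

/* Hypothetical usage; vm_gpa_release() is an assumption. */
void *cookie, *hva;

hva = vm_gpa_hold(vcpu, gpa, PAGE_SIZE, VM_PROT_READ, &cookie);
if (hva != NULL) {
	memcpy(buf, hva, len);		/* direct host-side access */
	vm_gpa_release(cookie);
}
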
1250 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
1256 return (vmmops_getreg(vcpu->cookie, reg, retval));
1260 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
1267 error = vmmops_setreg(vcpu->cookie, reg, val);
1272 VMM_CTR1(vcpu, "Setting nextrip to %#lx", val);
1273 vcpu->nextrip = val;
1310 vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
1316 return (vmmops_getdesc(vcpu->cookie, reg, desc));
1320 vm_set_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
1326 return (vmmops_setdesc(vcpu->cookie, reg, desc));
1330 restore_guest_fpustate(struct vcpu *vcpu)
1338 fpurestore(vcpu->guestfpu);
1342 load_xcr(0, vcpu->guest_xcr0);
1352 save_guest_fpustate(struct vcpu *vcpu)
1360 vcpu->guest_xcr0 = rxcr(0);
1366 fpusave(vcpu->guestfpu);
1370 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
1373 vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
1378 vcpu_assert_locked(vcpu);
1383 * ioctl() operating on a vcpu at any point.
1386 while (vcpu->state != VCPU_IDLE) {
1387 vcpu->reqidle = 1;
1388 vcpu_notify_event_locked(vcpu, false);
1389 VMM_CTR1(vcpu, "vcpu state change from %s to "
1390 "idle requested", vcpu_state2str(vcpu->state));
1391 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
1394 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
1395 "vcpu idle state"));
1398 if (vcpu->state == VCPU_RUNNING) {
1399 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
1400 "mismatch for running vcpu", curcpu, vcpu->hostcpu));
1402 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
1403 "vcpu that is not running", vcpu->hostcpu));
1412 switch (vcpu->state) {
1429 VMM_CTR2(vcpu, "vcpu state changed from %s to %s",
1430 vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
1432 vcpu->state = newstate;
1434 vcpu->hostcpu = curcpu;
1436 vcpu->hostcpu = NOCPU;
1439 wakeup(&vcpu->state);
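
The switch at line 1412, elided by the match, is the heart of the vcpu state machine. Consistent with the surrounding assertions, every transition funnels through VCPU_FROZEN; a sketch of the check:

/*
 * Sketch of the elided transition check:
 * IDLE <-> FROZEN <-> {RUNNING, SLEEPING}.
 */
switch (vcpu->state) {
case VCPU_IDLE:
case VCPU_RUNNING:
case VCPU_SLEEPING:
	error = (newstate != VCPU_FROZEN);	/* must enter FROZEN */
	break;
case VCPU_FROZEN:
	error = (newstate == VCPU_FROZEN);	/* anything but re-freeze */
	break;
default:
	error = 1;
	break;
}
if (error)
	return (EBUSY);
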
1445 vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
1449 if ((error = vcpu_set_state(vcpu, newstate, false)) != 0)
1454 vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
1458 if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
1463 vm_handle_rendezvous(struct vcpu *vcpu)
1465 struct vm *vm = vcpu->vm;
1470 vcpuid = vcpu->vcpuid;
1479 VMM_CTR0(vcpu, "Calling rendezvous func");
1480 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
1485 VMM_CTR0(vcpu, "Rendezvous completed");
1491 VMM_CTR0(vcpu, "Wait for rendezvous completion");
1507 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
1510 vm_handle_hlt(struct vcpu *vcpu, bool intr_disabled, bool *retu)
1512 struct vm *vm = vcpu->vm;
1517 vcpuid = vcpu->vcpuid;
1523 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
1525 vcpu_lock(vcpu);
1530 * software events that would cause this vcpu to wakeup.
1533 * vcpu returned from vmmops_run() and before it acquired the
1534 * vcpu lock above.
1536 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
1538 if (vm_nmi_pending(vcpu))
1541 if (vm_extint_pending(vcpu) ||
1542 vlapic_pending_intr(vcpu->vlapic, NULL)) {
1547 /* Don't go to sleep if the vcpu thread needs to yield */
1548 if (vcpu_should_yield(vcpu))
1551 if (vcpu_debugged(vcpu))
1562 VMM_CTR0(vcpu, "Halted");
1576 vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
1581 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
1582 vcpu_require_state_locked(vcpu, VCPU_FROZEN);
1583 vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
1585 vcpu_unlock(vcpu);
1594 vcpu_lock(vcpu);
1601 vcpu_unlock(vcpu);
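
Stitched together, the vm_handle_hlt() fragments (lines 1510-1601) form one loop: under the vcpu spin lock, re-check every wakeup source, then sleep in one-second slices bracketed by SLEEPING/FROZEN transitions so that vcpu_set_state_locked() can observe and interrupt the sleeper. A skeleton; the interrupt checks are simplified, and in the real code they depend on intr_disabled:

/* Skeleton of the sleep loop; t and wmesg as in the fragments. */
vcpu_lock(vcpu);
for (;;) {
	if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
		break;
	if (vm_nmi_pending(vcpu) || vm_extint_pending(vcpu) ||
	    vlapic_pending_intr(vcpu->vlapic, NULL))
		break;			/* simplified; see intr_disabled */
	if (vcpu_should_yield(vcpu) || vcpu_debugged(vcpu))
		break;

	t = ticks;
	vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
	msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);	/* 1 s slices */
	vcpu_require_state_locked(vcpu, VCPU_FROZEN);
	vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
}
vcpu_unlock(vcpu);
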
1610 vm_handle_paging(struct vcpu *vcpu, bool *retu)
1612 struct vm *vm = vcpu->vm;
1617 vme = &vcpu->exitinfo;
1631 VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx",
1641 VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, "
1651 vm_handle_inst_emul(struct vcpu *vcpu, bool *retu)
1662 vme = &vcpu->exitinfo;
1675 VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa);
1679 error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base,
1690 if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) {
1691 VMM_CTR1(vcpu, "Error decoding instruction at %#lx",
1701 vcpu->nextrip += vie->num_processed;
1702 VMM_CTR1(vcpu, "nextrip updated to %#lx after instruction decoding",
1703 vcpu->nextrip);
1720 error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite,
1727 vm_handle_suspend(struct vcpu *vcpu, bool *retu)
1729 struct vm *vm = vcpu->vm;
1736 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus);
1745 vcpu_lock(vcpu);
1748 VMM_CTR0(vcpu, "All vcpus suspended");
1753 VMM_CTR0(vcpu, "Sleeping during suspend");
1754 vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
1755 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
1756 vcpu_require_state_locked(vcpu, VCPU_FROZEN);
1758 vcpu_unlock(vcpu);
1760 vcpu_lock(vcpu);
1763 VMM_CTR0(vcpu, "Rendezvous during suspend");
1764 vcpu_unlock(vcpu);
1765 error = vm_handle_rendezvous(vcpu);
1766 vcpu_lock(vcpu);
1769 vcpu_unlock(vcpu);
1785 vm_handle_reqidle(struct vcpu *vcpu, bool *retu)
1787 vcpu_lock(vcpu);
1788 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
1789 vcpu->reqidle = 0;
1790 vcpu_unlock(vcpu);
1796 vm_handle_db(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
1808 vm_get_register(vcpu, VM_REG_GUEST_RSP, &rsp);
1809 error = vm_copy_setup(vcpu, &vme->u.dbg.paging, rsp, sizeof(uint64_t),
1857 vm_exit_suspended(struct vcpu *vcpu, uint64_t rip)
1859 struct vm *vm = vcpu->vm;
1865 vmexit = vm_exitinfo(vcpu);
1873 vm_exit_debug(struct vcpu *vcpu, uint64_t rip)
1877 vmexit = vm_exitinfo(vcpu);
1884 vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip)
1888 vmexit = vm_exitinfo(vcpu);
1892 vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1);
1896 vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip)
1900 vmexit = vm_exitinfo(vcpu);
1904 vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1);
1908 vm_exit_astpending(struct vcpu *vcpu, uint64_t rip)
1912 vmexit = vm_exitinfo(vcpu);
1916 vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1);
1920 vm_run(struct vcpu *vcpu)
1922 struct vm *vm = vcpu->vm;
1931 vcpuid = vcpu->vcpuid;
1940 vme = &vcpu->exitinfo;
1943 evinfo.iptr = &vcpu->reqidle;
1955 restore_guest_fpustate(vcpu);
1957 vcpu_require_state(vcpu, VCPU_RUNNING);
1958 error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
1959 vcpu_require_state(vcpu, VCPU_FROZEN);
1961 save_guest_fpustate(vcpu);
1963 vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
1969 vcpu->nextrip = vme->rip + vme->inst_length;
1972 error = vm_handle_reqidle(vcpu, &retu);
1975 error = vm_handle_suspend(vcpu, &retu);
1981 error = vm_handle_rendezvous(vcpu);
1985 error = vm_handle_hlt(vcpu, intr_disabled, &retu);
1988 error = vm_handle_paging(vcpu, &retu);
1991 error = vm_handle_inst_emul(vcpu, &retu);
1995 error = vm_handle_inout(vcpu, vme, &retu);
1998 error = vm_handle_db(vcpu, vme, &retu);
2003 vm_inject_ud(vcpu);
2016 error = vm_handle_ipi(vcpu, vme, &retu);
2021 vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1);
2022 VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode);
2028 vm_restart_instruction(struct vcpu *vcpu)
2034 state = vcpu_get_state(vcpu, NULL);
2037 * When a vcpu is "running" the next instruction is determined
2038 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
2042 vcpu->exitinfo.inst_length = 0;
2043 VMM_CTR1(vcpu, "restarting instruction at %#lx by "
2044 "setting inst_length to zero", vcpu->exitinfo.rip);
2047 * When a vcpu is "frozen" it is outside the critical section
2050 * 'nextrip' to the vcpu's %rip.
2052 error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
2054 VMM_CTR2(vcpu, "restarting instruction by updating "
2055 "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
2056 vcpu->nextrip = rip;
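
Lines 2028-2056 describe two restart strategies keyed on the vcpu state; reconstructed as one function (a sketch, consistent with the comments above):

int
vm_restart_instruction(struct vcpu *vcpu)
{
	enum vcpu_state state;
	uint64_t rip;
	int error;

	state = vcpu_get_state(vcpu, NULL);
	if (state == VCPU_RUNNING) {
		/*
		 * RUNNING: the next rip is exitinfo.rip + inst_length,
		 * so zeroing inst_length re-executes the instruction.
		 */
		vcpu->exitinfo.inst_length = 0;
	} else if (state == VCPU_FROZEN) {
		/* FROZEN: outside the critical section, reload %rip. */
		error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
		if (error != 0)
			return (error);
		vcpu->nextrip = rip;
	} else {
		return (EINVAL);	/* sketch; other states invalid */
	}
	return (0);
}
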
2064 vm_exit_intinfo(struct vcpu *vcpu, uint64_t info)
2080 VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info);
2081 vcpu->exitintinfo = info;
2140 nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2,
2156 VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)",
2158 vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT);
2182 vcpu_exception_intinfo(struct vcpu *vcpu)
2186 if (vcpu->exception_pending) {
2187 info = vcpu->exc_vector & 0xff;
2189 if (vcpu->exc_errcode_valid) {
2191 info |= (uint64_t)vcpu->exc_errcode << 32;
2198 vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo)
2203 info1 = vcpu->exitintinfo;
2204 vcpu->exitintinfo = 0;
2207 if (vcpu->exception_pending) {
2208 info2 = vcpu_exception_intinfo(vcpu);
2209 vcpu->exception_pending = 0;
2210 VMM_CTR2(vcpu, "Exception %d delivered: %#lx",
2211 vcpu->exc_vector, info2);
2215 valid = nested_fault(vcpu, info1, info2, retinfo);
2227 VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), "
2235 vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
2237 *info1 = vcpu->exitintinfo;
2238 *info2 = vcpu_exception_intinfo(vcpu);
2243 vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
2260 if (vcpu->exception_pending) {
2261 VMM_CTR2(vcpu, "Unable to inject exception %d due to "
2262 "pending exception %d", vector, vcpu->exc_vector);
2270 error = vm_get_register(vcpu, VM_REG_GUEST_CR0, &regval);
2282 error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0);
2287 vm_restart_instruction(vcpu);
2289 vcpu->exception_pending = 1;
2290 vcpu->exc_vector = vector;
2291 vcpu->exc_errcode = errcode;
2292 vcpu->exc_errcode_valid = errcode_valid;
2293 VMM_CTR1(vcpu, "Exception %d pending", vector);
2298 vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode)
2304 error = vm_inject_exception(vcpu, vector, errcode_valid,
2310 vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2)
2314 VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx",
2317 error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2);
2320 vm_inject_fault(vcpu, IDT_PF, 1, error_code);
2323 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
2326 vm_inject_nmi(struct vcpu *vcpu)
2329 vcpu->nmi_pending = 1;
2330 vcpu_notify_event(vcpu, false);
2335 vm_nmi_pending(struct vcpu *vcpu)
2337 return (vcpu->nmi_pending);
2341 vm_nmi_clear(struct vcpu *vcpu)
2343 if (vcpu->nmi_pending == 0)
2346 vcpu->nmi_pending = 0;
2347 vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1);
2350 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
2353 vm_inject_extint(struct vcpu *vcpu)
2356 vcpu->extint_pending = 1;
2357 vcpu_notify_event(vcpu, false);
2362 vm_extint_pending(struct vcpu *vcpu)
2364 return (vcpu->extint_pending);
2368 vm_extint_clear(struct vcpu *vcpu)
2370 if (vcpu->extint_pending == 0)
2373 vcpu->extint_pending = 0;
2374 vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1);
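
NMI and ExtINT share one pattern: set a pending flag, notify the vcpu, and let the VM-entry path test, inject, and clear, so each event is delivered exactly once. A hypothetical consumer (the injection itself is backend-specific and elided):

static void
inject_pending_events(struct vcpu *vcpu)	/* hypothetical helper */
{
	if (vm_nmi_pending(vcpu)) {
		/* backend-specific NMI injection elided */
		vm_nmi_clear(vcpu);	/* also bumps VCPU_NMI_COUNT */
	} else if (vm_extint_pending(vcpu)) {
		/* fetch the vector from the PIC, inject it, then: */
		vm_extint_clear(vcpu);	/* bumps VCPU_EXTINT_COUNT */
	}
}
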
2378 vm_get_capability(struct vcpu *vcpu, int type, int *retval)
2383 return (vmmops_getcap(vcpu->cookie, type, retval));
2387 vm_set_capability(struct vcpu *vcpu, int type, int val)
2392 return (vmmops_setcap(vcpu->cookie, type, val));
2396 vcpu_vm(struct vcpu *vcpu)
2398 return (vcpu->vm);
2402 vcpu_vcpuid(struct vcpu *vcpu)
2404 return (vcpu->vcpuid);
2407 struct vcpu *
2410 return (vm->vcpu[vcpuid]);
2414 vm_lapic(struct vcpu *vcpu)
2416 return (vcpu->vlapic);
2483 vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
2487 vcpu_lock(vcpu);
2488 error = vcpu_set_state_locked(vcpu, newstate, from_idle);
2489 vcpu_unlock(vcpu);
2495 vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
2499 vcpu_lock(vcpu);
2500 state = vcpu->state;
2502 *hostcpu = vcpu->hostcpu;
2503 vcpu_unlock(vcpu);
2509 vm_activate_cpu(struct vcpu *vcpu)
2511 struct vm *vm = vcpu->vm;
2513 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
2516 VMM_CTR0(vcpu, "activated");
2517 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
2522 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
2524 if (vcpu == NULL) {
2531 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
2534 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
2535 vcpu_notify_event(vcpu, false);
2541 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
2544 if (vcpu == NULL) {
2547 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
2550 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
2556 vcpu_debugged(struct vcpu *vcpu)
2559 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
2608 vcpu_stats(struct vcpu *vcpu)
2611 return (vcpu->stats);
2615 vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
2617 *state = vcpu->x2apic_state;
2623 vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
2628 vcpu->x2apic_state = state;
2630 vlapic_set_x2apic_state(vcpu, state);
2636 * This function is called to ensure that a vcpu "sees" a pending event
2638 * - If the vcpu thread is sleeping then it is woken up.
2639 * - If the vcpu is running on a different host_cpu then an IPI will be directed
2640 * to the host_cpu to cause the vcpu to trap into the hypervisor.
2643 vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
2647 hostcpu = vcpu->hostcpu;
2648 if (vcpu->state == VCPU_RUNNING) {
2649 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
2652 vlapic_post_intr(vcpu->vlapic, hostcpu,
2659 * If the 'vcpu' is running on 'curcpu' then it must
2661 * The pending event will be picked up when the vcpu
2666 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
2667 "with hostcpu %d", vcpu->state, hostcpu));
2668 if (vcpu->state == VCPU_SLEEPING)
2669 wakeup_one(vcpu);
2674 vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
2676 vcpu_lock(vcpu);
2677 vcpu_notify_event_locked(vcpu, lapic_intr);
2678 vcpu_unlock(vcpu);
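
The elided middle of vcpu_notify_event_locked() picks one of three delivery mechanisms; a reconstruction consistent with the fragments (ipi_cpu() and the vmm_ipinum vector tie back to the sysctl at line 282):

hostcpu = vcpu->hostcpu;
if (vcpu->state == VCPU_RUNNING) {
	KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
	if (hostcpu != curcpu) {
		if (lapic_intr)
			vlapic_post_intr(vcpu->vlapic, hostcpu,
			    vmm_ipinum);	/* posted interrupt */
		else
			ipi_cpu(hostcpu, vmm_ipinum);	/* plain IPI */
	}
	/* else: self-notify; the event is seen on the next VM-exit */
} else {
	KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
	    "with hostcpu %d", vcpu->state, hostcpu));
	if (vcpu->state == VCPU_SLEEPING)
		wakeup_one(vcpu);	/* rouse the vm_handle_hlt() sleeper */
}
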
2691 * XXX apic id is assumed to be numerically identical to vcpu id
2697 vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
2700 struct vm *vm = vcpu->vm;
2713 * call the rendezvous handler in case this 'vcpu' is one
2716 VMM_CTR0(vcpu, "Rendezvous already in progress");
2718 error = vm_handle_rendezvous(vcpu);
2726 VMM_CTR0(vcpu, "Initiating rendezvous");
2742 return (vm_handle_rendezvous(vcpu));
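
vm_smp_rendezvous() (lines 2697-2742) first helps finish any rendezvous already in flight, then installs its own and kicks every targeted vcpu before servicing its share. A sketch of the initiation protocol; rendezvous_mtx, the restart label, and the omitted bookkeeping (e.g. the set of requested cpus) are assumptions:

restart:
	mtx_lock(&vm->rendezvous_mtx);		/* mutex name assumed */
	if (vm->rendezvous_func != NULL) {
		/* Only one rendezvous at a time: help it complete. */
		mtx_unlock(&vm->rendezvous_mtx);
		error = vm_handle_rendezvous(vcpu);
		if (error != 0)
			return (error);
		goto restart;
	}
	vm->rendezvous_arg = arg;
	vm->rendezvous_func = func;
	mtx_unlock(&vm->rendezvous_mtx);

	/* Kick each target so it traps into vm_handle_rendezvous(). */
	for (i = 0; i < vm_get_maxcpus(vm); i++) {
		if (CPU_ISSET(i, &dest))
			vcpu_notify_event(vm_vcpu(vm, i), false);
	}
	return (vm_handle_rendezvous(vcpu));
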
2801 vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
2817 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
2830 hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa,
2881 * these are global stats, only return the values for vCPU 0
2887 vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
2890 if (vcpu->vcpuid == 0) {
2891 vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
2892 vmspace_resident_count(vcpu->vm->vmspace));
2897 vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
2900 if (vcpu->vcpuid == 0) {
2901 vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
2902 pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
2915 struct vcpu *vcpu;
2921 vcpu = vm->vcpu[i];
2922 if (vcpu == NULL)
2925 SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done);
2926 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done);
2927 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done);
2928 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done);
2929 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done);
2930 SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done);
2931 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done);
2932 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done);
2940 tsc = now + vcpu->tsc_offset;
2943 vcpu->tsc_offset = tsc;
2968 struct vcpu *vcpu;
2975 vcpu = vm->vcpu[i];
2976 if (vcpu == NULL)
2979 error = vmmops_vcpu_snapshot(vcpu->cookie, meta);
2982 "vCPU: %d; error: %d\n", __func__, i, error);
3036 vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset)
3038 vcpu->tsc_offset = offset;
3046 struct vcpu *vcpu;
3057 vcpu = vm->vcpu[i];
3058 if (vcpu == NULL)
3061 error = vmmops_restore_tsc(vcpu->cookie,
3062 vcpu->tsc_offset - now);
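
The snapshot fragments (lines 2940-2943) and the restore fragments (lines 3057-3062) implement a single identity: guest_tsc = host_tsc + tsc_offset. On save, the absolute guest TSC is recorded; on restore, that value is parked in tsc_offset and later re-based against the new host's counter. Condensed sketch:

/* Save/restore arithmetic, condensed from the fragments above. */
now = rdtsc();
if (meta->op == VM_SNAPSHOT_SAVE)
	tsc = now + vcpu->tsc_offset;		/* absolute guest TSC */
SNAPSHOT_VAR_OR_LEAVE(tsc, meta, ret, done);
if (meta->op == VM_SNAPSHOT_RESTORE)
	vcpu->tsc_offset = tsc;			/* parked guest TSC */

/* ...later, on the restore path, the offset is re-based: */
now = rdtsc();
error = vmmops_restore_tsc(vcpu->cookie, vcpu->tsc_offset - now);
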