Lines matching (full-text search): timer, cannot, wake, cpu

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
61 #include <machine/cpu.h>
106 int hostcpu; /* (o) vcpu's host cpu */
109 void *cookie; /* (i) cpu-specific data */
128 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
129 #define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
130 #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
131 #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
132 #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
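
These wrappers put every vcpu state change under a spin mutex, which is what lets the notification path later in the file run safely from interrupt context. A minimal usage sketch of the locking discipline (illustrative only):

    vcpu_lock(vcpu);
    vcpu_assert_locked(vcpu);   /* MA_OWNED: we hold the spin lock */
    /* ... mutate vcpu->state or other (o)-annotated fields ... */
    vcpu_unlock(vcpu);
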
163 void *cookie; /* (i) cpu-specific data */
164 void *iommu; /* (x) iommu-specific data */
169 struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */
188 /* The following describe the vm cpu topology */
198 VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)
201 VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)
204 VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)
207 VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)
210 VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
291 "WBINVD triggers a VM-exit");
325 * Upper limit on vm_maxcpu. Limited by use of uint16_t types for CPU
326 * counts as well as range of vpid values for VT-x and by the capacity
328 * vmx.c requires 'vm_maxcpu + 1 <= 0xffff', hence the '- 1' below.
330 #define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE)
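
The '- 1' exists because, per the comment, vmx.c needs vm_maxcpu + 1 distinct 16-bit VPID values to stay within 0xffff. A compile-time restatement of both constraints, sketched with FreeBSD's CTASSERT:

    /* Sketch: the comment above requires vm_maxcpu + 1 <= 0xffff,
     * and cpuset_t can only track CPU_SETSIZE vcpus. */
    CTASSERT(VM_MAXCPU + 1 <= 0xffff);
    CTASSERT(VM_MAXCPU <= CPU_SETSIZE);
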
355 vmmops_vlapic_cleanup(vcpu->vlapic);
356 vmmops_vcpu_cleanup(vcpu->cookie);
357 vcpu->cookie = NULL;
359 vmm_stat_free(vcpu->stats);
360 fpu_save_area_free(vcpu->guestfpu);
371 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
376 vcpu->state = VCPU_IDLE;
377 vcpu->hostcpu = NOCPU;
378 vcpu->vcpuid = vcpu_id;
379 vcpu->vm = vm;
380 vcpu->guestfpu = fpu_save_area_alloc();
381 vcpu->stats = vmm_stat_alloc();
382 vcpu->tsc_offset = 0;
389 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid);
390 vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
392 vcpu->reqidle = 0;
393 vcpu->exitintinfo = 0;
394 vcpu->nmi_pending = 0;
395 vcpu->extint_pending = 0;
396 vcpu->exception_pending = 0;
397 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
398 fpu_save_area_reset(vcpu->guestfpu);
399 vmm_stat_init(vcpu->stats);
418 return (&vcpu->exitinfo);
424 return (&vcpu->exitinfo_cpuset);
487 * Something bad happened - prevent new
513 * - VT-x initialization requires smp_rendezvous() and therefore must happen
515 * - vmm device initialization requires an initialized devfs.
523 vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
524 vm->iommu = NULL;
525 vm->vioapic = vioapic_init(vm);
526 vm->vhpet = vhpet_init(vm);
527 vm->vatpic = vatpic_init(vm);
528 vm->vatpit = vatpit_init(vm);
529 vm->vpmtmr = vpmtmr_init(vm);
531 vm->vrtc = vrtc_init(vm);
533 CPU_ZERO(&vm->active_cpus);
534 CPU_ZERO(&vm->debug_cpus);
535 CPU_ZERO(&vm->startup_cpus);
537 vm->suspend = 0;
538 CPU_ZERO(&vm->suspended_cpus);
541 for (int i = 0; i < vm->maxcpus; i++) {
542 if (vm->vcpu[i] != NULL)
543 vcpu_init(vm->vcpu[i]);
551 sx_xlock(&vm->vcpus_init_lock);
552 vm->dying = true;
553 sx_xunlock(&vm->vcpus_init_lock);
565 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]);
569 sx_xlock(&vm->vcpus_init_lock);
570 vcpu = vm->vcpu[vcpuid];
571 if (vcpu == NULL && !vm->dying) {
579 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
582 sx_xunlock(&vm->vcpus_init_lock);
589 sx_slock(&vm->vcpus_init_lock);
595 sx_unlock(&vm->vcpus_init_lock);
599 * The default CPU topology is a single thread per package.
626 strcpy(vm->name, name);
627 vm->vmspace = vmspace;
628 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
629 sx_init(&vm->mem_segs_lock, "vm mem_segs");
630 sx_init(&vm->vcpus_init_lock, "vm vcpus");
631 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK |
634 vm->sockets = 1;
635 vm->cores = cores_per_package; /* XXX backwards compatibility */
636 vm->threads = threads_per_core; /* XXX backwards compatibility */
637 vm->maxcpus = vm_maxcpu;
649 *sockets = vm->sockets;
650 *cores = vm->cores;
651 *threads = vm->threads;
652 *maxcpus = vm->maxcpus;
658 return (vm->maxcpus);
666 if ((sockets * cores * threads) > vm->maxcpus)
668 vm->sockets = sockets;
669 vm->cores = cores;
670 vm->threads = threads;
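
The only validation is the product check against vm->maxcpus, so any (sockets, cores, threads) triple whose product fits is accepted. A hedged usage sketch (the trailing maxcpus argument and the EINVAL return are assumptions inferred from the shape of the code above):

    /* Illustrative: present 16 vcpus as 2 sockets x 4 cores x 2 threads. */
    error = vm_set_topology(vm, 2, 4, 2, 0);
    if (error != 0)             /* EINVAL when 2 * 4 * 2 > vm->maxcpus */
        return (error);
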
685 if (vm->iommu != NULL)
686 iommu_destroy_domain(vm->iommu);
689 vrtc_cleanup(vm->vrtc);
691 vrtc_reset(vm->vrtc);
692 vpmtmr_cleanup(vm->vpmtmr);
693 vatpit_cleanup(vm->vatpit);
694 vhpet_cleanup(vm->vhpet);
695 vatpic_cleanup(vm->vatpic);
696 vioapic_cleanup(vm->vioapic);
698 for (i = 0; i < vm->maxcpus; i++) {
699 if (vm->vcpu[i] != NULL)
700 vcpu_cleanup(vm->vcpu[i], destroy);
703 vmmops_cleanup(vm->cookie);
714 mm = &vm->mem_maps[i];
724 vmmops_vmspace_free(vm->vmspace);
725 vm->vmspace = NULL;
727 free(vm->vcpu, M_VM);
728 sx_destroy(&vm->vcpus_init_lock);
729 sx_destroy(&vm->mem_segs_lock);
730 mtx_destroy(&vm->rendezvous_mtx);
749 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
763 return (vm->name);
769 sx_slock(&vm->mem_segs_lock);
775 sx_xlock(&vm->mem_segs_lock);
781 sx_unlock(&vm->mem_segs_lock);
789 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
799 vmm_mmio_free(vm->vmspace, gpa, len);
807 * an implicit lock on 'vm->mem_maps[]'.
812 struct vm *vm = vcpu->vm;
824 mm = &vm->mem_maps[i];
825 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
841 sx_assert(&vm->mem_segs_lock, SX_XLOCKED);
849 seg = &vm->mem_segs[ident];
850 if (seg->object != NULL) {
851 if (seg->len == len && seg->sysmem == sysmem)
861 seg->len = len;
862 seg->object = obj;
863 seg->sysmem = sysmem;
873 sx_assert(&vm->mem_segs_lock, SX_LOCKED);
878 seg = &vm->mem_segs[ident];
880 *len = seg->len;
882 *sysmem = seg->sysmem;
884 *objptr = seg->object;
896 seg = &vm->mem_segs[ident];
897 if (seg->object != NULL) {
898 vm_object_deallocate(seg->object);
921 seg = &vm->mem_segs[segid];
922 if (seg->object == NULL)
926 if (first < 0 || first >= last || last > seg->len)
934 m = &vm->mem_maps[i];
935 if (m->len == 0) {
944 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
949 vm_object_reference(seg->object);
952 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
955 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
961 map->gpa = gpa;
962 map->len = len;
963 map->segoff = first;
964 map->segid = segid;
965 map->prot = prot;
966 map->flags = flags;
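
Taken together with vm_alloc_memseg() above, the usual sequence is: create the backing object, then splice [first, first + len) of it into the guest physical map at [gpa, gpa + len). A hedged usage sketch (parameter order inferred from the map fields populated above):

    /* Illustrative: 1 GiB of wired system memory at guest physical 0. */
    error = vm_alloc_memseg(vm, 0, 1UL << 30, true /* sysmem */);
    if (error == 0)
        error = vm_mmap_memseg(vm, 0 /* gpa */, 0 /* segid */,
            0 /* first */, 1UL << 30, VM_PROT_ALL, VM_MEMMAP_F_WIRED);
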
977 m = &vm->mem_maps[i];
978 if (m->gpa == gpa && m->len == len &&
979 (m->flags & VM_MEMMAP_F_IOMMU) == 0) {
997 mm = &vm->mem_maps[i];
998 if (mm->len == 0 || mm->gpa < *gpa)
1000 if (mmnext == NULL || mm->gpa < mmnext->gpa)
1005 *gpa = mmnext->gpa;
1007 *segid = mmnext->segid;
1009 *segoff = mmnext->segoff;
1011 *len = mmnext->len;
1013 *prot = mmnext->prot;
1015 *flags = mmnext->flags;
1028 mm = &vm->mem_maps[ident];
1029 if (mm->len) {
1030 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
1031 mm->gpa + mm->len);
1042 if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
1057 mm = &vm->mem_maps[i];
1059 if (maxaddr < mm->gpa + mm->len)
1060 maxaddr = mm->gpa + mm->len;
1073 sx_assert(&vm->mem_segs_lock, SX_LOCKED);
1076 mm = &vm->mem_maps[i];
1080 KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
1082 mm->gpa, mm->len, mm->flags));
1083 if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
1085 mm->flags |= VM_MEMMAP_F_IOMMU;
1087 for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
1088 hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa);
1093 * Because we are in pass-through mode, the
1106 iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE);
1120 sx_assert(&vm->mem_segs_lock, SX_LOCKED);
1123 mm = &vm->mem_maps[i];
1127 if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
1129 mm->flags &= ~VM_MEMMAP_F_IOMMU;
1130 KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
1132 mm->gpa, mm->len, mm->flags));
1134 for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
1136 vmspace_pmap(vm->vmspace), gpa))),
1139 iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE);
1147 iommu_invalidate_tlb(vm->iommu);
1173 KASSERT(vm->iommu == NULL,
1176 vm->iommu = iommu_create_domain(maxaddr);
1177 if (vm->iommu == NULL)
1195 if (len > PAGE_SIZE - pageoff)
1200 mm = &vm->mem_maps[i];
1201 if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) {
1202 count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
1230 return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie));
1237 sx_assert(&vm->mem_segs_lock, SX_LOCKED);
1256 return (vmmops_getreg(vcpu->cookie, reg, retval));
1267 error = vmmops_setreg(vcpu->cookie, reg, val);
1273 vcpu->nextrip = val;
1316 return (vmmops_getdesc(vcpu->cookie, reg, desc));
1326 return (vmmops_setdesc(vcpu->cookie, reg, desc));
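
Register and descriptor accesses all funnel through the vmmops backend; the one piece the generic layer maintains itself is nextrip, refreshed whenever VM_REG_GUEST_RIP is written so the next entry resumes at the new address. A short usage sketch:

    /* Illustrative: advance the guest past a 2-byte instruction. */
    uint64_t rip;
    error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
    if (error == 0)
        error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip + 2);
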
1338 fpurestore(vcpu->guestfpu);
1342 load_xcr(0, vcpu->guest_xcr0);
1360 vcpu->guest_xcr0 = rxcr(0);
1366 fpusave(vcpu->guestfpu);
1386 while (vcpu->state != VCPU_IDLE) {
1387 vcpu->reqidle = 1;
1390 "idle requested", vcpu_state2str(vcpu->state));
1391 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
1394 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
1398 if (vcpu->state == VCPU_RUNNING) {
1399 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
1400 "mismatch for running vcpu", curcpu, vcpu->hostcpu));
1402 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
1403 "vcpu that is not running", vcpu->hostcpu));
1408 * IDLE -> FROZEN -> IDLE
1409 * FROZEN -> RUNNING -> FROZEN
1410 * FROZEN -> SLEEPING -> FROZEN
1412 switch (vcpu->state) {
1430 vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
1432 vcpu->state = newstate;
1434 vcpu->hostcpu = curcpu;
1436 vcpu->hostcpu = NOCPU;
1439 wakeup(&vcpu->state);
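
The diagram admits exactly those edges: every path passes through FROZEN, and IDLE never jumps straight to RUNNING or SLEEPING. A self-contained sketch of the invariant the switch above enforces (checker name hypothetical; enum values as in the source):

    enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING };

    /* Hypothetical checker for the IDLE <-> FROZEN <-> {RUNNING, SLEEPING}
     * diagram; mirrors the switch on vcpu->state above. */
    static int
    vcpu_transition_ok(enum vcpu_state from, enum vcpu_state to)
    {
        switch (from) {
        case VCPU_IDLE:
            return (to == VCPU_FROZEN);
        case VCPU_FROZEN:
            return (to == VCPU_IDLE || to == VCPU_RUNNING ||
                to == VCPU_SLEEPING);
        case VCPU_RUNNING:
        case VCPU_SLEEPING:
            return (to == VCPU_FROZEN);
        default:
            return (0);
        }
    }
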
1465 struct vm *vm = vcpu->vm;
1470 vcpuid = vcpu->vcpuid;
1472 mtx_lock(&vm->rendezvous_mtx);
1473 while (vm->rendezvous_func != NULL) {
1475 CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus);
1477 if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
1478 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
1480 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
1481 CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
1483 if (CPU_CMP(&vm->rendezvous_req_cpus,
1484 &vm->rendezvous_done_cpus) == 0) {
1486 CPU_ZERO(&vm->rendezvous_req_cpus);
1487 vm->rendezvous_func = NULL;
1488 wakeup(&vm->rendezvous_func);
1492 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
1495 mtx_unlock(&vm->rendezvous_mtx);
1499 mtx_lock(&vm->rendezvous_mtx);
1502 mtx_unlock(&vm->rendezvous_mtx);
1512 struct vm *vm = vcpu->vm;
1517 vcpuid = vcpu->vcpuid;
1523 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
1536 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
1542 vlapic_pending_intr(vcpu->vlapic, NULL)) {
1565 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
1567 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
1578 * XXX msleep_spin() cannot be interrupted by signals so
1579 * wake up periodically to check pending signals.
1581 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
1583 vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
1590 &vm->halted_cpus);
1599 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);
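
Because msleep_spin() sleeps on a spin mutex it cannot be interrupted by a signal the way msleep() can, so each nap is capped at hz ticks and pending work is re-checked in between. A condensed sketch of that loop (the wakeup predicate is hypothetical):

    int t = ticks;
    while (guest_halted(vcpu)) {    /* hypothetical predicate */
        /* returns after at most hz ticks even if nobody calls wakeup() */
        msleep_spin(vcpu, &vcpu->mtx, "vmidle", hz);
        vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
        t = ticks;
    }
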
1612 struct vm *vm = vcpu->vm;
1617 vme = &vcpu->exitinfo;
1619 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
1620 __func__, vme->inst_length));
1622 ftype = vme->u.paging.fault_type;
1628 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
1629 vme->u.paging.gpa, ftype);
1633 vme->u.paging.gpa);
1638 map = &vm->vmspace->vm_map;
1639 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
1642 "ftype = %d", rv, vme->u.paging.gpa, ftype);
1662 vme = &vcpu->exitinfo;
1664 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
1665 __func__, vme->inst_length));
1667 gla = vme->u.inst_emul.gla;
1668 gpa = vme->u.inst_emul.gpa;
1669 cs_base = vme->u.inst_emul.cs_base;
1670 cs_d = vme->u.inst_emul.cs_d;
1671 vie = &vme->u.inst_emul.vie;
1672 paging = &vme->u.inst_emul.paging;
1673 cpu_mode = paging->cpu_mode;
1678 if (vie->num_valid == 0) {
1679 error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base,
1692 vme->rip + cs_base);
1700 vme->inst_length = vie->num_processed;
1701 vcpu->nextrip += vie->num_processed;
1703 vcpu->nextrip);
1705 /* return to userland unless this is an in-kernel emulated device */
1729 struct vm *vm = vcpu->vm;
1736 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus);
1747 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
1752 if (vm->rendezvous_func == NULL) {
1755 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
1774 for (i = 0; i < vm->maxcpus; i++) {
1775 if (CPU_ISSET(i, &vm->suspended_cpus)) {
1788 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
1789 vcpu->reqidle = 0;
1804 if (!vme->u.dbg.pushf_intercept || vme->u.dbg.tf_shadow_val != 0) {
1809 error = vm_copy_setup(vcpu, &vme->u.dbg.paging, rsp, sizeof(uint64_t),
1837 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
1839 vm->suspend, how);
1848 for (i = 0; i < vm->maxcpus; i++) {
1849 if (CPU_ISSET(i, &vm->active_cpus))
1859 struct vm *vm = vcpu->vm;
1862 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
1863 ("vm_exit_suspended: invalid suspend type %d", vm->suspend));
1866 vmexit->rip = rip;
1867 vmexit->inst_length = 0;
1868 vmexit->exitcode = VM_EXITCODE_SUSPENDED;
1869 vmexit->u.suspended.how = vm->suspend;
1878 vmexit->rip = rip;
1879 vmexit->inst_length = 0;
1880 vmexit->exitcode = VM_EXITCODE_DEBUG;
1889 vmexit->rip = rip;
1890 vmexit->inst_length = 0;
1891 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
1901 vmexit->rip = rip;
1902 vmexit->inst_length = 0;
1903 vmexit->exitcode = VM_EXITCODE_REQIDLE;
1913 vmexit->rip = rip;
1914 vmexit->inst_length = 0;
1915 vmexit->exitcode = VM_EXITCODE_BOGUS;
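
All four fabricated exits above share one shape: a zero-length exit at the current rip, differing only in exitcode (plus, for the suspend case, the 'how' payload). A condensed helper sketch (name hypothetical):

    /* Hypothetical common helper for the fabricated exits above. */
    static void
    vm_exit_fabricate(struct vcpu *vcpu, uint64_t rip, int exitcode)
    {
        struct vm_exit *vmexit = vm_exitinfo(vcpu);

        vmexit->rip = rip;
        vmexit->inst_length = 0;
        vmexit->exitcode = exitcode;
    }
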
1922 struct vm *vm = vcpu->vm;
1931 vcpuid = vcpu->vcpuid;
1933 if (!CPU_ISSET(vcpuid, &vm->active_cpus))
1936 if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
1939 pmap = vmspace_pmap(vm->vmspace);
1940 vme = &vcpu->exitinfo;
1941 evinfo.rptr = &vm->rendezvous_req_cpus;
1942 evinfo.sptr = &vm->suspend;
1943 evinfo.iptr = &vcpu->reqidle;
1947 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
1958 error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
1963 vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
1969 vcpu->nextrip = vme->rip + vme->inst_length;
1970 switch (vme->exitcode) {
1978 vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector);
1984 intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
2015 if (error == 0 && vme->exitcode == VM_EXITCODE_IPI)
2022 VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode);
2042 vcpu->exitinfo.inst_length = 0;
2044 "setting inst_length to zero", vcpu->exitinfo.rip);
2055 "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
2056 vcpu->nextrip = rip;
2081 vcpu->exitintinfo = info;
2102 /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
2112 * SVM and VT-x use identical type values to represent NMI,
2115 * SVM uses type '3' for all exceptions. VT-x uses type '3'
2150 * If an exception occurs while attempting to call the double-fault
2158 vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT);
2164 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
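
Those two SDM tables drive the nesting logic: the second event escalates only when the exception classes collide, and a fault taken while delivering #DF is the triple fault suspended above. A self-contained sketch of the classification (per Intel SDM Vol 3, Tables 6-4 and 6-5; function names hypothetical):

    enum exc_class { EXC_BENIGN, EXC_CONTRIBUTORY, EXC_PAGEFAULT };

    static enum exc_class
    exception_class(int vector)
    {
        switch (vector) {
        case 0:     /* #DE */
        case 10:    /* #TS */
        case 11:    /* #NP */
        case 12:    /* #SS */
        case 13:    /* #GP */
            return (EXC_CONTRIBUTORY);
        case 14:    /* #PF */
            return (EXC_PAGEFAULT);
        default:
            return (EXC_BENIGN);
        }
    }

    /* Table 6-5: contributory-on-contributory, or anything but a
     * benign event during #PF delivery, yields a double fault. */
    static int
    raises_double_fault(int first, int second)
    {
        enum exc_class c1 = exception_class(first);
        enum exc_class c2 = exception_class(second);

        return ((c1 == EXC_CONTRIBUTORY && c2 == EXC_CONTRIBUTORY) ||
            (c1 == EXC_PAGEFAULT && c2 != EXC_BENIGN));
    }
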
2186 if (vcpu->exception_pending) {
2187 info = vcpu->exc_vector & 0xff;
2189 if (vcpu->exc_errcode_valid) {
2191 info |= (uint64_t)vcpu->exc_errcode << 32;
2203 info1 = vcpu->exitintinfo;
2204 vcpu->exitintinfo = 0;
2207 if (vcpu->exception_pending) {
2209 vcpu->exception_pending = 0;
2211 vcpu->exc_vector, info2);
2237 *info1 = vcpu->exitintinfo;
2260 if (vcpu->exception_pending) {
2262 "pending exception %d", vector, vcpu->exc_vector);
2289 vcpu->exception_pending = 1;
2290 vcpu->exc_vector = vector;
2291 vcpu->exc_errcode = errcode;
2292 vcpu->exc_errcode_valid = errcode_valid;
2329 vcpu->nmi_pending = 1;
2337 return (vcpu->nmi_pending);
2343 if (vcpu->nmi_pending == 0)
2346 vcpu->nmi_pending = 0;
2356 vcpu->extint_pending = 1;
2364 return (vcpu->extint_pending);
2370 if (vcpu->extint_pending == 0)
2373 vcpu->extint_pending = 0;
2383 return (vmmops_getcap(vcpu->cookie, type, retval));
2392 return (vmmops_setcap(vcpu->cookie, type, val));
2398 return (vcpu->vm);
2404 return (vcpu->vcpuid);
2410 return (vm->vcpu[vcpuid]);
2416 return (vcpu->vlapic);
2423 return (vm->vioapic);
2430 return (vm->vhpet);
2447 * names instead of a single one - yuck!
2479 return (vm->iommu);
2500 state = vcpu->state;
2502 *hostcpu = vcpu->hostcpu;
2511 struct vm *vm = vcpu->vm;
2513 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
2517 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
2525 vm->debug_cpus = vm->active_cpus;
2526 for (int i = 0; i < vm->maxcpus; i++) {
2527 if (CPU_ISSET(i, &vm->active_cpus))
2531 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
2534 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
2545 CPU_ZERO(&vm->debug_cpus);
2547 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
2550 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
2559 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
2566 return (vm->active_cpus);
2573 return (vm->debug_cpus);
2580 return (vm->suspended_cpus);
2592 mtx_lock(&vm->rendezvous_mtx);
2593 CPU_AND(&set, &vm->startup_cpus, tostart);
2594 CPU_ANDNOT(&vm->startup_cpus, &vm->startup_cpus, &set);
2595 mtx_unlock(&vm->rendezvous_mtx);
2602 mtx_lock(&vm->rendezvous_mtx);
2603 CPU_OR(&vm->startup_cpus, &vm->startup_cpus, waiting);
2604 mtx_unlock(&vm->rendezvous_mtx);
2611 return (vcpu->stats);
2617 *state = vcpu->x2apic_state;
2628 vcpu->x2apic_state = state;
2638 * - If the vcpu thread is sleeping then it is woken up.
2639 * - If the vcpu is running on a different host_cpu then an IPI will be directed
2647 hostcpu = vcpu->hostcpu;
2648 if (vcpu->state == VCPU_RUNNING) {
2652 vlapic_post_intr(vcpu->vlapic, hostcpu,
2667 "with hostcpu %d", vcpu->state, hostcpu));
2668 if (vcpu->state == VCPU_SLEEPING)
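
So the notify path has three cases keyed off vcpu->state: RUNNING on another host cpu gets an IPI (a posted interrupt via vlapic_post_intr() when the event is an APIC interrupt), SLEEPING gets a wakeup, and anything else is a no-op because the vcpu will re-evaluate on its next transition. A condensed sketch (caller holds the vcpu spin lock; the plain-IPI call, the wakeup call, and vmm_ipinum are assumptions based on the fragments above):

    /* Condensed sketch; 'lapic_intr' selects posted interrupt vs. plain IPI. */
    static void
    notify_sketch(struct vcpu *vcpu, bool lapic_intr)
    {
        if (vcpu->state == VCPU_RUNNING && vcpu->hostcpu != curcpu) {
            if (lapic_intr)
                vlapic_post_intr(vcpu->vlapic, vcpu->hostcpu, vmm_ipinum);
            else
                ipi_cpu(vcpu->hostcpu, vmm_ipinum); /* force a VM-exit */
        } else if (vcpu->state == VCPU_SLEEPING)
            wakeup_one(vcpu);   /* assumption: paired with msleep_spin above */
    }
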
2684 return (vm->vmspace);
2700 struct vm *vm = vcpu->vm;
2709 mtx_lock(&vm->rendezvous_mtx);
2710 if (vm->rendezvous_func != NULL) {
2717 mtx_unlock(&vm->rendezvous_mtx);
2723 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
2727 vm->rendezvous_req_cpus = dest;
2728 CPU_ZERO(&vm->rendezvous_done_cpus);
2729 vm->rendezvous_arg = arg;
2730 vm->rendezvous_func = func;
2731 mtx_unlock(&vm->rendezvous_mtx);
2734 * Wake up any sleeping vcpus and trigger a VM-exit in any running
2737 for (i = 0; i < vm->maxcpus; i++) {
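
Putting the two halves together: the initiator publishes (rendezvous_req_cpus, func, arg) under rendezvous_mtx and kicks every vcpu so each one runs the callback in vm_handle_rendezvous(); the last finisher zeroes the request set, clears rendezvous_func, and wakes the waiters. A hedged usage sketch (callback shape taken from the (*vm->rendezvous_func)(vcpu, arg) call above):

    /* Hypothetical callback: runs once on every vcpu in 'dest'. */
    static void
    sync_cb(struct vcpu *vcpu, void *arg)
    {
        /* per-vcpu work, e.g. refreshing cached state */
    }

    cpuset_t dest = vm_active_cpus(vm);
    error = vm_smp_rendezvous(vcpu, dest, sync_cb, NULL);
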
2748 return (vm->vatpic);
2754 return (vm->vatpit);
2761 return (vm->vpmtmr);
2768 return (vm->vrtc);
2821 n = min(remaining, PAGE_SIZE - off);
2824 remaining -= n;
2857 len -= copyinfo[idx].len;
2873 len -= copyinfo[idx].len;
2880 * Return the amount of in-use and wired memory for the VM. Since
2890 if (vcpu->vcpuid == 0) {
2892 vmspace_resident_count(vcpu->vm->vmspace));
2900 if (vcpu->vcpuid == 0) {
2902 pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
2921 vcpu = vm->vcpu[i];
2925 SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done);
2926 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done);
2927 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done);
2928 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done);
2929 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done);
2930 SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done);
2931 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done);
2932 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done);
2940 tsc = now + vcpu->tsc_offset;
2942 if (meta->op == VM_SNAPSHOT_RESTORE)
2943 vcpu->tsc_offset = tsc;
2959 SNAPSHOT_VAR_OR_LEAVE(vm->startup_cpus, meta, ret, done);
2975 vcpu = vm->vcpu[i];
2979 error = vmmops_vcpu_snapshot(vcpu->cookie, meta);
2992 * Save kernel-side structures to user-space for snapshotting.
2999 switch (meta->dev_req) {
3029 __func__, meta->dev_req);
3038 vcpu->tsc_offset = offset;
3057 vcpu = vm->vcpu[i];
3061 error = vmmops_restore_tsc(vcpu->cookie,
3062 vcpu->tsc_offset - now);
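
The offset arithmetic keeps the guest TSC continuous across save/restore: at save time the snapshot records guest_tsc = host_now + tsc_offset, and at restore the backend is handed guest_tsc - host_now', so the first guest read after resume picks up where the last one left off. A worked example with illustrative numbers:

    /* save:    host_now = 1000, tsc_offset = +50  -> snapshot 1050
     * restore: host_now' = 200 -> vmmops_restore_tsc offset = 1050 - 200 = 850
     * guest then reads 200 + 850 = 1050, i.e. no visible jump. */
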