Lines Matching defs:gpa

142 	vm_paddr_t	gpa;
785 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
789 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
796 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
799 vmm_mmio_free(vm->vmspace, gpa, len);
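
A sketch of the wrappers at lines 785-799: PCI passthru exposes a host device's register window to the guest by backing the guest-physical range with the host-physical one, and tears it down symmetrically. mmio_alloc() and mmio_free() are hypothetical stand-ins for vmm_mmio_alloc() and vmm_mmio_free().

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t vm_paddr_t;

struct vm;				/* opaque, as in vmm.c */

/* Hypothetical stand-ins for vmm_mmio_alloc()/vmm_mmio_free(). */
void	*mmio_alloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
void	 mmio_free(struct vm *vm, vm_paddr_t gpa, size_t len);

int
map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	/* Back the guest range [gpa, gpa + len) with host pages at hpa. */
	if (mmio_alloc(vm, gpa, len, hpa) == NULL)
		return (ENOMEM);
	return (0);
}

void
unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	mmio_free(vm, gpa, len);
}
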
804 * Return 'true' if 'gpa' is allocated in the guest address space.
810 vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
825 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
826 return (true); /* 'gpa' is sysmem or devmem */
829 if (ppt_is_mmio(vm, gpa))
830 return (true); /* 'gpa' is pci passthru mmio */
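
Lines 810-830 are the allocation test: a gpa counts as allocated if any in-use memory map covers it, with PCI passthru MMIO as the fallback. A minimal sketch of the array walk, assuming a fixed-size map table (VM_MAX_MEMMAPS and struct mem_map are simplified stand-ins for the vmm.c originals; the ppt_is_mmio() fallback is only noted):

#include <stdbool.h>

struct mem_map {
	vm_paddr_t	gpa;	/* guest-physical base */
	size_t		len;	/* 0 marks an unused slot */
};

#define	VM_MAX_MEMMAPS	8	/* assumed table size */

bool
mem_allocated(const struct mem_map *maps, vm_paddr_t gpa)
{
	const struct mem_map *mm;
	int i;

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &maps[i];
		/* An in-use slot covering 'gpa' is sysmem or devmem. */
		if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
			return (true);
	}
	return (false);		/* the real code then tries ppt_is_mmio() */
}
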
904 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
929 if ((gpa | first | last) & PAGE_MASK)
944 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
952 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
955 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
961 map->gpa = gpa;
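
Lines 904-961 map a slice of a memory segment into the guest: reject unaligned arguments, place the backing object with vm_map_find(), wire it with vm_map_wire(), and record the result in a mem_map slot. The OR at line 929 tests all three values for page alignment at once; a sketch of the idiom:

#include <stdbool.h>
#include <stdint.h>

#define	PAGE_SIZE	4096UL		/* x86 base page */
#define	PAGE_MASK	(PAGE_SIZE - 1)

/* True only when gpa, first and last are all page aligned. */
bool
args_page_aligned(uint64_t gpa, uint64_t first, uint64_t last)
{
	/* A low bit set in any argument survives the OR. */
	return (((gpa | first | last) & PAGE_MASK) == 0);
}

Note the rollback on lines 952-955: if vm_map_wire() fails, vm_map_remove() undoes the fresh mapping so a failed call leaves the vmspace unchanged.
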
971 vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
978 if (m->gpa == gpa && m->len == len &&
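
Lines 971-978 show that unmapping is exact-match only: the caller must name a mapping's gpa and len precisely, so mappings cannot be split or shrunk. A sketch of the lookup, reusing struct mem_map from above:

struct mem_map *
memmap_find_exact(struct mem_map *maps, vm_paddr_t gpa, size_t len)
{
	int i;

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		/* Only identical bounds qualify for removal. */
		if (maps[i].gpa == gpa && maps[i].len == len)
			return (&maps[i]);
	}
	return (NULL);
}
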
989 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
998 if (mm->len == 0 || mm->gpa < *gpa)
1000 if (mmnext == NULL || mm->gpa < mmnext->gpa)
1005 *gpa = mmnext->gpa;
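
Lines 989-1005 are an iterator: given a cursor in *gpa, find the in-use mapping with the smallest base at or above it and advance the cursor. A sketch with the same types as above:

struct mem_map *
memmap_next(struct mem_map *maps, vm_paddr_t *gpa)
{
	struct mem_map *mm, *mmnext;
	int i;

	mmnext = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &maps[i];
		/* Skip unused slots and anything below the cursor. */
		if (mm->len == 0 || mm->gpa < *gpa)
			continue;
		/* Keep the lowest candidate seen so far. */
		if (mmnext == NULL || mm->gpa < mmnext->gpa)
			mmnext = mm;
	}
	if (mmnext != NULL)
		*gpa = mmnext->gpa;	/* advance the cursor */
	return (mmnext);
}
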
1030 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
1031 mm->gpa + mm->len);
1059 if (maxaddr < mm->gpa + mm->len)
1060 maxaddr = mm->gpa + mm->len;
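
Lines 1030-1060 come from teardown and sizing: freeing a memseg removes its range from the vmspace map, and the system-memory ceiling is the running maximum of gpa + len over all maps. A sketch of the ceiling computation:

vm_paddr_t
sysmem_maxaddr(const struct mem_map *maps)
{
	vm_paddr_t maxaddr;
	int i;

	maxaddr = 0;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		/* The ceiling is the highest end address of any map. */
		if (maps[i].len != 0 && maxaddr < maps[i].gpa + maps[i].len)
			maxaddr = maps[i].gpa + maps[i].len;
	}
	return (maxaddr);
}
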
1069 vm_paddr_t gpa, hpa;
1082 mm->gpa, mm->len, mm->flags));
1087 for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
1088 hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa);
1103 ("vm_iommu_map: vm %p gpa %jx hpa %jx not wired",
1104 vm, (uintmax_t)gpa, (uintmax_t)hpa));
1106 iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE);
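
Lines 1069-1106 are the passthru setup loop: every page of a wired mapping is translated to its host-physical address and entered into the IOMMU, so the device sees the same gpa-to-hpa view the guest does. The KASSERT at lines 1103-1104 states the invariant that makes this safe: the page must be wired, or the hpa could change under the device. A page-granular sketch, with hpa_of() and iommu_map_page() standing in for pmap_extract() and iommu_create_mapping():

/* Hypothetical stand-ins for the pmap and IOMMU KPIs. */
vm_paddr_t	hpa_of(vm_paddr_t gpa);
void		iommu_map_page(vm_paddr_t gpa, vm_paddr_t hpa, size_t len);

void
iommu_map_range(const struct mem_map *mm)
{
	vm_paddr_t gpa, hpa;

	/* One IOMMU entry per base page across the wired mapping. */
	for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
		hpa = hpa_of(gpa);	/* pmap_extract() in vmm.c */
		iommu_map_page(gpa, hpa, PAGE_SIZE);
	}
}
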
1116 vm_paddr_t gpa;
1132 mm->gpa, mm->len, mm->flags));
1134 for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
1136 vmspace_pmap(vm->vmspace), gpa))),
1137 ("vm_iommu_unmap: vm %p gpa %jx not wired",
1138 vm, (uintmax_t)gpa));
1139 iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE);
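
The unmap walk at lines 1116-1139 mirrors the setup loop: the same page-sized stride, but each step asserts the page is still wired and then drops its IOMMU entry. In sketch form, with iommu_unmap_page() standing in for iommu_remove_mapping():

void	iommu_unmap_page(vm_paddr_t gpa, size_t len);

void
iommu_unmap_range(const struct mem_map *mm)
{
	vm_paddr_t gpa;

	/* Remove one page-sized mapping per iteration. */
	for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE)
		iommu_unmap_page(gpa, PAGE_SIZE);
}
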
1171 /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
1187 _vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
1194 pageoff = gpa & PAGE_MASK;
1196 panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
1201 if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) {
1203 trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
1218 vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
1230 return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie));
1234 vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
1238 return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
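
Lines 1187-1238 hold a guest page for host access: split the gpa into page and offset, refuse a span that crosses a page boundary (the panic at line 1196), find the covering mapping, and fault the page in wired. vm_gpa_hold() routes a vcpu's request through the same helper that vm_gpa_hold_global() uses for the whole VM. A single-page sketch, with hold_page() as a hypothetical stand-in for the vm_fault_quick_hold_pages() call at line 1203:

#define	trunc_page(x)	((x) & ~PAGE_MASK)

/* Hypothetical stand-in for vm_fault_quick_hold_pages(). */
void	*hold_page(vm_paddr_t gpa, size_t len, int reqprot);

void *
gpa_hold_one(struct mem_map *maps, vm_paddr_t gpa, size_t len, int reqprot)
{
	struct mem_map *mm;
	vm_paddr_t pageoff;
	int i;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		return (NULL);	/* vmm.c panics: span crosses a page */

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &maps[i];
		if (gpa >= mm->gpa && gpa < mm->gpa + mm->len)
			return (hold_page(trunc_page(gpa), PAGE_SIZE, reqprot));
	}
	return (NULL);		/* gpa not backed by any mapping */
}
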
1629 vme->u.paging.gpa, ftype);
1631 VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx",
1633 vme->u.paging.gpa);
1639 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
1641 VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, "
1642 "ftype = %d", rv, vme->u.paging.gpa, ftype);
1655 uint64_t gla, gpa, cs_base;
1668 gpa = vme->u.inst_emul.gpa;
1675 VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa);
1706 if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
1709 } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
1712 } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
1720 error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite,
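
Lines 1655-1720 emulate an instruction that touched emulated MMIO: the exit record supplies the faulting gla and gpa, and the handler selects read/write callbacks by address range before handing off to vmm_emulate_instruction(). A sketch of the dispatch at lines 1706-1712, with PAGE_SIZE as defined above; the two APIC bases and the HPET base are the x86 defaults, while the window sizes, typedefs, and handler names are assumptions:

#include <errno.h>
#include <stdint.h>

#define	DEFAULT_APIC_BASE	0xfee00000UL	/* local APIC page */
#define	VIOAPIC_BASE		0xfec00000UL	/* I/O APIC window */
#define	VIOAPIC_SIZE		4096UL		/* assumed */
#define	VHPET_BASE		0xfed00000UL	/* HPET window */
#define	VHPET_SIZE		1024UL		/* assumed */

typedef int mmio_read_t(uint64_t gpa, uint64_t *rval, int size);
typedef int mmio_write_t(uint64_t gpa, uint64_t wval, int size);

/* Placeholder device models. */
mmio_read_t	lapic_read, vioapic_read, vhpet_read;
mmio_write_t	lapic_write, vioapic_write, vhpet_write;

int
mmio_pick_handlers(uint64_t gpa, mmio_read_t **mread, mmio_write_t **mwrite)
{
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		*mread = lapic_read;
		*mwrite = lapic_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		*mread = vioapic_read;
		*mwrite = vioapic_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		*mread = vhpet_read;
		*mwrite = vhpet_write;
	} else
		return (EINVAL);	/* no device model claims this gpa */
	return (0);
}
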
2808 uint64_t gpa;
2817 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
2820 off = gpa & PAGE_MASK;
2822 copyinfo[nused].gpa = gpa;
2830 hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa,
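
Lines 2808-2830 set up a guest copy that may span pages: each pass translates the guest linear address with vm_gla2gpa(), records the resulting gpa and its offset within the page, clamps the chunk to the page boundary, and holds the page via vm_gpa_hold() to get a host address to copy through. A single-chunk sketch with hypothetical gla2gpa() and hold_page() stand-ins, reusing PAGE_SIZE/PAGE_MASK from above:

struct copyinfo {
	uint64_t	gpa;	/* translated guest-physical address */
	size_t		len;	/* bytes covered within this page */
	void		*hva;	/* host mapping once the page is held */
};

/* Hypothetical stand-ins for vm_gla2gpa() and vm_gpa_hold(). */
int	 gla2gpa(uint64_t gla, int prot, uint64_t *gpa, int *fault);
void	*hold_page(uint64_t gpa, size_t len, int prot);

int
copy_setup_one(uint64_t gla, size_t len, int prot, struct copyinfo *ci,
    int *fault)
{
	uint64_t gpa, off;
	int error;

	error = gla2gpa(gla, prot, &gpa, fault);
	if (error || *fault)
		return (error);

	off = gpa & PAGE_MASK;		/* offset within the page */
	ci->gpa = gpa;
	ci->len = len < PAGE_SIZE - off ? len : PAGE_SIZE - off;
	ci->hva = hold_page(gpa, ci->len, prot);
	return (ci->hva == NULL ? EFAULT : 0);
}
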