/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <capsicum_helpers.h>
#include <err.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <vm/vm.h>
#include <machine/vmm.h>
#ifdef WITH_VMMAPI_SNAPSHOT
#include <machine/vmm_snapshot.h>
#endif

#include <dev/vmm/vmm_dev.h>

#include "vmmapi.h"
#include "internal.h"

#define MB      (1024 * 1024UL)
#define GB      (1024 * 1024 * 1024UL)

#ifdef __amd64__
#define VM_LOWMEM_LIMIT (3 * GB)
#else
#define VM_LOWMEM_LIMIT 0
#endif
#define VM_HIGHMEM_BASE (4 * GB)

/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory. This must be a multiple of the
 * superpage size for performance reasons.
 */
#define VM_MMAP_GUARD_SIZE      (4 * MB)

#define PROT_RW         (PROT_READ | PROT_WRITE)
#define PROT_ALL        (PROT_READ | PROT_WRITE | PROT_EXEC)

static int
vm_device_open(const char *name)
{
        char devpath[PATH_MAX];

        assert(strlen(name) <= VM_MAX_NAMELEN);
        (void)snprintf(devpath, sizeof(devpath), "/dev/vmm/%s", name);
        return (open(devpath, O_RDWR));
}

static int
vm_ctl_create(const char *name, int ctlfd)
{
        struct vmmctl_vm_create vmc;

        memset(&vmc, 0, sizeof(vmc));
        if (strlcpy(vmc.name, name, sizeof(vmc.name)) >= sizeof(vmc.name)) {
                errno = ENAMETOOLONG;
                return (-1);
        }
        return (ioctl(ctlfd, VMMCTL_VM_CREATE, &vmc));
}

int
vm_create(const char *name)
{
        int error, fd;

        /* Try to load vmm(4) module before creating a guest. */
        if (modfind("vmm") < 0) {
                error = kldload("vmm");
                if (error != 0)
                        return (-1);
        }

        fd = open("/dev/vmmctl", O_RDWR, 0);
        if (fd < 0)
                return (fd);
        error = vm_ctl_create(name, fd);
        if (error != 0) {
                error = errno;
                (void)close(fd);
                errno = error;
                return (-1);
        }
        (void)close(fd);
        return (0);
}

struct vmctx *
vm_open(const char *name)
{
        return (vm_openf(name, 0));
}

struct vmctx *
vm_openf(const char *name, int flags)
{
        struct vmctx *vm;
        int saved_errno;
        bool created;

        created = false;

        vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
        assert(vm != NULL);

        vm->fd = vm->ctlfd = -1;
        vm->memflags = 0;
        vm->name = (char *)(vm + 1);
        strcpy(vm->name, name);
        memset(vm->memsegs, 0, sizeof(vm->memsegs));

        if ((vm->ctlfd = open("/dev/vmmctl", O_RDWR, 0)) < 0)
                goto err;

        vm->fd = vm_device_open(vm->name);
        if (vm->fd < 0 && errno == ENOENT) {
                if (flags & VMMAPI_OPEN_CREATE) {
                        if (vm_ctl_create(vm->name, vm->ctlfd) != 0)
                                goto err;
                        vm->fd = vm_device_open(vm->name);
                        created = true;
                }
        }
        if (vm->fd < 0)
                goto err;

        if (!created && (flags & VMMAPI_OPEN_REINIT) != 0 && vm_reinit(vm) != 0)
                goto err;

        return (vm);
err:
        saved_errno = errno;
        if (created)
                vm_destroy(vm);
        else
                vm_close(vm);
        errno = saved_errno;
        return (NULL);
}

void
vm_close(struct vmctx *vm)
{
        assert(vm != NULL);

        if (vm->fd >= 0)
                (void)close(vm->fd);
        if (vm->ctlfd >= 0)
                (void)close(vm->ctlfd);
        free(vm);
}

void
vm_destroy(struct vmctx *vm)
{
        struct vmmctl_vm_destroy vmd;

        memset(&vmd, 0, sizeof(vmd));
        (void)strlcpy(vmd.name, vm->name, sizeof(vmd.name));
        if (ioctl(vm->ctlfd, VMMCTL_VM_DESTROY, &vmd) != 0)
                warn("ioctl(VMMCTL_VM_DESTROY)");

        vm_close(vm);
}

struct vcpu *
vm_vcpu_open(struct vmctx *ctx, int vcpuid)
{
        struct vcpu *vcpu;

        vcpu = malloc(sizeof(*vcpu));
        vcpu->ctx = ctx;
        vcpu->vcpuid = vcpuid;
        return (vcpu);
}

void
vm_vcpu_close(struct vcpu *vcpu)
{
        free(vcpu);
}

int
vcpu_id(struct vcpu *vcpu)
{
        return (vcpu->vcpuid);
}

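/*
 * Example (illustrative only, not part of the library): a minimal sketch of
 * how a caller might create or attach to a guest and obtain a vcpu handle.
 * The VM name "testvm" and the use of vcpu 0 are arbitrary choices for the
 * example.  vm_close() detaches without destroying the VM; vm_destroy()
 * deletes it.
 *
 *      static int
 *      example_open(void)
 *      {
 *              struct vmctx *ctx;
 *              struct vcpu *vcpu;
 *
 *              ctx = vm_openf("testvm", VMMAPI_OPEN_CREATE);
 *              if (ctx == NULL)
 *                      return (-1);
 *              vcpu = vm_vcpu_open(ctx, 0);
 *              assert(vcpu_id(vcpu) == 0);
 *
 *              vm_vcpu_close(vcpu);
 *              vm_close(ctx);
 *              return (0);
 *      }
 */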

int
vm_parse_memsize(const char *opt, size_t *ret_memsize)
{
        char *endptr;
        size_t optval;
        int error;

        optval = strtoul(opt, &endptr, 0);
        if (*opt != '\0' && *endptr == '\0') {
                /*
                 * For the sake of backward compatibility if the memory size
                 * specified on the command line is less than a megabyte then
                 * it is interpreted as being in units of MB.
                 */
                if (optval < MB)
                        optval *= MB;
                *ret_memsize = optval;
                error = 0;
        } else
                error = expand_number(opt, ret_memsize);

        return (error);
}

uint32_t
vm_get_lowmem_limit(struct vmctx *ctx __unused)
{

        return (VM_LOWMEM_LIMIT);
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

        ctx->memflags = flags;
}

int
vm_get_memflags(struct vmctx *ctx)
{

        return (ctx->memflags);
}

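/*
 * Example (illustrative only): parsing a memory-size argument and asking for
 * wired guest memory before the memory is set up.  The string "2G" is just a
 * sample command-line value; a bare number below one megabyte (e.g. "256")
 * would be interpreted as megabytes for backward compatibility.
 *
 *      size_t memsize;
 *
 *      if (vm_parse_memsize("2G", &memsize) != 0)
 *              errx(1, "invalid memory size");
 *      vm_set_memflags(ctx, VM_MEM_F_WIRED);
 */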

/*
 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
 */
int
vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot)
{
        struct vm_memmap memmap;
        int error, flags;

        memmap.gpa = gpa;
        memmap.segid = segid;
        memmap.segoff = off;
        memmap.len = len;
        memmap.prot = prot;
        memmap.flags = 0;

        if (ctx->memflags & VM_MEM_F_WIRED)
                memmap.flags |= VM_MEMMAP_F_WIRED;

        /*
         * If this mapping already exists then don't create it again. This
         * is the common case for SYSMEM mappings created by bhyveload(8).
         */
        error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
        if (error == 0 && gpa == memmap.gpa) {
                if (segid != memmap.segid || off != memmap.segoff ||
                    prot != memmap.prot || flags != memmap.flags) {
                        errno = EEXIST;
                        return (-1);
                } else {
                        return (0);
                }
        }

        error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
        return (error);
}

int
vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr,
    size_t *lowmem_size, size_t *highmem_size)
{

        *guest_baseaddr = ctx->baseaddr;
        *lowmem_size = ctx->memsegs[VM_MEMSEG_LOW].size;
        *highmem_size = ctx->memsegs[VM_MEMSEG_HIGH].size;
        return (0);
}

int
vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
{
        struct vm_munmap munmap;
        int error;

        munmap.gpa = gpa;
        munmap.len = len;

        error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
        return (error);
}

int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
        struct vm_memmap memmap;
        int error;

        bzero(&memmap, sizeof(struct vm_memmap));
        memmap.gpa = *gpa;
        error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
        if (error == 0) {
                *gpa = memmap.gpa;
                *segid = memmap.segid;
                *segoff = memmap.segoff;
                *len = memmap.len;
                *prot = memmap.prot;
                *flags = memmap.flags;
        }
        return (error);
}

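/*
 * Example (illustrative only): walking the guest memory map with
 * vm_mmap_getnext().  Starting the scan at gpa 0 and printing each mapping
 * is just one possible use; error handling is reduced to the minimum.
 *
 *      vm_paddr_t gpa = 0;
 *      vm_ooffset_t segoff;
 *      size_t maplen;
 *      int segid, prot, mapflags;
 *
 *      while (vm_mmap_getnext(ctx, &gpa, &segid, &segoff, &maplen,
 *          &prot, &mapflags) == 0) {
 *              printf("segid %d at gpa %#jx len %zu\n", segid,
 *                  (uintmax_t)gpa, maplen);
 *              gpa += maplen;
 *      }
 */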

/*
 * Return 0 if the segments are identical and non-zero otherwise.
 *
 * This is slightly complicated by the fact that only device memory segments
 * are named.
 */
static int
cmpseg(size_t len, const char *str, size_t len2, const char *str2)
{

        if (len == len2) {
                if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
                        return (0);
        }
        return (-1);
}

static int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
{
        struct vm_memseg memseg;
        size_t n;
        int error;

        /*
         * If the memory segment has already been created then just return.
         * This is the usual case for the SYSMEM segment created by userspace
         * loaders like bhyveload(8).
         */
        error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
            sizeof(memseg.name));
        if (error)
                return (error);

        if (memseg.len != 0) {
                if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
                        errno = EINVAL;
                        return (-1);
                } else {
                        return (0);
                }
        }

        bzero(&memseg, sizeof(struct vm_memseg));
        memseg.segid = segid;
        memseg.len = len;
        if (name != NULL) {
                n = strlcpy(memseg.name, name, sizeof(memseg.name));
                if (n >= sizeof(memseg.name)) {
                        errno = ENAMETOOLONG;
                        return (-1);
                }
        }

        error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
        return (error);
}

int
vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
    size_t bufsize)
{
        struct vm_memseg memseg;
        size_t n;
        int error;

        bzero(&memseg, sizeof(memseg));
        memseg.segid = segid;
        error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
        if (error == 0) {
                *lenp = memseg.len;
                n = strlcpy(namebuf, memseg.name, bufsize);
                if (n >= bufsize) {
                        errno = ENAMETOOLONG;
                        error = -1;
                }
        }
        return (error);
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
{
        char *ptr;
        int error, flags;

        /* Map 'len' bytes starting at 'gpa' in the guest address space */
        error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
        if (error)
                return (error);

        flags = MAP_SHARED | MAP_FIXED;
        if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
                flags |= MAP_NOCORE;

        /* mmap into the process address space on the host */
        ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
        if (ptr == MAP_FAILED)
                return (-1);

        return (0);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
        size_t objsize, len;
        vm_paddr_t gpa;
        char *baseaddr, *ptr;
        int error;

        assert(vms == VM_MMAP_ALL);

        /*
         * If 'memsize' cannot fit entirely in the 'lowmem' segment then create
         * another 'highmem' segment above VM_HIGHMEM_BASE for the remainder.
         */
        if (memsize > VM_LOWMEM_LIMIT) {
                ctx->memsegs[VM_MEMSEG_LOW].size = VM_LOWMEM_LIMIT;
                ctx->memsegs[VM_MEMSEG_HIGH].size = memsize - VM_LOWMEM_LIMIT;
                objsize = VM_HIGHMEM_BASE + ctx->memsegs[VM_MEMSEG_HIGH].size;
        } else {
                ctx->memsegs[VM_MEMSEG_LOW].size = memsize;
                ctx->memsegs[VM_MEMSEG_HIGH].size = 0;
                objsize = memsize;
        }

        error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
        if (error)
                return (error);

        /*
         * Stake out a contiguous region covering the guest physical memory
         * and the adjoining guard regions.
         */
        len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
        ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
        if (ptr == MAP_FAILED)
                return (-1);

        baseaddr = ptr + VM_MMAP_GUARD_SIZE;
        if (ctx->memsegs[VM_MEMSEG_HIGH].size > 0) {
                gpa = VM_HIGHMEM_BASE;
                len = ctx->memsegs[VM_MEMSEG_HIGH].size;
                error = setup_memory_segment(ctx, gpa, len, baseaddr);
                if (error)
                        return (error);
        }

        if (ctx->memsegs[VM_MEMSEG_LOW].size > 0) {
                gpa = 0;
                len = ctx->memsegs[VM_MEMSEG_LOW].size;
                error = setup_memory_segment(ctx, gpa, len, baseaddr);
                if (error)
                        return (error);
        }

        ctx->baseaddr = baseaddr;

        return (0);
}

/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular return NULL if [gaddr, gaddr+len) falls in guest MMIO region.
 * The instruction emulation code depends on this behavior.
 */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{
        vm_size_t lowsize, highsize;

        lowsize = ctx->memsegs[VM_MEMSEG_LOW].size;
        if (lowsize > 0) {
                if (gaddr < lowsize && len <= lowsize && gaddr + len <= lowsize)
                        return (ctx->baseaddr + gaddr);
        }

        highsize = ctx->memsegs[VM_MEMSEG_HIGH].size;
        if (highsize > 0 && gaddr >= VM_HIGHMEM_BASE) {
                if (gaddr < VM_HIGHMEM_BASE + highsize && len <= highsize &&
                    gaddr + len <= VM_HIGHMEM_BASE + highsize)
                        return (ctx->baseaddr + gaddr);
        }

        return (NULL);
}

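/*
 * Example (illustrative only): wiring up guest memory for a freshly opened
 * VM and translating a guest-physical address to a host pointer.  The 1 GB
 * size and the gpa of 0x1000 are arbitrary values for the example.
 *
 *      size_t memsize = 1 * GB;
 *      void *cookie;
 *
 *      if (vm_setup_memory(ctx, memsize, VM_MMAP_ALL) != 0)
 *              err(1, "vm_setup_memory");
 *      cookie = vm_map_gpa(ctx, 0x1000, PAGE_SIZE);
 *      if (cookie == NULL)
 *              errx(1, "gpa range is not guest RAM");
 */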

vm_paddr_t
vm_rev_map_gpa(struct vmctx *ctx, void *addr)
{
        vm_paddr_t offaddr;
        vm_size_t lowsize, highsize;

        offaddr = (char *)addr - ctx->baseaddr;

        lowsize = ctx->memsegs[VM_MEMSEG_LOW].size;
        if (lowsize > 0)
                if (offaddr <= lowsize)
                        return (offaddr);

        highsize = ctx->memsegs[VM_MEMSEG_HIGH].size;
        if (highsize > 0)
                if (offaddr >= VM_HIGHMEM_BASE &&
                    offaddr < VM_HIGHMEM_BASE + highsize)
                        return (offaddr);

        return ((vm_paddr_t)-1);
}

const char *
vm_get_name(struct vmctx *ctx)
{

        return (ctx->name);
}

size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

        return (ctx->memsegs[VM_MEMSEG_LOW].size);
}

vm_paddr_t
vm_get_highmem_base(struct vmctx *ctx __unused)
{

        return (VM_HIGHMEM_BASE);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

        return (ctx->memsegs[VM_MEMSEG_HIGH].size);
}

void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
        char pathname[MAXPATHLEN];
        size_t len2;
        char *base, *ptr;
        int fd, error, flags;

        fd = -1;
        ptr = MAP_FAILED;
        if (name == NULL || strlen(name) == 0) {
                errno = EINVAL;
                goto done;
        }

        error = vm_alloc_memseg(ctx, segid, len, name);
        if (error)
                goto done;

        strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
        strlcat(pathname, ctx->name, sizeof(pathname));
        strlcat(pathname, ".", sizeof(pathname));
        strlcat(pathname, name, sizeof(pathname));

        fd = open(pathname, O_RDWR);
        if (fd < 0)
                goto done;

        /*
         * Stake out a contiguous region covering the device memory and the
         * adjoining guard regions.
         */
        len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
        base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
            0);
        if (base == MAP_FAILED)
                goto done;

        flags = MAP_SHARED | MAP_FIXED;
        if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
                flags |= MAP_NOCORE;

        /* mmap the devmem region in the host address space */
        ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
done:
        if (fd >= 0)
                close(fd);
        return (ptr);
}

int
vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg)
{
        /*
         * XXX: fragile, handle with care
         * Assumes that the first field of the ioctl data
         * is the vcpuid.
         */
        *(int *)arg = vcpu->vcpuid;
        return (ioctl(vcpu->ctx->fd, cmd, arg));
}

int
vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
{
        int error;
        struct vm_register vmreg;

        bzero(&vmreg, sizeof(vmreg));
        vmreg.regnum = reg;
        vmreg.regval = val;

        error = vcpu_ioctl(vcpu, VM_SET_REGISTER, &vmreg);
        return (error);
}

int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *ret_val)
{
        int error;
        struct vm_register vmreg;

        bzero(&vmreg, sizeof(vmreg));
        vmreg.regnum = reg;

        error = vcpu_ioctl(vcpu, VM_GET_REGISTER, &vmreg);
        *ret_val = vmreg.regval;
        return (error);
}

int
vm_set_register_set(struct vcpu *vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
        int error;
        struct vm_register_set vmregset;

        bzero(&vmregset, sizeof(vmregset));
        vmregset.count = count;
        vmregset.regnums = regnums;
        vmregset.regvals = regvals;

        error = vcpu_ioctl(vcpu, VM_SET_REGISTER_SET, &vmregset);
        return (error);
}

int
vm_get_register_set(struct vcpu *vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
        int error;
        struct vm_register_set vmregset;

        bzero(&vmregset, sizeof(vmregset));
        vmregset.count = count;
        vmregset.regnums = regnums;
        vmregset.regvals = regvals;

        error = vcpu_ioctl(vcpu, VM_GET_REGISTER_SET, &vmregset);
        return (error);
}

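/*
 * Example (illustrative only): reading and writing a single guest register
 * through a vcpu handle.  VM_REG_GUEST_RAX is an amd64 register id from
 * <machine/vmm.h>; other platforms use their own VM_REG_* constants.
 *
 *      uint64_t rax;
 *
 *      if (vm_get_register(vcpu, VM_REG_GUEST_RAX, &rax) != 0)
 *              err(1, "vm_get_register");
 *      if (vm_set_register(vcpu, VM_REG_GUEST_RAX, rax + 1) != 0)
 *              err(1, "vm_set_register");
 */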

int
vm_run(struct vcpu *vcpu, struct vm_run *vmrun)
{
        return (vcpu_ioctl(vcpu, VM_RUN, vmrun));
}

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
        struct vm_suspend vmsuspend;

        bzero(&vmsuspend, sizeof(vmsuspend));
        vmsuspend.how = how;
        return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

        return (ioctl(ctx->fd, VM_REINIT, 0));
}

int
vm_capability_name2type(const char *capname)
{
        int i;

        for (i = 0; i < VM_CAP_MAX; i++) {
                if (vm_capstrmap[i] != NULL &&
                    strcmp(vm_capstrmap[i], capname) == 0)
                        return (i);
        }

        return (-1);
}

const char *
vm_capability_type2name(int type)
{
        if (type >= 0 && type < VM_CAP_MAX)
                return (vm_capstrmap[type]);

        return (NULL);
}

int
vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap, int *retval)
{
        int error;
        struct vm_capability vmcap;

        bzero(&vmcap, sizeof(vmcap));
        vmcap.captype = cap;

        error = vcpu_ioctl(vcpu, VM_GET_CAPABILITY, &vmcap);
        *retval = vmcap.capval;
        return (error);
}

int
vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap, int val)
{
        struct vm_capability vmcap;

        bzero(&vmcap, sizeof(vmcap));
        vmcap.captype = cap;
        vmcap.capval = val;

        return (vcpu_ioctl(vcpu, VM_SET_CAPABILITY, &vmcap));
}

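/*
 * Example (illustrative only): enabling an optional capability by name.
 * The capability string "hlt_exit" is only a sample; the set of valid names
 * comes from vm_capstrmap and depends on the platform and on hypervisor
 * support.
 *
 *      int cap;
 *
 *      cap = vm_capability_name2type("hlt_exit");
 *      if (cap < 0 || vm_set_capability(vcpu, cap, 1) != 0)
 *              warnx("capability not supported");
 */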

uint64_t *
vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv,
    int *ret_entries)
{
        static _Thread_local uint64_t *stats_buf;
        static _Thread_local u_int stats_count;
        uint64_t *new_stats;
        struct vm_stats vmstats;
        u_int count, index;
        bool have_stats;

        have_stats = false;
        count = 0;
        for (index = 0;; index += nitems(vmstats.statbuf)) {
                vmstats.index = index;
                if (vcpu_ioctl(vcpu, VM_STATS, &vmstats) != 0)
                        break;
                if (stats_count < index + vmstats.num_entries) {
                        new_stats = realloc(stats_buf,
                            (index + vmstats.num_entries) * sizeof(uint64_t));
                        if (new_stats == NULL) {
                                errno = ENOMEM;
                                return (NULL);
                        }
                        stats_count = index + vmstats.num_entries;
                        stats_buf = new_stats;
                }
                memcpy(stats_buf + index, vmstats.statbuf,
                    vmstats.num_entries * sizeof(uint64_t));
                count += vmstats.num_entries;
                have_stats = true;

                if (vmstats.num_entries != nitems(vmstats.statbuf))
                        break;
        }
        if (have_stats) {
                if (ret_entries)
                        *ret_entries = count;
                if (ret_tv)
                        *ret_tv = vmstats.tv;
                return (stats_buf);
        } else
                return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
        static struct vm_stat_desc statdesc;

        statdesc.index = index;
        if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
                return (statdesc.desc);
        else
                return (NULL);
}

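/*
 * Example (illustrative only): dumping all vcpu statistics together with
 * their descriptions.
 *
 *      struct timeval tv;
 *      uint64_t *stats;
 *      int i, nstats;
 *
 *      stats = vm_get_stats(vcpu, &tv, &nstats);
 *      if (stats != NULL) {
 *              for (i = 0; i < nstats; i++)
 *                      printf("%-40s %ju\n", vm_get_stat_desc(ctx, i),
 *                          (uintmax_t)stats[i]);
 *      }
 */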

#ifdef __amd64__
int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
        int error, i;
        struct vm_gpa_pte gpapte;

        bzero(&gpapte, sizeof(gpapte));
        gpapte.gpa = gpa;

        error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

        if (error == 0) {
                *num = gpapte.ptenum;
                for (i = 0; i < gpapte.ptenum; i++)
                        pte[i] = gpapte.pte[i];
        }

        return (error);
}

int
vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
        struct vm_gla2gpa gg;
        int error;

        bzero(&gg, sizeof(struct vm_gla2gpa));
        gg.prot = prot;
        gg.gla = gla;
        gg.paging = *paging;

        error = vcpu_ioctl(vcpu, VM_GLA2GPA, &gg);
        if (error == 0) {
                *fault = gg.fault;
                *gpa = gg.gpa;
        }
        return (error);
}
#endif

int
vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
        struct vm_gla2gpa gg;
        int error;

        bzero(&gg, sizeof(struct vm_gla2gpa));
        gg.prot = prot;
        gg.gla = gla;
        gg.paging = *paging;

        error = vcpu_ioctl(vcpu, VM_GLA2GPA_NOFAULT, &gg);
        if (error == 0) {
                *fault = gg.fault;
                *gpa = gg.gpa;
        }
        return (error);
}

#ifndef min
#define min(a,b)        (((a) < (b)) ? (a) : (b))
#endif

#ifdef __amd64__
int
vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
        void *va;
        uint64_t gpa, off;
        int error, i, n;

        for (i = 0; i < iovcnt; i++) {
                iov[i].iov_base = 0;
                iov[i].iov_len = 0;
        }

        while (len) {
                assert(iovcnt > 0);
                error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
                if (error || *fault)
                        return (error);

                off = gpa & PAGE_MASK;
                n = MIN(len, PAGE_SIZE - off);

                va = vm_map_gpa(vcpu->ctx, gpa, n);
                if (va == NULL)
                        return (EFAULT);

                iov->iov_base = va;
                iov->iov_len = n;
                iov++;
                iovcnt--;

                gla += n;
                len -= n;
        }
        return (0);
}
#endif

void
vm_copy_teardown(struct iovec *iov __unused, int iovcnt __unused)
{
        /*
         * Intentionally empty.  This is used by the instruction
         * emulation code shared with the kernel.  The in-kernel
         * version of this is non-empty.
         */
}

void
vm_copyin(struct iovec *iov, void *vp, size_t len)
{
        const char *src;
        char *dst;
        size_t n;

        dst = vp;
        while (len) {
                assert(iov->iov_len);
                n = min(len, iov->iov_len);
                src = iov->iov_base;
                bcopy(src, dst, n);

                iov++;
                dst += n;
                len -= n;
        }
}

void
vm_copyout(const void *vp, struct iovec *iov, size_t len)
{
        const char *src;
        char *dst;
        size_t n;

        src = vp;
        while (len) {
                assert(iov->iov_len);
                n = min(len, iov->iov_len);
                dst = iov->iov_base;
                bcopy(src, dst, n);

                iov++;
                src += n;
                len -= n;
        }
}

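/*
 * Example (illustrative only, amd64): copying a small buffer out of guest
 * memory given a guest linear address.  The paging state and the linear
 * address would normally come from the current vm exit; they are assumed to
 * be available here as 'paging' and 'gla'.
 *
 *      struct iovec iov[4];
 *      uint8_t buf[64];
 *      int error, fault;
 *
 *      error = vm_copy_setup(vcpu, &paging, gla, sizeof(buf), PROT_READ,
 *          iov, nitems(iov), &fault);
 *      if (error == 0 && !fault)
 *              vm_copyin(iov, buf, sizeof(buf));
 */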

static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
        struct vm_cpuset vm_cpuset;
        int error;

        bzero(&vm_cpuset, sizeof(struct vm_cpuset));
        vm_cpuset.which = which;
        vm_cpuset.cpusetsize = sizeof(cpuset_t);
        vm_cpuset.cpus = cpus;

        error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
        return (error);
}

int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

        return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

        return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

        return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
}

int
vm_activate_cpu(struct vcpu *vcpu)
{
        struct vm_activate_cpu ac;
        int error;

        bzero(&ac, sizeof(struct vm_activate_cpu));
        error = vcpu_ioctl(vcpu, VM_ACTIVATE_CPU, &ac);
        return (error);
}

int
vm_suspend_all_cpus(struct vmctx *ctx)
{
        struct vm_activate_cpu ac;
        int error;

        bzero(&ac, sizeof(struct vm_activate_cpu));
        ac.vcpuid = -1;
        error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
        return (error);
}

int
vm_suspend_cpu(struct vcpu *vcpu)
{
        struct vm_activate_cpu ac;
        int error;

        bzero(&ac, sizeof(struct vm_activate_cpu));
        error = vcpu_ioctl(vcpu, VM_SUSPEND_CPU, &ac);
        return (error);
}

int
vm_resume_cpu(struct vcpu *vcpu)
{
        struct vm_activate_cpu ac;
        int error;

        bzero(&ac, sizeof(struct vm_activate_cpu));
        error = vcpu_ioctl(vcpu, VM_RESUME_CPU, &ac);
        return (error);
}

int
vm_resume_all_cpus(struct vmctx *ctx)
{
        struct vm_activate_cpu ac;
        int error;

        bzero(&ac, sizeof(struct vm_activate_cpu));
        ac.vcpuid = -1;
        error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
        return (error);
}

#ifdef __amd64__
int
vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
{
        struct vm_intinfo vmii;
        int error;

        bzero(&vmii, sizeof(struct vm_intinfo));
        error = vcpu_ioctl(vcpu, VM_GET_INTINFO, &vmii);
        if (error == 0) {
                *info1 = vmii.info1;
                *info2 = vmii.info2;
        }
        return (error);
}

int
vm_set_intinfo(struct vcpu *vcpu, uint64_t info1)
{
        struct vm_intinfo vmii;
        int error;

        bzero(&vmii, sizeof(struct vm_intinfo));
        vmii.info1 = info1;
        error = vcpu_ioctl(vcpu, VM_SET_INTINFO, &vmii);
        return (error);
}
#endif

#ifdef WITH_VMMAPI_SNAPSHOT
int
vm_restart_instruction(struct vcpu *vcpu)
{
        int arg;

        return (vcpu_ioctl(vcpu, VM_RESTART_INSTRUCTION, &arg));
}

int
vm_snapshot_req(struct vmctx *ctx, struct vm_snapshot_meta *meta)
{

        if (ioctl(ctx->fd, VM_SNAPSHOT_REQ, meta) == -1) {
#ifdef SNAPSHOT_DEBUG
                fprintf(stderr, "%s: snapshot failed for %s: %d\r\n",
                    __func__, meta->dev_name, errno);
#endif
                return (-1);
        }
        return (0);
}

int
vm_restore_time(struct vmctx *ctx)
{
        int dummy;

        dummy = 0;
        return (ioctl(ctx->fd, VM_RESTORE_TIME, &dummy));
}
#endif

int
vm_set_topology(struct vmctx *ctx,
    uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
{
        struct vm_cpu_topology topology;

        bzero(&topology, sizeof (struct vm_cpu_topology));
        topology.sockets = sockets;
        topology.cores = cores;
        topology.threads = threads;
        topology.maxcpus = maxcpus;
        return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
}

int
vm_get_topology(struct vmctx *ctx,
    uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
{
        struct vm_cpu_topology topology;
        int error;

        bzero(&topology, sizeof (struct vm_cpu_topology));
        error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
        if (error == 0) {
                *sockets = topology.sockets;
                *cores = topology.cores;
                *threads = topology.threads;
                *maxcpus = topology.maxcpus;
        }
        return (error);
}

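/*
 * Example (illustrative only): reading back the guest CPU topology.  A
 * corresponding vm_set_topology() call would normally be made before the
 * guest boots; the accepted maxcpus value is hypervisor-dependent, so it is
 * omitted from this sketch.
 *
 *      uint16_t sockets, cores, threads, maxcpus;
 *
 *      if (vm_get_topology(ctx, &sockets, &cores, &threads, &maxcpus) == 0)
 *              printf("%u sockets x %u cores x %u threads (max %u vcpus)\n",
 *                  sockets, cores, threads, maxcpus);
 */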

int
vm_limit_rights(struct vmctx *ctx)
{
        cap_rights_t rights;

        cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW);
        if (caph_rights_limit(ctx->fd, &rights) != 0)
                return (-1);
        if (caph_ioctls_limit(ctx->fd, vm_ioctl_cmds, vm_ioctl_ncmds) != 0)
                return (-1);
        return (0);
}

/*
 * Avoid using in new code.  Operations on the fd should be wrapped here so
 * that capability rights can be kept in sync.
 */
int
vm_get_device_fd(struct vmctx *ctx)
{

        return (ctx->fd);
}

/* Legacy interface, do not use. */
const cap_ioctl_t *
vm_get_ioctls(size_t *len)
{
        cap_ioctl_t *cmds;
        size_t sz;

        if (len == NULL) {
                sz = vm_ioctl_ncmds * sizeof(vm_ioctl_cmds[0]);
                cmds = malloc(sz);
                if (cmds == NULL)
                        return (NULL);
                bcopy(vm_ioctl_cmds, cmds, sz);
                return (cmds);
        }

        *len = vm_ioctl_ncmds;
        return (NULL);
}