/*	$OpenBSD: xen.c,v 1.100 2024/11/27 02:38:35 jsg Exp $	*/

/*
 * Copyright (c) 2015, 2016, 2017 Mike Belopuhov
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>

/* Xen requires locked atomic operations */
#ifndef MULTIPROCESSOR
#define _XENMPATOMICS
#define MULTIPROCESSOR
#endif
#include <sys/atomic.h>
#ifdef _XENMPATOMICS
#undef MULTIPROCESSOR
#undef _XENMPATOMICS
#endif

#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/refcnt.h>
#include <sys/malloc.h>
#include <sys/stdint.h>
#include <sys/device.h>
#include <sys/task.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include <uvm/uvm_extern.h>

#include <machine/i82489var.h>

#include <dev/pv/pvvar.h>
#include <dev/pv/pvreg.h>
#include <dev/pv/xenreg.h>
#include <dev/pv/xenvar.h>

/* #define XEN_DEBUG */

#ifdef XEN_DEBUG
#define DPRINTF(x...)		printf(x)
#else
#define DPRINTF(x...)
#endif

struct xen_softc *xen_sc;

int	xen_init_hypercall(struct xen_softc *);
int	xen_getfeatures(struct xen_softc *);
int	xen_init_info_page(struct xen_softc *);
int	xen_init_cbvec(struct xen_softc *);
int	xen_init_interrupts(struct xen_softc *);
void	xen_intr_dispatch(void *);
int	xen_init_grant_tables(struct xen_softc *);
struct xen_gntent *
	xen_grant_table_grow(struct xen_softc *);
int	xen_grant_table_alloc(struct xen_softc *, grant_ref_t *);
void	xen_grant_table_free(struct xen_softc *, grant_ref_t);
void	xen_grant_table_enter(struct xen_softc *, grant_ref_t, paddr_t,
	    int, int);
void	xen_grant_table_remove(struct xen_softc *, grant_ref_t);
void	xen_disable_emulated_devices(struct xen_softc *);

int	xen_match(struct device *, void *, void *);
void	xen_attach(struct device *, struct device *, void *);
void	xen_deferred(struct device *);
void	xen_control(void *);
void	xen_hotplug(void *);
void	xen_resume(struct device *);
int	xen_activate(struct device *, int);
int	xen_attach_device(struct xen_softc *, struct xen_devlist *,
	    const char *, const char *);
int	xen_probe_devices(struct xen_softc *);

int	xen_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void	xen_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	xen_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	xen_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *,
	    int);
void	xen_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	xen_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);

int	xs_attach(struct xen_softc *);

struct cfdriver xen_cd = {
	NULL, "xen", DV_DULL
};

const struct cfattach xen_ca = {
	sizeof(struct xen_softc), xen_match, xen_attach, NULL, xen_activate
};

struct bus_dma_tag xen_bus_dma_tag = {
	NULL,
	xen_bus_dmamap_create,
	xen_bus_dmamap_destroy,
	xen_bus_dmamap_load,
	xen_bus_dmamap_load_mbuf,
	NULL,
	NULL,
	xen_bus_dmamap_unload,
	xen_bus_dmamap_sync,
	_bus_dmamem_alloc,
	NULL,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	NULL,
};

int
xen_match(struct device *parent, void *match, void *aux)
{
	struct pv_attach_args *pva = aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_XEN];

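	/*
	 * pvbus(4) records the base CPUID leaf of the hypervisor it
	 * detected; a value of zero means no Xen signature was found.
	 */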
	if (hv->hv_base == 0)
		return (0);

	return (1);
}

void
xen_attach(struct device *parent, struct device *self, void *aux)
{
	struct pv_attach_args *pva = (struct pv_attach_args *)aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_XEN];
	struct xen_softc *sc = (struct xen_softc *)self;

	sc->sc_base = hv->hv_base;
	sc->sc_dmat = pva->pva_dmat;

	if (xen_init_hypercall(sc))
		return;

	/* Wire it up to the global */
	xen_sc = sc;

	if (xen_getfeatures(sc))
		return;

	if (xen_init_info_page(sc))
		return;

	xen_init_cbvec(sc);

	if (xen_init_interrupts(sc))
		return;

	if (xen_init_grant_tables(sc))
		return;

	if (xs_attach(sc))
		return;

	xen_probe_devices(sc);

	/* pvbus(4) key/value interface */
	hv->hv_kvop = xs_kvop;
	hv->hv_arg = sc;

	xen_disable_emulated_devices(sc);

	config_mountroot(self, xen_deferred);
}

void
xen_deferred(struct device *self)
{
	struct xen_softc *sc = (struct xen_softc *)self;

	if (!(sc->sc_flags & XSF_CBVEC)) {
		DPRINTF("%s: callback vector hasn't been established\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	xen_intr_enable();

	if (xs_watch(sc, "control", "shutdown", &sc->sc_ctltsk,
	    xen_control, sc))
		printf("%s: failed to setup shutdown control watch\n",
		    sc->sc_dev.dv_xname);
}

void
xen_control(void *arg)
{
	struct xen_softc *sc = arg;
	struct xs_transaction xst;
	char action[128];
	int error;

	memset(&xst, 0, sizeof(xst));
	xst.xst_id = 0;
	xst.xst_cookie = sc->sc_xs;

	error = xs_getprop(sc, "control", "shutdown", action, sizeof(action));
	if (error) {
		if (error != ENOENT)
			printf("%s: failed to process control event\n",
			    sc->sc_dev.dv_xname);
		return;
	}

	if (strlen(action) == 0)
		return;

	/* Acknowledge the event */
	xs_setprop(sc, "control", "shutdown", "", 0);

	if (strcmp(action, "halt") == 0 || strcmp(action, "poweroff") == 0) {
		pvbus_shutdown(&sc->sc_dev);
	} else if (strcmp(action, "reboot") == 0) {
		pvbus_reboot(&sc->sc_dev);
	} else if (strcmp(action, "crash") == 0) {
		panic("xen told us to do this");
	} else if (strcmp(action, "suspend") == 0) {
		/* Not implemented yet */
	} else {
		printf("%s: unknown shutdown event \"%s\"\n",
		    sc->sc_dev.dv_xname, action);
	}
}

void
xen_resume(struct device *self)
{
}

int
xen_activate(struct device *self, int act)
{
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		xen_resume(self);
		break;
	}
	return (rv);
}

int
xen_init_hypercall(struct xen_softc *sc)
{
	extern void *xen_hypercall_page;
	uint32_t regs[4];
	paddr_t pa;

	/* Get hypercall page configuration MSR */
	CPUID(sc->sc_base + CPUID_OFFSET_XEN_HYPERCALL,
	    regs[0], regs[1], regs[2], regs[3]);

	/* We don't support more than one hypercall page */
	if (regs[0] != 1) {
		printf(": requested %u hypercall pages\n", regs[0]);
		return (-1);
	}

	sc->sc_hc = &xen_hypercall_page;

	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_hc, &pa)) {
		printf(": hypercall page PA extraction failed\n");
		return (-1);
	}
	wrmsr(regs[1], pa);

	return (0);
}

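/*
 * Hypercalls are issued through the page set up above: each operation
 * lives at a 32-byte offset within that page.  xen_hypercall() packs up
 * to five arguments into an array for xen_hypercallv(), which selects
 * the matching inline-assembly stub based on the argument count.
 */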
int
xen_hypercall(struct xen_softc *sc, int op, int argc, ...)
{
	va_list ap;
	ulong argv[5];
	int i;

	if (argc < 0 || argc > 5)
		return (-1);
	va_start(ap, argc);
	for (i = 0; i < argc; i++)
		argv[i] = (ulong)va_arg(ap, ulong);
	va_end(ap);
	return (xen_hypercallv(sc, op, argc, argv));
}

int
xen_hypercallv(struct xen_softc *sc, int op, int argc, ulong *argv)
{
	ulong hcall;
	int rv = 0;

	hcall = (ulong)sc->sc_hc + op * 32;

#if defined(XEN_DEBUG) && disabled
	{
		int i;

		printf("hypercall %d", op);
		if (argc > 0) {
			printf(", args {");
			for (i = 0; i < argc; i++)
				printf(" %#lx", argv[i]);
			printf(" }\n");
		} else
			printf("\n");
	}
#endif

	switch (argc) {
	case 0: {
		HYPERCALL_RES1;
		__asm__ volatile (			\
			HYPERCALL_LABEL			\
			: HYPERCALL_OUT1		\
			: HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	case 1: {
		HYPERCALL_RES1; HYPERCALL_RES2;
		HYPERCALL_ARG1(argv[0]);
		__asm__ volatile (			\
			HYPERCALL_LABEL			\
			: HYPERCALL_OUT1 HYPERCALL_OUT2	\
			: HYPERCALL_IN1			\
			, HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	case 2: {
		HYPERCALL_RES1; HYPERCALL_RES2; HYPERCALL_RES3;
		HYPERCALL_ARG1(argv[0]); HYPERCALL_ARG2(argv[1]);
		__asm__ volatile (			\
			HYPERCALL_LABEL			\
			: HYPERCALL_OUT1 HYPERCALL_OUT2	\
			  HYPERCALL_OUT3		\
			: HYPERCALL_IN1 HYPERCALL_IN2	\
			, HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	case 3: {
		HYPERCALL_RES1; HYPERCALL_RES2; HYPERCALL_RES3;
		HYPERCALL_RES4;
		HYPERCALL_ARG1(argv[0]); HYPERCALL_ARG2(argv[1]);
		HYPERCALL_ARG3(argv[2]);
		__asm__ volatile (			\
			HYPERCALL_LABEL			\
			: HYPERCALL_OUT1 HYPERCALL_OUT2	\
			  HYPERCALL_OUT3 HYPERCALL_OUT4	\
			: HYPERCALL_IN1 HYPERCALL_IN2	\
			  HYPERCALL_IN3			\
			, HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	case 4: {
		HYPERCALL_RES1; HYPERCALL_RES2; HYPERCALL_RES3;
		HYPERCALL_RES4; HYPERCALL_RES5;
		HYPERCALL_ARG1(argv[0]); HYPERCALL_ARG2(argv[1]);
		HYPERCALL_ARG3(argv[2]); HYPERCALL_ARG4(argv[3]);
		__asm__ volatile (			\
			HYPERCALL_LABEL			\
			: HYPERCALL_OUT1 HYPERCALL_OUT2	\
			  HYPERCALL_OUT3 HYPERCALL_OUT4	\
			  HYPERCALL_OUT5		\
			: HYPERCALL_IN1 HYPERCALL_IN2	\
			  HYPERCALL_IN3 HYPERCALL_IN4	\
			, HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	case 5: {
		HYPERCALL_RES1; HYPERCALL_RES2; HYPERCALL_RES3;
		HYPERCALL_RES4; HYPERCALL_RES5; HYPERCALL_RES6;
		HYPERCALL_ARG1(argv[0]); HYPERCALL_ARG2(argv[1]);
		HYPERCALL_ARG3(argv[2]); HYPERCALL_ARG4(argv[3]);
		HYPERCALL_ARG5(argv[4]);
		__asm__ volatile (			\
			HYPERCALL_LABEL			\
			: HYPERCALL_OUT1 HYPERCALL_OUT2	\
			  HYPERCALL_OUT3 HYPERCALL_OUT4	\
			  HYPERCALL_OUT5 HYPERCALL_OUT6	\
			: HYPERCALL_IN1 HYPERCALL_IN2	\
			  HYPERCALL_IN3 HYPERCALL_IN4	\
			  HYPERCALL_IN5			\
			, HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	default:
		DPRINTF("%s: wrong number of arguments: %d\n", __func__, argc);
		rv = -1;
		break;
	}
	return (rv);
}

int
xen_getfeatures(struct xen_softc *sc)
{
	struct xen_feature_info xfi;

	memset(&xfi, 0, sizeof(xfi));
	if (xen_hypercall(sc, XC_VERSION, 2, XENVER_get_features, &xfi) < 0) {
		printf(": failed to fetch features\n");
		return (-1);
	}
	sc->sc_features = xfi.submap;
#ifdef XEN_DEBUG
	printf(": features %b", sc->sc_features,
	    "\20\014DOM0\013PIRQ\012PVCLOCK\011CBVEC\010GNTFLAGS\007HMA"
	    "\006PTUPD\005PAE4G\004SUPERVISOR\003AUTOPMAP\002WDT\001WPT");
#else
	printf(": features %#x", sc->sc_features);
#endif
	return (0);
}

#ifdef XEN_DEBUG
void
xen_print_info_page(void)
{
	struct xen_softc *sc = xen_sc;
	struct shared_info *s = sc->sc_ipg;
	struct vcpu_info *v;
	int i;

	virtio_membar_sync();
	for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) {
		v = &s->vcpu_info[i];
		if (!v->evtchn_upcall_pending && !v->evtchn_upcall_mask &&
		    !v->evtchn_pending_sel && !v->time.version &&
		    !v->time.tsc_timestamp && !v->time.system_time &&
		    !v->time.tsc_to_system_mul && !v->time.tsc_shift)
			continue;
		printf("vcpu%d:\n"
		    "   upcall_pending=%02x upcall_mask=%02x pending_sel=%#lx\n"
		    "   time version=%u tsc=%llu system=%llu\n"
		    "   time mul=%u shift=%d\n",
		    i, v->evtchn_upcall_pending, v->evtchn_upcall_mask,
		    v->evtchn_pending_sel, v->time.version,
		    v->time.tsc_timestamp, v->time.system_time,
		    v->time.tsc_to_system_mul, v->time.tsc_shift);
	}
	printf("pending events: ");
	for (i = 0; i < nitems(s->evtchn_pending); i++) {
		if (s->evtchn_pending[i] == 0)
			continue;
		printf(" %d:%#lx", i, s->evtchn_pending[i]);
	}
	printf("\nmasked events: ");
	for (i = 0; i < nitems(s->evtchn_mask); i++) {
		if (s->evtchn_mask[i] == 0xffffffffffffffffULL)
			continue;
		printf(" %d:%#lx", i, s->evtchn_mask[i]);
	}
	printf("\nwc ver=%u sec=%u nsec=%u\n", s->wc_version, s->wc_sec,
	    s->wc_nsec);
	printf("arch maxpfn=%lu framelist=%lu nmi=%lu\n", s->arch.max_pfn,
	    s->arch.pfn_to_mfn_frame_list, s->arch.nmi_reason);
}
#endif	/* XEN_DEBUG */

int
xen_init_info_page(struct xen_softc *sc)
{
	struct xen_add_to_physmap xatp;
	paddr_t pa;

	sc->sc_ipg = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_ipg == NULL) {
		printf(": failed to allocate shared info page\n");
		return (-1);
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_ipg, &pa)) {
		printf(": shared info page PA extraction failed\n");
		free(sc->sc_ipg, M_DEVBUF, PAGE_SIZE);
		return (-1);
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = atop(pa);
	if (xen_hypercall(sc, XC_MEMORY, 2, XENMEM_add_to_physmap, &xatp)) {
		printf(": failed to register shared info page\n");
		free(sc->sc_ipg, M_DEVBUF, PAGE_SIZE);
		return (-1);
	}
	return (0);
}

int
xen_init_cbvec(struct xen_softc *sc)
{
	struct xen_hvm_param xhp;

	if ((sc->sc_features & XENFEAT_CBVEC) == 0)
		return (ENOENT);

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	xhp.value = HVM_CALLBACK_VECTOR(LAPIC_XEN_VECTOR);
	if (xen_hypercall(sc, XC_HVM, 2, HVMOP_set_param, &xhp)) {
		/* Will retry with the xspd(4) PCI interrupt */
		return (ENOENT);
	}
	DPRINTF(", idtvec %d", LAPIC_XEN_VECTOR);

	sc->sc_flags |= XSF_CBVEC;

	return (0);
}

int
xen_init_interrupts(struct xen_softc *sc)
{
	int i;

	sc->sc_irq = LAPIC_XEN_VECTOR;

	/*
	 * Clear all pending events and mask all interrupts
	 */
	for (i = 0; i < nitems(sc->sc_ipg->evtchn_pending); i++) {
		sc->sc_ipg->evtchn_pending[i] = 0;
		sc->sc_ipg->evtchn_mask[i] = ~0UL;
	}

	SLIST_INIT(&sc->sc_intrs);

	mtx_init(&sc->sc_islck, IPL_NET);

	return (0);
}

static int
xen_evtchn_hypercall(struct xen_softc *sc, int cmd, void *arg, size_t len)
{
	struct evtchn_op compat;
	int error;

	error = xen_hypercall(sc, XC_EVTCHN, 2, cmd, arg);
	if (error == -ENOXENSYS) {
		memset(&compat, 0, sizeof(compat));
		compat.cmd = cmd;
		memcpy(&compat.u, arg, len);
		error = xen_hypercall(sc, XC_OEVTCHN, 1, &compat);
	}
	return (error);
}

static inline void
xen_intsrc_add(struct xen_softc *sc, struct xen_intsrc *xi)
{
	refcnt_init(&xi->xi_refcnt);
	mtx_enter(&sc->sc_islck);
	SLIST_INSERT_HEAD(&sc->sc_intrs, xi, xi_entry);
	mtx_leave(&sc->sc_islck);
}

static inline struct xen_intsrc *
xen_intsrc_acquire(struct xen_softc *sc, evtchn_port_t port)
{
	struct xen_intsrc *xi = NULL;

	mtx_enter(&sc->sc_islck);
	SLIST_FOREACH(xi, &sc->sc_intrs, xi_entry) {
		if (xi->xi_port == port) {
			refcnt_take(&xi->xi_refcnt);
			break;
		}
	}
	mtx_leave(&sc->sc_islck);
	return (xi);
}

static inline void
xen_intsrc_release(struct xen_softc *sc, struct xen_intsrc *xi)
{
	refcnt_rele_wake(&xi->xi_refcnt);
}

static inline struct xen_intsrc *
xen_intsrc_remove(struct xen_softc *sc, evtchn_port_t port)
{
	struct xen_intsrc *xi;

	mtx_enter(&sc->sc_islck);
	SLIST_FOREACH(xi, &sc->sc_intrs, xi_entry) {
		if (xi->xi_port == port) {
			SLIST_REMOVE(&sc->sc_intrs, xi, xen_intsrc, xi_entry);
			break;
		}
	}
	mtx_leave(&sc->sc_islck);
	if (xi != NULL)
		refcnt_finalize(&xi->xi_refcnt, "xenisrm");
	return (xi);
}

static inline void
xen_intr_mask_acquired(struct xen_softc *sc, struct xen_intsrc *xi)
{
	xi->xi_masked = 1;
	set_bit(xi->xi_port, &sc->sc_ipg->evtchn_mask[0]);
}

static inline int
xen_intr_unmask_release(struct xen_softc *sc, struct xen_intsrc *xi)
{
	struct evtchn_unmask eu;

	xi->xi_masked = 0;
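	/* Nothing to do if the port is not masked in the shared info page */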
	if (!test_bit(xi->xi_port, &sc->sc_ipg->evtchn_mask[0])) {
		xen_intsrc_release(sc, xi);
		return (0);
	}
	eu.port = xi->xi_port;
	xen_intsrc_release(sc, xi);
	return (xen_evtchn_hypercall(sc, EVTCHNOP_unmask, &eu, sizeof(eu)));
}

void
xen_intr_ack(void)
{
	struct xen_softc *sc = xen_sc;
	struct shared_info *s = sc->sc_ipg;
	struct cpu_info *ci = curcpu();
	struct vcpu_info *v = &s->vcpu_info[CPU_INFO_UNIT(ci)];

	v->evtchn_upcall_pending = 0;
	virtio_membar_sync();
}

void
xen_intr(void)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi;
	struct shared_info *s = sc->sc_ipg;
	struct cpu_info *ci = curcpu();
	struct vcpu_info *v = &s->vcpu_info[CPU_INFO_UNIT(ci)];
	ulong pending, selector;
	int port, bit, row;

	v->evtchn_upcall_pending = 0;
	selector = atomic_swap_ulong(&v->evtchn_pending_sel, 0);

	for (row = 0; selector > 0; selector >>= 1, row++) {
		if ((selector & 1) == 0)
			continue;
		if ((sc->sc_ipg->evtchn_pending[row] &
		    ~(sc->sc_ipg->evtchn_mask[row])) == 0)
			continue;
		pending = atomic_swap_ulong(&sc->sc_ipg->evtchn_pending[row],
		    0) & ~(sc->sc_ipg->evtchn_mask[row]);
		for (bit = 0; pending > 0; pending >>= 1, bit++) {
			if ((pending & 1) == 0)
				continue;
			port = (row * LONG_BIT) + bit;
			if ((xi = xen_intsrc_acquire(sc, port)) == NULL) {
				printf("%s: unhandled interrupt on port %d\n",
				    sc->sc_dev.dv_xname, port);
				continue;
			}
			xi->xi_evcnt.ec_count++;
			xen_intr_mask_acquired(sc, xi);
			if (!task_add(xi->xi_taskq, &xi->xi_task))
				xen_intsrc_release(sc, xi);
		}
	}
}

void
xen_intr_schedule(xen_intr_handle_t xih)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi;

	if ((xi = xen_intsrc_acquire(sc, (evtchn_port_t)xih)) != NULL) {
		xen_intr_mask_acquired(sc, xi);
		if (!task_add(xi->xi_taskq, &xi->xi_task))
			xen_intsrc_release(sc, xi);
	}
}

/*
 * This code achieves two goals: 1) makes sure that *after* masking
 * the interrupt source we're not getting more task_adds: sched_barrier
 * will take care of that, and 2) makes sure that the interrupt task
 * has finished executing the current task and won't be called again:
 * it sets up a barrier task to await completion of the current task
 * and relies on the interrupt masking to prevent submission of new
 * tasks in the future.
 */
void
xen_intr_barrier(xen_intr_handle_t xih)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi;

	sched_barrier(NULL);

	if ((xi = xen_intsrc_acquire(sc, (evtchn_port_t)xih)) != NULL) {
		taskq_barrier(xi->xi_taskq);
		xen_intsrc_release(sc, xi);
	}
}

void
xen_intr_signal(xen_intr_handle_t xih)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi;
	struct evtchn_send es;

	if ((xi = xen_intsrc_acquire(sc, (evtchn_port_t)xih)) != NULL) {
		es.port = xi->xi_port;
		xen_intsrc_release(sc, xi);
		xen_evtchn_hypercall(sc, EVTCHNOP_send, &es, sizeof(es));
	}
}

int
xen_intr_establish(evtchn_port_t port, xen_intr_handle_t *xih, int domain,
    void (*handler)(void *), void *arg, char *name)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi;
	struct evtchn_alloc_unbound eau;
#ifdef notyet
	struct evtchn_bind_vcpu ebv;
#endif
#if defined(XEN_DEBUG) && disabled
	struct evtchn_status es;
#endif

	if (port && (xi = xen_intsrc_acquire(sc, port)) != NULL) {
		xen_intsrc_release(sc, xi);
		DPRINTF("%s: interrupt handler has already been established "
		    "for port %u\n", sc->sc_dev.dv_xname, port);
		return (-1);
	}

	xi = malloc(sizeof(*xi), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (xi == NULL)
		return (-1);

	xi->xi_port = (evtchn_port_t)*xih;

	xi->xi_handler = handler;
	xi->xi_ctx = arg;

	xi->xi_taskq = taskq_create(name, 1, IPL_NET, TASKQ_MPSAFE);
	if (!xi->xi_taskq) {
		printf("%s: failed to create interrupt task for %s\n",
		    sc->sc_dev.dv_xname, name);
		free(xi, M_DEVBUF, sizeof(*xi));
		return (-1);
	}
	task_set(&xi->xi_task, xen_intr_dispatch, xi);

	if (port == 0) {
		/* We're being asked to allocate a new event port */
		memset(&eau, 0, sizeof(eau));
		eau.dom = DOMID_SELF;
		eau.remote_dom = domain;
		if (xen_evtchn_hypercall(sc, EVTCHNOP_alloc_unbound, &eau,
		    sizeof(eau)) != 0) {
			DPRINTF("%s: failed to allocate new event port\n",
			    sc->sc_dev.dv_xname);
			free(xi, M_DEVBUF, sizeof(*xi));
			return (-1);
		}
		*xih = xi->xi_port = eau.port;
	} else {
		*xih = xi->xi_port = port;
		/*
		 * The Event Channel API didn't open this port, so it is not
		 * responsible for closing it automatically on unbind.
		 */
		xi->xi_noclose = 1;
	}

#ifdef notyet
	/* Bind interrupt to VCPU#0 */
	memset(&ebv, 0, sizeof(ebv));
	ebv.port = xi->xi_port;
	ebv.vcpu = 0;
	if (xen_evtchn_hypercall(sc, EVTCHNOP_bind_vcpu, &ebv, sizeof(ebv))) {
		printf("%s: failed to bind interrupt on port %u to vcpu%d\n",
		    sc->sc_dev.dv_xname, ebv.port, ebv.vcpu);
	}
#endif

	evcount_attach(&xi->xi_evcnt, name, &sc->sc_irq);

	xen_intsrc_add(sc, xi);

	/* Mask the event port */
	set_bit(xi->xi_port, &sc->sc_ipg->evtchn_mask[0]);

#if defined(XEN_DEBUG) && disabled
	memset(&es, 0, sizeof(es));
	es.dom = DOMID_SELF;
	es.port = xi->xi_port;
	if (xen_evtchn_hypercall(sc, EVTCHNOP_status, &es, sizeof(es))) {
		printf("%s: failed to obtain status for port %d\n",
		    sc->sc_dev.dv_xname, es.port);
	}
	printf("%s: port %u bound to vcpu%u", sc->sc_dev.dv_xname,
	    es.port, es.vcpu);
	if (es.status == EVTCHNSTAT_interdomain)
		printf(": domain %d port %u\n", es.u.interdomain.dom,
		    es.u.interdomain.port);
	else if (es.status == EVTCHNSTAT_unbound)
		printf(": domain %d\n", es.u.unbound.dom);
	else if (es.status == EVTCHNSTAT_pirq)
		printf(": pirq %u\n", es.u.pirq);
	else if (es.status == EVTCHNSTAT_virq)
		printf(": virq %u\n", es.u.virq);
	else
		printf("\n");
#endif

	return (0);
}

int
xen_intr_disestablish(xen_intr_handle_t xih)
{
	struct xen_softc *sc = xen_sc;
	evtchn_port_t port = (evtchn_port_t)xih;
	struct evtchn_close ec;
	struct xen_intsrc *xi;

	if ((xi = xen_intsrc_remove(sc, port)) == NULL)
		return (-1);

	evcount_detach(&xi->xi_evcnt);

	taskq_destroy(xi->xi_taskq);

	set_bit(xi->xi_port, &sc->sc_ipg->evtchn_mask[0]);
	clear_bit(xi->xi_port, &sc->sc_ipg->evtchn_pending[0]);

	if (!xi->xi_noclose) {
		ec.port = xi->xi_port;
		if (xen_evtchn_hypercall(sc, EVTCHNOP_close, &ec, sizeof(ec))) {
			DPRINTF("%s: failed to close event port %u\n",
			    sc->sc_dev.dv_xname, xi->xi_port);
		}
	}

	free(xi, M_DEVBUF, sizeof(*xi));
	return (0);
}

void
xen_intr_dispatch(void *arg)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi = arg;

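	/* Run the driver's handler, then unmask the event channel */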
	if (xi->xi_handler)
		xi->xi_handler(xi->xi_ctx);

	xen_intr_unmask_release(sc, xi);
}

void
xen_intr_enable(void)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi;
	struct evtchn_unmask eu;

	mtx_enter(&sc->sc_islck);
	SLIST_FOREACH(xi, &sc->sc_intrs, xi_entry) {
		if (!xi->xi_masked) {
			eu.port = xi->xi_port;
			if (xen_evtchn_hypercall(sc, EVTCHNOP_unmask, &eu,
			    sizeof(eu)))
				printf("%s: unmasking port %u failed\n",
				    sc->sc_dev.dv_xname, xi->xi_port);
			virtio_membar_sync();
			if (test_bit(xi->xi_port, &sc->sc_ipg->evtchn_mask[0]))
				printf("%s: port %u is still masked\n",
				    sc->sc_dev.dv_xname, xi->xi_port);
		}
	}
	mtx_leave(&sc->sc_islck);
}

void
xen_intr_mask(xen_intr_handle_t xih)
{
	struct xen_softc *sc = xen_sc;
	evtchn_port_t port = (evtchn_port_t)xih;
	struct xen_intsrc *xi;

	if ((xi = xen_intsrc_acquire(sc, port)) != NULL) {
		xen_intr_mask_acquired(sc, xi);
		xen_intsrc_release(sc, xi);
	}
}

int
xen_intr_unmask(xen_intr_handle_t xih)
{
	struct xen_softc *sc = xen_sc;
	evtchn_port_t port = (evtchn_port_t)xih;
	struct xen_intsrc *xi;

	if ((xi = xen_intsrc_acquire(sc, port)) != NULL)
		return (xen_intr_unmask_release(sc, xi));

	return (0);
}

int
xen_init_grant_tables(struct xen_softc *sc)
{
	struct gnttab_query_size gqs;

	gqs.dom = DOMID_SELF;
	if (xen_hypercall(sc, XC_GNTTAB, 3, GNTTABOP_query_size, &gqs, 1)) {
		printf(": failed the query for grant table pages\n");
		return (-1);
	}
	if (gqs.nr_frames == 0 || gqs.nr_frames > gqs.max_nr_frames) {
		printf(": invalid number of grant table pages: %u/%u\n",
		    gqs.nr_frames, gqs.max_nr_frames);
		return (-1);
	}

	sc->sc_gntmax = gqs.max_nr_frames;

	sc->sc_gnt = mallocarray(sc->sc_gntmax + 1, sizeof(struct xen_gntent),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (sc->sc_gnt == NULL) {
		printf(": failed to allocate grant table lookup table\n");
		return (-1);
	}

	mtx_init(&sc->sc_gntlck, IPL_NET);

	if (xen_grant_table_grow(sc) == NULL) {
		free(sc->sc_gnt, M_DEVBUF, sc->sc_gntmax *
		    sizeof(struct xen_gntent));
		return (-1);
	}

	printf(", %d grant table frames", sc->sc_gntmax);

	xen_bus_dma_tag._cookie = sc;

	return (0);
}

struct xen_gntent *
xen_grant_table_grow(struct xen_softc *sc)
{
	struct xen_add_to_physmap xatp;
	struct xen_gntent *ge;
	void *va;
	paddr_t pa;

	if (sc->sc_gntcnt == sc->sc_gntmax) {
		printf("%s: grant table frame allotment limit reached\n",
		    sc->sc_dev.dv_xname);
		return (NULL);
	}

	va = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (va == NULL)
		return (NULL);
	if (!pmap_extract(pmap_kernel(), (vaddr_t)va, &pa)) {
		printf("%s: grant table page PA extraction failed\n",
		    sc->sc_dev.dv_xname);
		km_free(va, PAGE_SIZE, &kv_any, &kp_zero);
		return (NULL);
	}

	mtx_enter(&sc->sc_gntlck);

	ge = &sc->sc_gnt[sc->sc_gntcnt];
	ge->ge_table = va;

	xatp.domid = DOMID_SELF;
	xatp.idx = sc->sc_gntcnt;
	xatp.space = XENMAPSPACE_grant_table;
	xatp.gpfn = atop(pa);
	if (xen_hypercall(sc, XC_MEMORY, 2, XENMEM_add_to_physmap, &xatp)) {
		printf("%s: failed to add a grant table page\n",
		    sc->sc_dev.dv_xname);
		km_free(ge->ge_table, PAGE_SIZE, &kv_any, &kp_zero);
		mtx_leave(&sc->sc_gntlck);
		return (NULL);
	}
	ge->ge_start = sc->sc_gntcnt * GNTTAB_NEPG;
	/* First page has 8 reserved entries */
	ge->ge_reserved = ge->ge_start == 0 ? GNTTAB_NR_RESERVED_ENTRIES : 0;
	ge->ge_free = GNTTAB_NEPG - ge->ge_reserved;
	ge->ge_next = ge->ge_reserved;
	mtx_init(&ge->ge_lock, IPL_NET);

	sc->sc_gntcnt++;
	mtx_leave(&sc->sc_gntlck);

	return (ge);
}

int
xen_grant_table_alloc(struct xen_softc *sc, grant_ref_t *ref)
{
	struct xen_gntent *ge;
	int i;

	/* Start with a previously allocated table page */
	ge = &sc->sc_gnt[sc->sc_gntcnt - 1];
	if (ge->ge_free > 0) {
		mtx_enter(&ge->ge_lock);
		if (ge->ge_free > 0)
			goto search;
		mtx_leave(&ge->ge_lock);
	}

	/* Try other existing table pages */
	for (i = 0; i < sc->sc_gntcnt; i++) {
		ge = &sc->sc_gnt[i];
		if (ge->ge_free == 0)
			continue;
		mtx_enter(&ge->ge_lock);
		if (ge->ge_free > 0)
			goto search;
		mtx_leave(&ge->ge_lock);
	}

 alloc:
	/* Allocate a new table page */
	if ((ge = xen_grant_table_grow(sc)) == NULL)
		return (-1);

	mtx_enter(&ge->ge_lock);
	if (ge->ge_free == 0) {
		/* We were not fast enough... */
		mtx_leave(&ge->ge_lock);
		goto alloc;
	}

 search:
	for (i = ge->ge_next;
	     /* Math works here because GNTTAB_NEPG is a power of 2 */
	     i != ((ge->ge_next + GNTTAB_NEPG - 1) & (GNTTAB_NEPG - 1));
	     i++) {
		if (i == GNTTAB_NEPG)
			i = 0;
		if (ge->ge_reserved && i < ge->ge_reserved)
			continue;
		if (ge->ge_table[i].frame != 0)
			continue;
		*ref = ge->ge_start + i;
		ge->ge_table[i].flags = GTF_invalid;
		ge->ge_table[i].frame = 0xffffffff; /* Mark as taken */
		if ((ge->ge_next = i + 1) == GNTTAB_NEPG)
			ge->ge_next = ge->ge_reserved;
		ge->ge_free--;
		mtx_leave(&ge->ge_lock);
		return (0);
	}
	mtx_leave(&ge->ge_lock);

	panic("page full, sc %p gnt %p (%d) ge %p", sc, sc->sc_gnt,
	    sc->sc_gntcnt, ge);
	return (-1);
}

void
xen_grant_table_free(struct xen_softc *sc, grant_ref_t ref)
{
	struct xen_gntent *ge;

#ifdef XEN_DEBUG
	if (ref > sc->sc_gntcnt * GNTTAB_NEPG)
		panic("unmanaged ref %u sc %p gnt %p (%d)", ref, sc,
		    sc->sc_gnt, sc->sc_gntcnt);
#endif
	ge = &sc->sc_gnt[ref / GNTTAB_NEPG];
	mtx_enter(&ge->ge_lock);
#ifdef XEN_DEBUG
	if (ref < ge->ge_start || ref > ge->ge_start + GNTTAB_NEPG) {
		mtx_leave(&ge->ge_lock);
		panic("out of bounds ref %u ge %p start %u sc %p gnt %p",
		    ref, ge, ge->ge_start, sc, sc->sc_gnt);
	}
#endif
	ref -= ge->ge_start;
	if (ge->ge_table[ref].flags != GTF_invalid) {
		mtx_leave(&ge->ge_lock);
		panic("reference %u is still in use, flags %#x frame %#x",
		    ref + ge->ge_start, ge->ge_table[ref].flags,
		    ge->ge_table[ref].frame);
	}
	ge->ge_table[ref].frame = 0;
	ge->ge_next = ref;
	ge->ge_free++;
	mtx_leave(&ge->ge_lock);
}

void
xen_grant_table_enter(struct xen_softc *sc, grant_ref_t ref, paddr_t pa,
    int domain, int flags)
{
	struct xen_gntent *ge;

#ifdef XEN_DEBUG
	if (ref > sc->sc_gntcnt * GNTTAB_NEPG)
		panic("unmanaged ref %u sc %p gnt %p (%d)", ref, sc,
		    sc->sc_gnt, sc->sc_gntcnt);
#endif
	ge = &sc->sc_gnt[ref / GNTTAB_NEPG];
#ifdef XEN_DEBUG
	if (ref < ge->ge_start || ref > ge->ge_start + GNTTAB_NEPG) {
		panic("out of bounds ref %u ge %p start %u sc %p gnt %p",
		    ref, ge, ge->ge_start, sc, sc->sc_gnt);
	}
#endif
	ref -= ge->ge_start;
	if (ge->ge_table[ref].flags != GTF_invalid) {
		panic("reference %u is still in use, flags %#x frame %#x",
		    ref + ge->ge_start, ge->ge_table[ref].flags,
		    ge->ge_table[ref].frame);
	}
	ge->ge_table[ref].frame = atop(pa);
	ge->ge_table[ref].domid = domain;
	virtio_membar_sync();
	ge->ge_table[ref].flags = GTF_permit_access | flags;
	virtio_membar_sync();
}

void
xen_grant_table_remove(struct xen_softc *sc, grant_ref_t ref)
{
	struct xen_gntent *ge;
	uint32_t flags, *ptr;
	int loop;

#ifdef XEN_DEBUG
	if (ref > sc->sc_gntcnt * GNTTAB_NEPG)
		panic("unmanaged ref %u sc %p gnt %p (%d)", ref, sc,
		    sc->sc_gnt, sc->sc_gntcnt);
#endif
	ge = &sc->sc_gnt[ref / GNTTAB_NEPG];
#ifdef XEN_DEBUG
	if (ref < ge->ge_start || ref > ge->ge_start + GNTTAB_NEPG) {
		panic("out of bounds ref %u ge %p start %u sc %p gnt %p",
		    ref, ge, ge->ge_start, sc, sc->sc_gnt);
	}
#endif
	ref -= ge->ge_start;
	/* Invalidate the grant reference */
	virtio_membar_sync();
	ptr = (uint32_t *)&ge->ge_table[ref];
	flags = (ge->ge_table[ref].flags & ~(GTF_reading|GTF_writing)) |
	    (ge->ge_table[ref].domid << 16);
	loop = 0;
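	/*
	 * Atomically swap the entry to GTF_invalid; if the other domain
	 * still holds the reference (GTF_reading/GTF_writing set), the
	 * compare-and-swap keeps failing, so spin briefly and give up
	 * with a panic after a few attempts.
	 */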
	while (atomic_cas_uint(ptr, flags, GTF_invalid) != flags) {
		if (loop++ > 10) {
			panic("grant table reference %u is held "
			    "by domain %d: frame %#x flags %#x",
			    ref + ge->ge_start, ge->ge_table[ref].domid,
			    ge->ge_table[ref].frame, ge->ge_table[ref].flags);
		}
#if (defined(__amd64__) || defined(__i386__))
		__asm volatile("pause": : : "memory");
#endif
	}
	ge->ge_table[ref].frame = 0xffffffff;
}

int
xen_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct xen_softc *sc = t->_cookie;
	struct xen_gntmap *gm;
	int i, error;

	if (maxsegsz < PAGE_SIZE)
		return (EINVAL);

	/* Allocate a dma map structure */
	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
	    boundary, flags, dmamp);
	if (error)
		return (error);
	/* Allocate an array of grant table pa<->ref maps */
	gm = mallocarray(nsegments, sizeof(struct xen_gntmap), M_DEVBUF,
	    M_ZERO | ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK));
	if (gm == NULL) {
		bus_dmamap_destroy(sc->sc_dmat, *dmamp);
		*dmamp = NULL;
		return (ENOMEM);
	}
	/* Wire it to the dma map */
	(*dmamp)->_dm_cookie = gm;
	/* Claim references from the grant table */
	for (i = 0; i < (*dmamp)->_dm_segcnt; i++) {
		if (xen_grant_table_alloc(sc, &gm[i].gm_ref)) {
			xen_bus_dmamap_destroy(t, *dmamp);
			*dmamp = NULL;
			return (ENOBUFS);
		}
	}
	return (0);
}

void
xen_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct xen_softc *sc = t->_cookie;
	struct xen_gntmap *gm;
	int i;

	gm = map->_dm_cookie;
	for (i = 0; i < map->_dm_segcnt; i++) {
		if (gm[i].gm_ref == 0)
			continue;
		xen_grant_table_free(sc, gm[i].gm_ref);
	}
	free(gm, M_DEVBUF, map->_dm_segcnt * sizeof(struct xen_gntmap));
	bus_dmamap_destroy(sc->sc_dmat, map);
}

int
xen_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct xen_softc *sc = t->_cookie;
	struct xen_gntmap *gm = map->_dm_cookie;
	int i, domain, error;

	domain = flags >> 16;
	flags &= 0xffff;
	error = bus_dmamap_load(sc->sc_dmat, map, buf, buflen, p, flags);
	if (error)
		return (error);
	for (i = 0; i < map->dm_nsegs; i++) {
		xen_grant_table_enter(sc, gm[i].gm_ref, map->dm_segs[i].ds_addr,
		    domain, flags & BUS_DMA_WRITE ? GTF_readonly : 0);
		gm[i].gm_paddr = map->dm_segs[i].ds_addr;
		map->dm_segs[i].ds_addr = gm[i].gm_ref;
	}
	return (0);
}

int
xen_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct xen_softc *sc = t->_cookie;
	struct xen_gntmap *gm = map->_dm_cookie;
	int i, domain, error;

	domain = flags >> 16;
	flags &= 0xffff;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, flags);
	if (error)
		return (error);
	for (i = 0; i < map->dm_nsegs; i++) {
		xen_grant_table_enter(sc, gm[i].gm_ref, map->dm_segs[i].ds_addr,
		    domain, flags & BUS_DMA_WRITE ? GTF_readonly : 0);
		gm[i].gm_paddr = map->dm_segs[i].ds_addr;
		map->dm_segs[i].ds_addr = gm[i].gm_ref;
	}
	return (0);
}

void
xen_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct xen_softc *sc = t->_cookie;
	struct xen_gntmap *gm = map->_dm_cookie;
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (gm[i].gm_paddr == 0)
			continue;
		xen_grant_table_remove(sc, gm[i].gm_ref);
		map->dm_segs[i].ds_addr = gm[i].gm_paddr;
		gm[i].gm_paddr = 0;
	}
	bus_dmamap_unload(sc->sc_dmat, map);
}

void
xen_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size, int op)
{
	if ((op == (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) ||
	    (op == (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)))
		virtio_membar_sync();
}

static int
xen_attach_print(void *aux, const char *name)
{
	struct xen_attach_args *xa = aux;

	if (name)
		printf("\"%s\" at %s: %s", xa->xa_name, name, xa->xa_node);

	return (UNCONF);
}

int
xen_attach_device(struct xen_softc *sc, struct xen_devlist *xdl,
    const char *name, const char *unit)
{
	struct xen_attach_args xa;
	struct xen_device *xdv;
	unsigned long long res;

	memset(&xa, 0, sizeof(xa));
	xa.xa_dmat = &xen_bus_dma_tag;

	strlcpy(xa.xa_name, name, sizeof(xa.xa_name));
"device/%s/%s", name, unit); 13632e3b9c72Smikeb 13642e3b9c72Smikeb if (xs_getprop(sc, xa.xa_node, "backend", xa.xa_backend, 13652e3b9c72Smikeb sizeof(xa.xa_backend))) { 13662e3b9c72Smikeb DPRINTF("%s: failed to identify \"backend\" for " 13672e3b9c72Smikeb "\"%s\"\n", sc->sc_dev.dv_xname, xa.xa_node); 13682e3b9c72Smikeb } 13692e3b9c72Smikeb 13702e3b9c72Smikeb if (xs_getnum(sc, xa.xa_node, "backend-id", &res) || res > UINT16_MAX) { 13712e3b9c72Smikeb DPRINTF("%s: invalid \"backend-id\" for \"%s\"\n", 13722e3b9c72Smikeb sc->sc_dev.dv_xname, xa.xa_node); 13732e3b9c72Smikeb } 1374*b5f9b883Sjsg if (res <= UINT16_MAX) 13752e3b9c72Smikeb xa.xa_domid = (uint16_t)res; 13762e3b9c72Smikeb 13772e3b9c72Smikeb xdv = malloc(sizeof(struct xen_device), M_DEVBUF, M_ZERO | M_NOWAIT); 13782e3b9c72Smikeb if (xdv == NULL) 13792e3b9c72Smikeb return (ENOMEM); 13802e3b9c72Smikeb 13812e3b9c72Smikeb strlcpy(xdv->dv_unit, unit, sizeof(xdv->dv_unit)); 13822e3b9c72Smikeb LIST_INSERT_HEAD(&xdl->dl_devs, xdv, dv_entry); 13832e3b9c72Smikeb 13842e3b9c72Smikeb xdv->dv_dev = config_found((struct device *)sc, &xa, xen_attach_print); 13852e3b9c72Smikeb 13862e3b9c72Smikeb return (0); 13872e3b9c72Smikeb } 13882e3b9c72Smikeb 13892e3b9c72Smikeb int 13902e3b9c72Smikeb xen_probe_devices(struct xen_softc *sc) 13912e3b9c72Smikeb { 13922e3b9c72Smikeb struct xen_devlist *xdl; 13936ebc0397Smikeb struct xs_transaction xst; 139410fc2b8eSmikeb struct iovec *iovp1 = NULL, *iovp2 = NULL; 1395adf3eb71Smikeb int i, j, error, iov1_cnt = 0, iov2_cnt = 0; 13965baa72a1Smikeb char path[256]; 13976ebc0397Smikeb 13986ebc0397Smikeb memset(&xst, 0, sizeof(xst)); 13996ebc0397Smikeb xst.xst_id = 0; 14009673213eSmikeb xst.xst_cookie = sc->sc_xs; 14016ebc0397Smikeb 1402f52dd5beSmikeb if ((error = xs_cmd(&xst, XS_LIST, "device", &iovp1, &iov1_cnt)) != 0) 14036ebc0397Smikeb return (error); 14046ebc0397Smikeb 14056ebc0397Smikeb for (i = 0; i < iov1_cnt; i++) { 1406347556beSmikeb if (strcmp("suspend", (char *)iovp1[i].iov_base) == 0) 14076ebc0397Smikeb continue; 14086ebc0397Smikeb snprintf(path, sizeof(path), "device/%s", 14096ebc0397Smikeb (char *)iovp1[i].iov_base); 1410ffaaaab6Smikeb if ((error = xs_cmd(&xst, XS_LIST, path, &iovp2, 14112e3b9c72Smikeb &iov2_cnt)) != 0) 14122e3b9c72Smikeb goto out; 14132e3b9c72Smikeb if ((xdl = malloc(sizeof(struct xen_devlist), M_DEVBUF, 14142e3b9c72Smikeb M_ZERO | M_NOWAIT)) == NULL) { 14152e3b9c72Smikeb error = ENOMEM; 14162e3b9c72Smikeb goto out; 14172e3b9c72Smikeb } 14182e3b9c72Smikeb xdl->dl_xen = sc; 14192e3b9c72Smikeb strlcpy(xdl->dl_node, (const char *)iovp1[i].iov_base, 14202e3b9c72Smikeb XEN_MAX_NODE_LEN); 14212e3b9c72Smikeb for (j = 0; j < iov2_cnt; j++) { 14222e3b9c72Smikeb error = xen_attach_device(sc, xdl, 14232e3b9c72Smikeb (const char *)iovp1[i].iov_base, 14242e3b9c72Smikeb (const char *)iovp2[j].iov_base); 14252e3b9c72Smikeb if (error) { 14262e3b9c72Smikeb printf("%s: failed to attach \"%s/%s\"\n", 14272e3b9c72Smikeb sc->sc_dev.dv_xname, path, 14282e3b9c72Smikeb (const char *)iovp2[j].iov_base); 1429*b5f9b883Sjsg continue; 14302e3b9c72Smikeb } 14312e3b9c72Smikeb } 14322e3b9c72Smikeb /* Setup a watch for every device subtree */ 14332e3b9c72Smikeb if (xs_watch(sc, "device", (char *)iovp1[i].iov_base, 14342e3b9c72Smikeb &xdl->dl_task, xen_hotplug, xdl)) 14352e3b9c72Smikeb printf("%s: failed to setup hotplug watch for \"%s\"\n", 14362e3b9c72Smikeb sc->sc_dev.dv_xname, (char *)iovp1[i].iov_base); 14372e3b9c72Smikeb SLIST_INSERT_HEAD(&sc->sc_devlists, xdl, dl_entry); 14382e3b9c72Smikeb xs_resfree(&xst, iovp2, 
		iovp2 = NULL;
		iov2_cnt = 0;
	}

 out:
	if (iovp2)
		xs_resfree(&xst, iovp2, iov2_cnt);
	xs_resfree(&xst, iovp1, iov1_cnt);
	return (error);
}

/*
 * XenStore watch callback: reconcile the attached devices of one class
 * with the current XenStore contents, detaching nodes that have
 * disappeared and attaching newly appeared ones.
 */
void
xen_hotplug(void *arg)
{
	struct xen_devlist *xdl = arg;
	struct xen_softc *sc = xdl->dl_xen;
	struct xen_device *xdv, *xvdn;
	struct xs_transaction xst;
	struct iovec *iovp = NULL;
	int error, i, keep, iov_cnt = 0;
	char path[256];
	int8_t *seen;

	memset(&xst, 0, sizeof(xst));
	xst.xst_id = 0;
	xst.xst_cookie = sc->sc_xs;

	snprintf(path, sizeof(path), "device/%s", xdl->dl_node);
	if ((error = xs_cmd(&xst, XS_LIST, path, &iovp, &iov_cnt)) != 0)
		return;

	seen = malloc(iov_cnt, M_TEMP, M_ZERO | M_WAITOK);

	/* Detect all removed and kept devices */
	LIST_FOREACH_SAFE(xdv, &xdl->dl_devs, dv_entry, xvdn) {
		for (i = 0, keep = 0; i < iov_cnt; i++) {
			if (!seen[i] &&
			    !strcmp(xdv->dv_unit, (char *)iovp[i].iov_base)) {
				seen[i]++;
				keep++;
				break;
			}
		}
		if (!keep) {
			DPRINTF("%s: removing \"%s/%s\"\n", sc->sc_dev.dv_xname,
			    xdl->dl_node, xdv->dv_unit);
			LIST_REMOVE(xdv, dv_entry);
			config_detach(xdv->dv_dev, 0);
			free(xdv, M_DEVBUF, sizeof(struct xen_device));
		}
	}

	/* Attach all new devices */
	for (i = 0; i < iov_cnt; i++) {
		if (seen[i])
			continue;
		DPRINTF("%s: attaching \"%s/%s\"\n", sc->sc_dev.dv_xname,
		    xdl->dl_node, (const char *)iovp[i].iov_base);
		error = xen_attach_device(sc, xdl, xdl->dl_node,
		    (const char *)iovp[i].iov_base);
		if (error) {
			printf("%s: failed to attach \"%s/%s\"\n",
			    sc->sc_dev.dv_xname, path,
			    (const char *)iovp[i].iov_base);
			continue;
		}
	}

	free(seen, M_TEMP, iov_cnt);

	xs_resfree(&xst, iovp, iov_cnt);
}

#include <machine/pio.h>

/* Xen "unplug" protocol: magic I/O port used to disable emulated devices */
#define XMI_PORT		0x10
#define XMI_MAGIC		0x49d2
#define XMI_UNPLUG_IDE		0x01
#define XMI_UNPLUG_NIC		0x02
#define XMI_UNPLUG_IDESEC	0x04

void
xen_disable_emulated_devices(struct xen_softc *sc)
{
#if defined(__i386__) || defined(__amd64__)
	ushort unplug = 0;

	if (inw(XMI_PORT) != XMI_MAGIC) {
		printf("%s: failed to disable emulated devices\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_unplug & XEN_UNPLUG_IDE)
		unplug |= XMI_UNPLUG_IDE;
	if (sc->sc_unplug & XEN_UNPLUG_IDESEC)
		unplug |= XMI_UNPLUG_IDESEC;
	if (sc->sc_unplug & XEN_UNPLUG_NIC)
		unplug |= XMI_UNPLUG_NIC;
	if (unplug)
		outw(XMI_PORT, unplug);
#endif	/* __i386__ || __amd64__ */
}

/* Record a request to unplug the given class of emulated devices. */
void
xen_unplug_emulated(void *xsc, int what)
{
	struct xen_softc *sc = xsc;

	sc->sc_unplug |= what;
}
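/*
 * Illustrative sketch, not part of this driver: a frontend match routine
 * receives via config_found() the xen_attach_args prepared by
 * xen_attach_device() above and can match on the XenStore class name in
 * xa_name.  The function name ("xnf_match") and class ("vif") below are
 * hypothetical examples only.
 */
#if 0
int
xnf_match(struct device *parent, void *match, void *aux)
{
	struct xen_attach_args *xa = aux;

	/* Match network frontend nodes enumerated as "device/vif/<unit>". */
	return (strcmp("vif", xa->xa_name) == 0);
}
#endif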