/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
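
/*
 * Device context and translation domain management for the AMD IOMMU
 * (AMD-Vi) driver: domain allocation and destruction, device table
 * entry (DTE) initialization, and context reference counting.
 */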

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/amd_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/amd_iommu.h>

static MALLOC_DEFINE(M_AMDIOMMU_CTX, "amdiommu_ctx", "AMD IOMMU Context");
static MALLOC_DEFINE(M_AMDIOMMU_DOMAIN, "amdiommu_dom", "AMD IOMMU Domain");

static void amdiommu_unref_domain_locked(struct amdiommu_unit *unit,
    struct amdiommu_domain *domain);
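
/*
 * The device table is indexed by requester id (RID); return the DTE
 * slot assigned to the given context.
 */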
static struct amdiommu_dte *
amdiommu_get_dtep(struct amdiommu_ctx *ctx)
{
        return (&CTX2AMD(ctx)->dev_tbl[ctx->context.rid]);
}

void
amdiommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep)
{
        struct amdiommu_domain *domain;
        struct amdiommu_unit *unit;

        domain = IODOM2DOM(entry->domain);
        unit = DOM2AMD(domain);

        /*
         * If "free" is false, then the IOTLB invalidation must be performed
         * synchronously.  Otherwise, the caller might free the entry before
         * dmar_qi_task() is finished processing it.
         */
        if (free) {
                AMDIOMMU_LOCK(unit);
                iommu_qi_invalidate_locked(&domain->iodom, entry, true);
                AMDIOMMU_UNLOCK(unit);
        } else {
                iommu_qi_invalidate_sync(&domain->iodom, entry->start,
                    entry->end - entry->start, cansleep);
                iommu_domain_free_entry(entry, false);
        }
}

static bool
amdiommu_domain_unload_emit_wait(struct amdiommu_domain *domain,
    struct iommu_map_entry *entry)
{
        return (true); /* XXXKIB */
}
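
/*
 * Unload a batch of map entries: unmap each entry from the domain
 * page tables first, then queue the IOTLB invalidations for the
 * whole batch under the unit lock.
 */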
void
amdiommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
        struct amdiommu_domain *domain;
        struct amdiommu_unit *unit;
        struct iommu_map_entry *entry, *entry1;
        int error __diagused;

        domain = IODOM2DOM(iodom);
        unit = DOM2AMD(domain);

        TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
                KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
                    ("not mapped entry %p %p", domain, entry));
                error = iodom->ops->unmap(iodom, entry,
                    cansleep ? IOMMU_PGF_WAITOK : 0);
                KASSERT(error == 0, ("unmap %p error %d", domain, error));
        }
        if (TAILQ_EMPTY(entries))
                return;

        AMDIOMMU_LOCK(unit);
        while ((entry = TAILQ_FIRST(entries)) != NULL) {
                TAILQ_REMOVE(entries, entry, dmamap_link);
                iommu_qi_invalidate_locked(&domain->iodom, entry,
                    amdiommu_domain_unload_emit_wait(domain, entry));
        }
        AMDIOMMU_UNLOCK(unit);
}

static void
amdiommu_domain_destroy(struct amdiommu_domain *domain)
{
        struct iommu_domain *iodom;
        struct amdiommu_unit *unit;

        iodom = DOM2IODOM(domain);

        KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries),
            ("unfinished unloads %p", domain));
        KASSERT(LIST_EMPTY(&iodom->contexts),
            ("destroying dom %p with contexts", domain));
        KASSERT(domain->ctx_cnt == 0,
            ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
        KASSERT(domain->refs == 0,
            ("destroying dom %p with refs %d", domain, domain->refs));

        if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
                AMDIOMMU_DOMAIN_LOCK(domain);
                iommu_gas_fini_domain(iodom);
                AMDIOMMU_DOMAIN_UNLOCK(domain);
        }
        if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) {
                if (domain->pgtbl_obj != NULL)
                        AMDIOMMU_DOMAIN_PGLOCK(domain);
                amdiommu_domain_free_pgtbl(domain);
        }
        iommu_domain_fini(iodom);
        unit = DOM2AMD(domain);
        free_unr(unit->domids, domain->domain);
        free(domain, M_AMDIOMMU_DOMAIN);
}
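
/*
 * Return the first guest address not covered by a page table of the
 * given level; each level adds IOMMU_NPTEPGSHIFT index bits on top
 * of IOMMU_PAGE_SHIFT.  Returns -1 once the shift reaches the width
 * of the 64-bit address space.
 */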
static iommu_gaddr_t
lvl2addr(int lvl)
{
        int x;

        x = IOMMU_PAGE_SHIFT + IOMMU_NPTEPGSHIFT * lvl;
        /* Level 6 has only 8 bits for page table index */
        if (x >= NBBY * sizeof(uint64_t))
                return (-1ull);
        return (1ull << x);
}

static void
amdiommu_domain_init_pglvl(struct amdiommu_unit *unit,
    struct amdiommu_domain *domain)
{
        iommu_gaddr_t end;
        int hats, i;
        uint64_t efr_hats;

        end = DOM2IODOM(domain)->end;
        for (i = AMDIOMMU_PGTBL_MAXLVL; i > 1; i--) {
                if (lvl2addr(i) >= end && lvl2addr(i - 1) < end)
                        break;
        }
        domain->pglvl = i;

        efr_hats = unit->efr & AMDIOMMU_EFR_HATS_MASK;
        switch (efr_hats) {
        case AMDIOMMU_EFR_HATS_6LVL:
                hats = 6;
                break;
        case AMDIOMMU_EFR_HATS_5LVL:
                hats = 5;
                break;
        case AMDIOMMU_EFR_HATS_4LVL:
                hats = 4;
                break;
        default:
                printf("amdiommu%d: HATS %#jx (reserved) ignoring\n",
                    unit->iommu.unit, (uintmax_t)efr_hats);
                return;
        }
        if (hats >= domain->pglvl)
                return;

        printf("amdiommu%d: domain %d HATS %d pglvl %d reducing to HATS\n",
            unit->iommu.unit, domain->domain, hats, domain->pglvl);
        domain->pglvl = hats;
        domain->iodom.end = lvl2addr(hats);
}
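
/*
 * Allocate and initialize a domain: take a domain id from the unit's
 * unr pool, size the domain address space (identity-mapped domains
 * are limited to physical memory), and, for translated domains,
 * allocate the page tables and reserve the local APIC MSI region.
 */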
static struct amdiommu_domain *
amdiommu_domain_alloc(struct amdiommu_unit *unit, bool id_mapped)
{
        struct amdiommu_domain *domain;
        struct iommu_domain *iodom;
        int error, id;

        id = alloc_unr(unit->domids);
        if (id == -1)
                return (NULL);
        domain = malloc(sizeof(*domain), M_AMDIOMMU_DOMAIN, M_WAITOK | M_ZERO);
        iodom = DOM2IODOM(domain);
        domain->domain = id;
        LIST_INIT(&iodom->contexts);
        iommu_domain_init(AMD2IOMMU(unit), iodom, &amdiommu_domain_map_ops);

        domain->unit = unit;

        domain->iodom.end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR;
        amdiommu_domain_init_pglvl(unit, domain);
        iommu_gas_init_domain(DOM2IODOM(domain));

        if (id_mapped) {
                domain->iodom.flags |= IOMMU_DOMAIN_IDMAP;
        } else {
                error = amdiommu_domain_alloc_pgtbl(domain);
                if (error != 0)
                        goto fail;
                /* Disable local apic region access */
                error = iommu_gas_reserve_region(iodom, 0xfee00000,
                    0xfeefffff + 1, &iodom->msi_entry);
                if (error != 0)
                        goto fail;
        }

        return (domain);

fail:
        amdiommu_domain_destroy(domain);
        return (NULL);
}

static struct amdiommu_ctx *
amdiommu_ctx_alloc(struct amdiommu_domain *domain, uint16_t rid)
{
        struct amdiommu_ctx *ctx;

        ctx = malloc(sizeof(*ctx), M_AMDIOMMU_CTX, M_WAITOK | M_ZERO);
        ctx->context.domain = DOM2IODOM(domain);
        ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu),
            M_AMDIOMMU_CTX, M_WAITOK | M_ZERO);
        ctx->context.rid = rid;
        ctx->context.refs = 1;
        return (ctx);
}
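
/*
 * Context linking maintains the domain reference counts under the
 * unit lock: each linked context holds one reference on its domain,
 * and ctx_cnt counts the linked contexts.
 */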
static void
amdiommu_ctx_link(struct amdiommu_ctx *ctx)
{
        struct amdiommu_domain *domain;

        domain = CTX2DOM(ctx);
        IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
        KASSERT(domain->refs >= domain->ctx_cnt,
            ("dom %p ref underflow %d %d", domain, domain->refs,
            domain->ctx_cnt));
        domain->refs++;
        domain->ctx_cnt++;
        LIST_INSERT_HEAD(&domain->iodom.contexts, &ctx->context, link);
}

static void
amdiommu_ctx_unlink(struct amdiommu_ctx *ctx)
{
        struct amdiommu_domain *domain;

        domain = CTX2DOM(ctx);
        IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
        KASSERT(domain->refs > 0,
            ("domain %p ctx dtr refs %d", domain, domain->refs));
        KASSERT(domain->ctx_cnt >= domain->refs,
            ("domain %p ctx dtr refs %d ctx_cnt %d", domain,
            domain->refs, domain->ctx_cnt));
        domain->refs--;
        domain->ctx_cnt--;
        LIST_REMOVE(&ctx->context, link);
}

struct amdiommu_ctx *
amdiommu_find_ctx_locked(struct amdiommu_unit *unit, uint16_t rid)
{
        struct amdiommu_domain *domain;
        struct iommu_ctx *ctx;

        AMDIOMMU_ASSERT_LOCKED(unit);

        LIST_FOREACH(domain, &unit->domains, link) {
                LIST_FOREACH(ctx, &domain->iodom.contexts, link) {
                        if (ctx->rid == rid)
                                return (IOCTX2CTX(ctx));
                }
        }
        return (NULL);
}

struct amdiommu_domain *
amdiommu_find_domain(struct amdiommu_unit *unit, uint16_t rid)
{
        struct amdiommu_domain *domain;
        struct iommu_ctx *ctx;

        AMDIOMMU_LOCK(unit);
        LIST_FOREACH(domain, &unit->domains, link) {
                LIST_FOREACH(ctx, &domain->iodom.contexts, link) {
                        if (ctx->rid == rid)
                                break;
                }
        }
        AMDIOMMU_UNLOCK(unit);
        return (domain);
}
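
/*
 * Drop a reference on the context.  On the last dereference clear
 * the DTE, flush the context, interrupt remapping, and page table
 * caches, wait for the flushes to complete, and release the
 * context's reference on its domain.
 */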
static void
amdiommu_free_ctx_locked(struct amdiommu_unit *unit, struct amdiommu_ctx *ctx)
{
        struct amdiommu_dte *dtep;
        struct amdiommu_domain *domain;

        AMDIOMMU_ASSERT_LOCKED(unit);
        KASSERT(ctx->context.refs >= 1,
            ("amdiommu %p ctx %p refs %u", unit, ctx, ctx->context.refs));

        /*
         * If our reference is not last, only the dereference should
         * be performed.
         */
        if (ctx->context.refs > 1) {
                ctx->context.refs--;
                AMDIOMMU_UNLOCK(unit);
                return;
        }

        KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
            ("lost ref on disabled ctx %p", ctx));

        /*
         * Otherwise, the device table entry must be cleared before
         * the page table is destroyed.
         */
        dtep = amdiommu_get_dtep(ctx);
        dtep->v = 0;
        atomic_thread_fence_rel();
        memset(dtep, 0, sizeof(*dtep));

        domain = CTX2DOM(ctx);
        amdiommu_qi_invalidate_ctx_locked_nowait(ctx);
        amdiommu_qi_invalidate_ir_locked_nowait(unit, ctx->context.rid);
        amdiommu_qi_invalidate_all_pages_locked_nowait(domain);
        amdiommu_qi_invalidate_wait_sync(AMD2IOMMU(CTX2AMD(ctx)));

        if (unit->irte_enabled)
                amdiommu_ctx_fini_irte(ctx);

        amdiommu_ctx_unlink(ctx);
        free(ctx->context.tag, M_AMDIOMMU_CTX);
        free(ctx, M_AMDIOMMU_CTX);
        amdiommu_unref_domain_locked(unit, domain);
}

static void
amdiommu_unref_domain_locked(struct amdiommu_unit *unit,
    struct amdiommu_domain *domain)
{
        AMDIOMMU_ASSERT_LOCKED(unit);
        KASSERT(domain->refs >= 1,
            ("amdiommu%d domain %p refs %u", unit->iommu.unit, domain,
            domain->refs));
        KASSERT(domain->refs > domain->ctx_cnt,
            ("amdiommu%d domain %p refs %d ctx_cnt %d", unit->iommu.unit,
            domain, domain->refs, domain->ctx_cnt));

        if (domain->refs > 1) {
                domain->refs--;
                AMDIOMMU_UNLOCK(unit);
                return;
        }

        LIST_REMOVE(domain, link);
        AMDIOMMU_UNLOCK(unit);

        taskqueue_drain(unit->iommu.delayed_taskqueue,
            &domain->iodom.unload_task);
        amdiommu_domain_destroy(domain);
}
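
/*
 * Fill one DTE: enable read and write translation, set the domain id
 * and the interrupt passing hints taken from IVHD, point the entry
 * at the interrupt remapping table and the page table root, and mark
 * the entry valid only after a release fence orders the preceding
 * stores.
 */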
static void
dte_entry_init_one(struct amdiommu_dte *dtep, struct amdiommu_ctx *ctx,
    vm_page_t pgtblr, uint8_t dte, uint32_t edte)
{
        struct amdiommu_domain *domain;
        struct amdiommu_unit *unit;

        domain = CTX2DOM(ctx);
        unit = DOM2AMD(domain);

        dtep->tv = 1;
        /* dtep->had not used for now */
        dtep->ir = 1;
        dtep->iw = 1;
        dtep->domainid = domain->domain;
        dtep->pioctl = AMDIOMMU_DTE_PIOCTL_DIS;

        /* Fill device interrupt passing hints from IVHD. */
        dtep->initpass = (dte & ACPI_IVHD_INIT_PASS) != 0;
        dtep->eintpass = (dte & ACPI_IVHD_EINT_PASS) != 0;
        dtep->nmipass = (dte & ACPI_IVHD_NMI_PASS) != 0;
        dtep->sysmgt = (dte & ACPI_IVHD_SYSTEM_MGMT) >> 4;
        dtep->lint0pass = (dte & ACPI_IVHD_LINT0_PASS) != 0;
        dtep->lint1pass = (dte & ACPI_IVHD_LINT1_PASS) != 0;

        if (unit->irte_enabled) {
                dtep->iv = 1;
                dtep->i = 0;
                dtep->inttablen = ilog2(unit->irte_nentries);
                dtep->intrroot = pmap_kextract(unit->irte_x2apic ?
                    (vm_offset_t)ctx->irtx2 :
                    (vm_offset_t)ctx->irtb) >> 6;

                dtep->intctl = AMDIOMMU_DTE_INTCTL_MAP;
        }

        if ((DOM2IODOM(domain)->flags & IOMMU_DOMAIN_IDMAP) != 0) {
                dtep->pgmode = AMDIOMMU_DTE_PGMODE_1T1;
        } else {
                MPASS(domain->pglvl > 0 && domain->pglvl <=
                    AMDIOMMU_PGTBL_MAXLVL);
                dtep->pgmode = domain->pglvl;
                dtep->ptroot = VM_PAGE_TO_PHYS(pgtblr) >> 12;
        }

        atomic_thread_fence_rel();
        dtep->v = 1;
}
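
/*
 * Initialize the DTE for the context's RID.  A bus-wide context owns
 * all 256 device table entries on its bus, which are initialized
 * identically.
 */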
static void
dte_entry_init(struct amdiommu_ctx *ctx, bool move, uint8_t dte, uint32_t edte)
{
        struct amdiommu_dte *dtep;
        struct amdiommu_unit *unit;
        struct amdiommu_domain *domain;
        int i;

        domain = CTX2DOM(ctx);
        unit = DOM2AMD(domain);

        dtep = amdiommu_get_dtep(ctx);
        KASSERT(dtep->v == 0,
            ("amdiommu%d initializing valid dte @%p %#jx",
            CTX2AMD(ctx)->iommu.unit, dtep, (uintmax_t)(*(uint64_t *)dtep)));

        if (iommu_is_buswide_ctx(AMD2IOMMU(unit),
            PCI_RID2BUS(ctx->context.rid))) {
                MPASS(!move);
                for (i = 0; i <= PCI_BUSMAX; i++) {
                        dte_entry_init_one(&dtep[i], ctx, domain->pgtblr,
                            dte, edte);
                }
        } else {
                dte_entry_init_one(dtep, ctx, domain->pgtblr, dte, edte);
        }
}
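
/*
 * Find or create the context for the given device.  Allocations
 * which may sleep are done with the unit lock dropped, so the
 * context list is rechecked after relocking, and the speculatively
 * allocated domain and context are destroyed if another thread
 * created the context first.
 */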
struct amdiommu_ctx *
amdiommu_get_ctx_for_dev(struct amdiommu_unit *unit, device_t dev, uint16_t rid,
    int dev_domain, bool id_mapped, bool rmrr_init, uint8_t dte, uint32_t edte)
{
        struct amdiommu_domain *domain, *domain1;
        struct amdiommu_ctx *ctx, *ctx1;
        int bus, slot, func;

        if (dev != NULL) {
                bus = pci_get_bus(dev);
                slot = pci_get_slot(dev);
                func = pci_get_function(dev);
        } else {
                bus = PCI_RID2BUS(rid);
                slot = PCI_RID2SLOT(rid);
                func = PCI_RID2FUNC(rid);
        }
        AMDIOMMU_LOCK(unit);
        KASSERT(!iommu_is_buswide_ctx(AMD2IOMMU(unit), bus) ||
            (slot == 0 && func == 0),
            ("iommu%d pci%d:%d:%d get_ctx for buswide", AMD2IOMMU(unit)->unit,
            bus, slot, func));
        ctx = amdiommu_find_ctx_locked(unit, rid);
        if (ctx == NULL) {
                /*
                 * Perform the allocations which require sleep or have
                 * a higher chance to succeed if sleep is allowed.
                 */
                AMDIOMMU_UNLOCK(unit);
                domain1 = amdiommu_domain_alloc(unit, id_mapped);
                if (domain1 == NULL)
                        return (NULL);
                if (!id_mapped) {
                        /*
                         * XXXKIB IVMD seems to be less significant
                         * and less used on AMD than RMRR on Intel.
                         * Not implemented for now.
                         */
                }
                ctx1 = amdiommu_ctx_alloc(domain1, rid);
                amdiommu_ctx_init_irte(ctx1);
                AMDIOMMU_LOCK(unit);

                /*
                 * Recheck the contexts; another thread might have
                 * already allocated the needed one.
                 */
                ctx = amdiommu_find_ctx_locked(unit, rid);
                if (ctx == NULL) {
                        domain = domain1;
                        ctx = ctx1;
                        amdiommu_ctx_link(ctx);
                        ctx->context.tag->owner = dev;
                        iommu_device_tag_init(CTX2IOCTX(ctx), dev);

                        LIST_INSERT_HEAD(&unit->domains, domain, link);
                        dte_entry_init(ctx, false, dte, edte);
                        amdiommu_qi_invalidate_ctx_locked(ctx);
                        if (dev != NULL) {
                                device_printf(dev,
                                    "amdiommu%d pci%d:%d:%d:%d rid %x domain %d "
                                    "%s-mapped\n",
                                    AMD2IOMMU(unit)->unit, unit->unit_dom,
                                    bus, slot, func, rid, domain->domain,
                                    id_mapped ? "id" : "re");
                        }
                } else {
                        amdiommu_domain_destroy(domain1);
                        /* Nothing needs to be done to destroy ctx1. */
                        free(ctx1, M_AMDIOMMU_CTX);
                        domain = CTX2DOM(ctx);
                        ctx->context.refs++; /* tag referenced us */
                }
        } else {
                domain = CTX2DOM(ctx);
                if (ctx->context.tag->owner == NULL)
                        ctx->context.tag->owner = dev;
                ctx->context.refs++; /* tag referenced us */
        }
        AMDIOMMU_UNLOCK(unit);

        return (ctx);
}
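
/*
 * The get_ctx method for the AMD IOMMU unit: resolve the handling
 * unit, the effective RID, and the IVHD-provided interrupt passing
 * hints for the device, then find or create the context.
 */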
struct iommu_ctx *
amdiommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
    bool id_mapped, bool rmrr_init)
{
        struct amdiommu_unit *unit;
        struct amdiommu_ctx *ret;
        int error;
        uint32_t edte;
        uint16_t rid1;
        uint8_t dte;

        error = amdiommu_find_unit(dev, &unit, &rid1, &dte, &edte,
            bootverbose);
        if (error != 0)
                return (NULL);
        if (AMD2IOMMU(unit) != iommu)   /* XXX complain loudly */
                return (NULL);
        ret = amdiommu_get_ctx_for_dev(unit, dev, rid1, pci_get_domain(dev),
            id_mapped, rmrr_init, dte, edte);
        return (CTX2IOCTX(ret));
}

void
amdiommu_free_ctx_locked_method(struct iommu_unit *iommu,
    struct iommu_ctx *context)
{
        struct amdiommu_unit *unit;
        struct amdiommu_ctx *ctx;

        unit = IOMMU2AMD(iommu);
        ctx = IOCTX2CTX(context);
        amdiommu_free_ctx_locked(unit, ctx);
}