/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define	RB_AUGMENT_CHECK(entry) iommu_gas_augment_entry(entry)

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/iommu/iommu.h>
#include <dev/iommu/iommu_gas.h>
#include <dev/iommu/iommu_msi.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/iommu.h>
#include <dev/iommu/busdma_iommu.h>

/*
 * Guest Address Space management.
 */

static uma_zone_t iommu_map_entry_zone;

#ifdef INVARIANTS
static int iommu_check_free;
#endif

static void
intel_gas_init(void)
{

	iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
	    sizeof(struct iommu_map_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);

struct iommu_map_entry *
iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
{
	struct iommu_map_entry *res;

	KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0,
	    ("unsupported flags %x", flags));

	res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) !=
	    0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (res != NULL) {
		SLIST_INIT(&res->pgtbl_free);
		if (domain != NULL) {
			res->domain = domain;
			atomic_add_int(&domain->entries_cnt, 1);
		}
	}
	return (res);
}

void
iommu_gas_free_entry(struct iommu_map_entry *entry)
{
	struct iommu_domain *domain;
	int n __unused;

	n = vm_page_free_pages_toq(&entry->pgtbl_free, false);
#if defined(__i386__) || defined(__amd64__)
	atomic_subtract_int(&iommu_tbl_pagecnt, n);
#endif
	domain = entry->domain;
	if (domain != NULL)
		atomic_subtract_int(&domain->entries_cnt, 1);
	uma_zfree(iommu_map_entry_zone, entry);
}

static int
iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{

	/* First and last entries have zero size, so <= */
	KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)",
	    a, (uintmax_t)a->start, (uintmax_t)a->end));
	KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)",
	    b, (uintmax_t)b->start, (uintmax_t)b->end));
	KASSERT(((a->flags | b->flags) & IOMMU_MAP_ENTRY_FAKE) != 0 ||
	    a->end <= b->start || b->end <= a->start ||
	    a->end == a->start || b->end == b->start,
	    ("overlapping entries %p (%jx, %jx) f %#x %p (%jx, %jx) f %#x"
	    " domain %p %p",
	    a, (uintmax_t)a->start, (uintmax_t)a->end, a->flags,
	    b, (uintmax_t)b->start, (uintmax_t)b->end, b->flags,
	    a->domain, b->domain));

	if (a->end < b->end)
		return (-1);
	else if (b->end < a->end)
		return (1);
	return (0);
}

/*
 * Update augmentation data based on data from children.
 * Return true if and only if the update changes the augmentation data.
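 *
 * Each entry's augmentation summarizes its subtree: 'first' is the lowest
 * start address in the subtree, 'last' is the highest end address, and
 * 'free_down' is the largest gap between two adjacent entries anywhere in
 * the subtree.  For example, for a node [a, b] with left child [0, 0] and
 * right child [c, d], free_down is MAX(a - 0, c - b), which lets the
 * allocator skip an entire subtree with a single comparison.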
 */
static bool
iommu_gas_augment_entry(struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;
	iommu_gaddr_t bound, delta, free_down;

	free_down = 0;
	bound = entry->start;
	if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
		free_down = MAX(child->free_down, bound - child->last);
		bound = child->first;
	}
	delta = bound - entry->first;
	entry->first = bound;
	bound = entry->end;
	if ((child = RB_RIGHT(entry, rb_entry)) != NULL) {
		free_down = MAX(free_down, child->free_down);
		free_down = MAX(free_down, child->first - bound);
		bound = child->last;
	}
	delta += entry->last - bound;
	if (delta == 0)
		delta = entry->free_down - free_down;
	entry->last = bound;
	entry->free_down = free_down;

	/*
	 * Return true either if the value of last-first changed,
	 * or if free_down changed.
	 */
	return (delta != 0);
}

RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

#ifdef INVARIANTS
static void
iommu_gas_check_free(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry, *l, *r;
	iommu_gaddr_t v;

	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
		KASSERT(domain == entry->domain,
		    ("mismatched free domain %p entry %p entry->domain %p",
		    domain, entry, entry->domain));
		l = RB_LEFT(entry, rb_entry);
		r = RB_RIGHT(entry, rb_entry);
		v = 0;
		if (l != NULL) {
			v = MAX(v, l->free_down);
			v = MAX(v, entry->start - l->last);
		}
		if (r != NULL) {
			v = MAX(v, r->free_down);
			v = MAX(v, r->first - entry->end);
		}
		MPASS(entry->free_down == v);
	}
}
#endif

static void
iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *nbr;

	/* Removing entry may open a new free gap before domain->start_gap. */
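	/*
	 * If so, the hint could end up past the newly opened gap; move it
	 * to a neighbor of the removed entry that remains in the tree: the
	 * in-order successor when a right child exists, otherwise the left
	 * child, otherwise the parent.  iommu_gas_find_space() re-validates
	 * the hint before relying on it.
	 */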
	if (entry->end <= domain->start_gap->end) {
		if (RB_RIGHT(entry, rb_entry) != NULL)
			nbr = iommu_gas_entries_tree_RB_NEXT(entry);
		else if (RB_LEFT(entry, rb_entry) != NULL)
			nbr = RB_LEFT(entry, rb_entry);
		else
			nbr = RB_PARENT(entry, rb_entry);
		domain->start_gap = nbr;
	}
	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
}

struct iommu_domain *
iommu_get_ctx_domain(struct iommu_ctx *ctx)
{

	return (ctx->domain);
}

void
iommu_gas_init_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *begin, *end;

	begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);

	IOMMU_DOMAIN_LOCK(domain);
	KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
	KASSERT(RB_EMPTY(&domain->rb_root),
	    ("non-empty entries %p", domain));

	end->start = domain->end;
	end->end = domain->end;
	end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	RB_INSERT(iommu_gas_entries_tree, &domain->rb_root, end);

	begin->start = 0;
	begin->end = 0;
	begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	RB_INSERT_PREV(iommu_gas_entries_tree, &domain->rb_root, end, begin);
	iommu_gas_augment_entry(end);
	iommu_gas_augment_entry(begin);

	domain->start_gap = begin;
	domain->first_place = begin;
	domain->last_place = end;
	domain->flags |= IOMMU_DOMAIN_GAS_INITED;
	IOMMU_DOMAIN_UNLOCK(domain);
}

void
iommu_gas_fini_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(domain->entries_cnt == 2,
	    ("domain still in use %p", domain));

	entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == 0, ("start entry start %p", domain));
	KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain));
	KASSERT(entry->flags ==
	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
	    ("start entry flags %p", domain));
	iommu_gas_rb_remove(domain, entry);
	iommu_gas_free_entry(entry);

	entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == domain->end, ("end entry start %p", domain));
	KASSERT(entry->end == domain->end, ("end entry end %p", domain));
	KASSERT(entry->flags ==
	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
	    ("end entry flags %p", domain));
	iommu_gas_rb_remove(domain, entry);
	iommu_gas_free_entry(entry);
}

struct iommu_gas_match_args {
	iommu_gaddr_t size;
	int offset;
	const struct bus_dma_tag_common *common;
	u_int gas_flags;
	struct iommu_map_entry *entry;
};

/*
 * The interval [beg, end) is a free interval between two iommu_map_entries.
 * Addresses can be allocated only in the range [lbound, ubound].  Try to
 * allocate space in the free interval, subject to the conditions expressed by
 * a, and return 'true' if and only if the allocation attempt succeeds.
 */
static bool
iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
    iommu_gaddr_t end, iommu_gaddr_t lbound, iommu_gaddr_t ubound)
{
	struct iommu_map_entry *entry;
	iommu_gaddr_t first, size, start;
	int offset;

	/*
	 * The prev->end is always aligned on the page size, which
	 * causes page alignment for the entry->start too.
	 *
	 * Create IOMMU_PAGE_SIZE gaps before, after new entry
	 * to ensure that out-of-bounds accesses fault.
	 */
	beg = MAX(beg + IOMMU_PAGE_SIZE, lbound);
	start = roundup2(beg, a->common->alignment);
	if (start < beg)
		return (false);
	if (end < IOMMU_PAGE_SIZE + 1)
		return (false);
	end = MIN(end - IOMMU_PAGE_SIZE - 1, ubound);
	offset = a->offset;
	size = a->size;
	if (start + offset + size - 1 > end)
		return (false);

	/* Check for and try to skip past boundary crossing. */
	if (!vm_addr_bound_ok(start + offset, size, a->common->boundary)) {
		/*
		 * The start + offset to start + offset + size region crosses
		 * the boundary.  Check if there is enough space after the next
		 * boundary after the beg.
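		 * We retry at the first aligned address at or past that
		 * boundary; if the request still does not fit there, it can
		 * only succeed by being split (IOMMU_MF_CANSPLIT), in which
		 * case the allocation is truncated at the boundary.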
		 */
		first = start;
		beg = roundup2(start + offset + 1, a->common->boundary);
		start = roundup2(beg, a->common->alignment);

		if (start + offset + size - 1 > end ||
		    !vm_addr_bound_ok(start + offset, size,
		    a->common->boundary)) {
			/*
			 * Not enough space to align at the requested boundary,
			 * or boundary is smaller than the size, but allowed to
			 * split.  We already checked that start + size does not
			 * overlap ubound.
			 *
			 * XXXKIB. It is possible that beg is exactly at the
			 * start of the next entry, then we do not have gap.
			 * Ignore for now.
			 */
			if ((a->gas_flags & IOMMU_MF_CANSPLIT) == 0)
				return (false);
			size = beg - first - offset;
			start = first;
		}
	}
	entry = a->entry;
	entry->start = start;
	entry->end = start + roundup2(size + offset, IOMMU_PAGE_SIZE);
	entry->flags = IOMMU_MAP_ENTRY_MAP;
	return (true);
}

/* Find the next entry that might abut a big-enough range. */
static struct iommu_map_entry *
iommu_gas_next(struct iommu_map_entry *curr, iommu_gaddr_t min_free)
{
	struct iommu_map_entry *next;

	if ((next = RB_RIGHT(curr, rb_entry)) != NULL &&
	    next->free_down >= min_free) {
		/* Find next entry in right subtree. */
		do
			curr = next;
		while ((next = RB_LEFT(curr, rb_entry)) != NULL &&
		    next->free_down >= min_free);
	} else {
		/* Find next entry in a left-parent ancestor. */
		while ((next = RB_PARENT(curr, rb_entry)) != NULL &&
		    curr == RB_RIGHT(next, rb_entry))
			curr = next;
		curr = next;
	}
	return (curr);
}

/*
 * Address-ordered first-fit search of 'domain' for free space satisfying the
 * conditions of 'a'.  The space allocated is at least one page big, and is
 * bounded by guard pages to the left and right.  The allocated space for
 * 'domain' is described by an rb-tree of map entries at domain->rb_root, and
 * domain->start_gap points to a map entry less than or adjacent to the first
 * free-space of size at least 3 pages.
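 *
 * The search makes two passes over the tree: first over gaps usable below
 * the tag's lowaddr limit, then, if that fails, over gaps above highaddr.
 * In both passes iommu_gas_next() visits entries in address order and skips
 * any subtree whose free_down shows it cannot hold the request plus its two
 * guard pages.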
 */
static int
iommu_gas_find_space(struct iommu_domain *domain,
    struct iommu_gas_match_args *a)
{
	struct iommu_map_entry *curr, *first;
	iommu_gaddr_t addr, min_free;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(a->entry->flags == 0,
	    ("dirty entry %p %p", domain, a->entry));

	/*
	 * start_gap may point to an entry adjacent to gaps too small for any
	 * new allocation.  In that case, advance start_gap to the first free
	 * space big enough for a minimum allocation plus two guard pages.
	 */
	min_free = 3 * IOMMU_PAGE_SIZE;
	first = domain->start_gap;
	while (first != NULL && first->free_down < min_free)
		first = RB_PARENT(first, rb_entry);
	for (curr = first; curr != NULL;
	    curr = iommu_gas_next(curr, min_free)) {
		if ((first = RB_LEFT(curr, rb_entry)) != NULL &&
		    first->last + min_free <= curr->start)
			break;
		if ((first = RB_RIGHT(curr, rb_entry)) != NULL &&
		    curr->end + min_free <= first->first)
			break;
	}
	domain->start_gap = curr;

	/*
	 * If the subtree doesn't have free space for the requested allocation
	 * plus two guard pages, skip it.
	 */
	min_free = 2 * IOMMU_PAGE_SIZE +
	    roundup2(a->size + a->offset, IOMMU_PAGE_SIZE);

	/* Climb to find a node in the subtree of big-enough ranges. */
	first = curr;
	while (first != NULL && first->free_down < min_free)
		first = RB_PARENT(first, rb_entry);

	/*
	 * Walk the big-enough ranges tree until one satisfies alignment
	 * requirements, or violates lowaddr address requirement.
	 */
	addr = a->common->lowaddr;
	for (curr = first; curr != NULL;
	    curr = iommu_gas_next(curr, min_free)) {
		if ((first = RB_LEFT(curr, rb_entry)) != NULL &&
		    iommu_gas_match_one(a, first->last, curr->start,
		    0, addr)) {
			RB_INSERT_PREV(iommu_gas_entries_tree,
			    &domain->rb_root, curr, a->entry);
			return (0);
		}
		if (curr->end >= addr) {
			/* All remaining ranges > addr */
			break;
		}
		if ((first = RB_RIGHT(curr, rb_entry)) != NULL &&
		    iommu_gas_match_one(a, curr->end, first->first,
		    0, addr)) {
			RB_INSERT_NEXT(iommu_gas_entries_tree,
			    &domain->rb_root, curr, a->entry);
			return (0);
		}
	}

	/*
	 * To resume the search at the start of the upper region, first climb to
	 * the nearest ancestor that spans highaddr.  Then find the last entry
	 * before highaddr that could abut a big-enough range.
	 */
	addr = a->common->highaddr;
	while (curr != NULL && curr->last < addr)
		curr = RB_PARENT(curr, rb_entry);
	first = NULL;
	while (curr != NULL && curr->free_down >= min_free) {
		if (addr < curr->end)
			curr = RB_LEFT(curr, rb_entry);
		else {
			first = curr;
			curr = RB_RIGHT(curr, rb_entry);
		}
	}

	/*
	 * Walk the remaining big-enough ranges until one satisfies alignment
	 * requirements.
	 */
	for (curr = first; curr != NULL;
	    curr = iommu_gas_next(curr, min_free)) {
		if ((first = RB_LEFT(curr, rb_entry)) != NULL &&
		    iommu_gas_match_one(a, first->last, curr->start,
		    addr + 1, domain->end - 1)) {
			RB_INSERT_PREV(iommu_gas_entries_tree,
			    &domain->rb_root, curr, a->entry);
			return (0);
		}
		if ((first = RB_RIGHT(curr, rb_entry)) != NULL &&
		    iommu_gas_match_one(a, curr->end, first->first,
		    addr + 1, domain->end - 1)) {
			RB_INSERT_NEXT(iommu_gas_entries_tree,
			    &domain->rb_root, curr, a->entry);
			return (0);
		}
	}

	return (ENOMEM);
}

static int
iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int flags)
{
	struct iommu_map_entry *next, *prev;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	if ((entry->start & IOMMU_PAGE_MASK) != 0 ||
	    (entry->end & IOMMU_PAGE_MASK) != 0)
		return (EINVAL);
	if (entry->start >= entry->end)
		return (EINVAL);
	if (entry->end >= domain->end)
		return (EINVAL);

	entry->flags |= IOMMU_MAP_ENTRY_FAKE;
	next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(next != NULL, ("next must be non-null %p %jx", domain,
	    (uintmax_t)entry->start));
	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
	/* prev could be NULL */
	entry->flags &= ~IOMMU_MAP_ENTRY_FAKE;

	/*
	 * Adapt to broken BIOSes which specify overlapping RMRR
	 * entries.
	 *
	 * XXXKIB: this does not handle a case when prev or next
	 * entries are completely covered by the current one, which
	 * extends both ways.
	 */
	if (prev != NULL && prev->end > entry->start &&
	    (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->start = prev->end;
	}
	if (next->start < entry->end &&
	    (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->end = next->start;
	}
	if (entry->end == entry->start)
		return (0);

	if (prev != NULL && prev->end > entry->start) {
		/* This assumes that prev is the placeholder entry. */
		iommu_gas_rb_remove(domain, prev);
		prev = NULL;
	}
	RB_INSERT_PREV(iommu_gas_entries_tree,
	    &domain->rb_root, next, entry);
	if (next->start < entry->end) {
		iommu_gas_rb_remove(domain, next);
		next = NULL;
	}

	if ((flags & IOMMU_MF_RMRR) != 0)
		entry->flags = IOMMU_MAP_ENTRY_RMRR;

#ifdef INVARIANTS
	struct iommu_map_entry *ip, *in;
	ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
	in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(prev == NULL || ip == prev,
	    ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
	    entry, entry->start, entry->end, prev,
	    prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end,
	    ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end));
	KASSERT(next == NULL || in == next,
	    ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)",
	    entry, entry->start, entry->end, next,
	    next == NULL ? 0 : next->start, next == NULL ? 0 : next->end,
	    in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end));
#endif

	return (0);
}

void
iommu_gas_free_space(struct iommu_map_entry *entry)
{
	struct iommu_domain *domain;

	domain = entry->domain;
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
	    ("permanent entry %p %p", domain, entry));

	IOMMU_DOMAIN_LOCK(domain);
	iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
	IOMMU_DOMAIN_UNLOCK(domain);
}

void
iommu_gas_free_region(struct iommu_map_entry *entry)
{
	struct iommu_domain *domain;

	domain = entry->domain;
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
	    ("non-RMRR entry %p %p", domain, entry));

	IOMMU_DOMAIN_LOCK(domain);
	if (entry != domain->first_place &&
	    entry != domain->last_place)
		iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;
	IOMMU_DOMAIN_UNLOCK(domain);
}

static struct iommu_map_entry *
iommu_gas_remove_clip_left(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **r)
{
	struct iommu_map_entry *entry, *res, fentry;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	MPASS(start <= end);
	MPASS(end <= domain->end);

	/*
	 * Find an entry which contains the supplied guest's address
	 * start, or the first entry after the start.  Since we
	 * asserted that start is below domain end, entry should
	 * exist.  Then clip it if needed.
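	 *
	 * The lookup key is a zero-size entry at start + 1: entries compare
	 * by their end address, so RB_NFIND returns the first entry whose
	 * end is greater than start.  The IOMMU_MAP_ENTRY_FAKE flag keeps
	 * the overlap assertion in iommu_gas_cmp_entries() quiet for this
	 * throwaway key.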
	 */
	bzero(&fentry, sizeof(fentry));
	fentry.start = start + 1;
	fentry.end = start + 1;
	fentry.flags = IOMMU_MAP_ENTRY_FAKE;
	entry = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &fentry);

	if (entry->start >= start ||
	    (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
		return (entry);

	res = *r;
	*r = NULL;
	*res = *entry;
	res->start = entry->end = start;
	RB_UPDATE_AUGMENT(entry, rb_entry);
	RB_INSERT_NEXT(iommu_gas_entries_tree,
	    &domain->rb_root, entry, res);
	return (res);
}

static bool
iommu_gas_remove_clip_right(struct iommu_domain *domain,
    iommu_gaddr_t end, struct iommu_map_entry *entry,
    struct iommu_map_entry *r)
{
	if (entry->start >= end || (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
		return (false);

	*r = *entry;
	r->end = entry->start = end;
	RB_UPDATE_AUGMENT(entry, rb_entry);
	RB_INSERT_PREV(iommu_gas_entries_tree,
	    &domain->rb_root, entry, r);
	return (true);
}

static void
iommu_gas_remove_unmap(struct iommu_domain *domain,
    struct iommu_map_entry *entry, struct iommu_map_entries_tailq *gcp)
{
	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	if ((entry->flags & (IOMMU_MAP_ENTRY_UNMAPPED |
	    IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_REMOVING)) != 0)
		return;
	MPASS((entry->flags & IOMMU_MAP_ENTRY_PLACE) == 0);
	entry->flags |= IOMMU_MAP_ENTRY_REMOVING;
	TAILQ_INSERT_TAIL(gcp, entry, dmamap_link);
}

static void
iommu_gas_remove_locked(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t size,
    struct iommu_map_entries_tailq *gc,
    struct iommu_map_entry **r1, struct iommu_map_entry **r2)
{
	struct iommu_map_entry *entry, *nentry;
	iommu_gaddr_t end;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
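	/*
	 * Clip an entry straddling 'start', queue every entry covered by
	 * the range for unmapping, then clip an entry straddling 'end'.
	 * The pieces split off into the range reuse the preallocated
	 * entries *r1 and *r2; RMRR entries are never touched.
	 */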

	end = start + size;

	nentry = iommu_gas_remove_clip_left(domain, start, end, r1);
	RB_FOREACH_FROM(entry, iommu_gas_entries_tree, nentry) {
		if (entry->start >= end)
			break;
		KASSERT(start <= entry->start,
		    ("iommu_gas_remove entry (%#jx, %#jx) start %#jx",
		    entry->start, entry->end, start));
		iommu_gas_remove_unmap(domain, entry, gc);
	}
	if (iommu_gas_remove_clip_right(domain, end, entry, *r2)) {
		iommu_gas_remove_unmap(domain, *r2, gc);
		*r2 = NULL;
	}

#ifdef INVARIANTS
	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
		if ((entry->flags & (IOMMU_MAP_ENTRY_RMRR |
		    IOMMU_MAP_ENTRY_PLACE)) != 0)
			continue;
		KASSERT(entry->end <= start || entry->start >= end,
		    ("iommu_gas_remove leftover entry (%#jx, %#jx) range "
		    "(%#jx, %#jx)",
		    entry->start, entry->end, start, end));
	}
#endif
}

static void
iommu_gas_remove_init(struct iommu_domain *domain,
    struct iommu_map_entries_tailq *gc, struct iommu_map_entry **r1,
    struct iommu_map_entry **r2)
{
	TAILQ_INIT(gc);
	*r1 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	*r2 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
}

static void
iommu_gas_remove_cleanup(struct iommu_domain *domain,
    struct iommu_map_entries_tailq *gc, struct iommu_map_entry **r1,
    struct iommu_map_entry **r2)
{
	if (*r1 != NULL) {
		iommu_gas_free_entry(*r1);
		*r1 = NULL;
	}
	if (*r2 != NULL) {
		iommu_gas_free_entry(*r2);
		*r2 = NULL;
	}
	iommu_domain_unload(domain, gc, true);
}

/*
 * Remove specified range from the GAS of the domain.  Note that the
 * removal is not guaranteed to occur upon the function return, it
 * might be finalized some time after, when hardware reports that
 * (queued) IOTLB invalidation was performed.
 */
void
iommu_gas_remove(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size)
{
	struct iommu_map_entry *r1, *r2;
	struct iommu_map_entries_tailq gc;

	iommu_gas_remove_init(domain, &gc, &r1, &r2);
	IOMMU_DOMAIN_LOCK(domain);
	iommu_gas_remove_locked(domain, start, size, &gc, &r1, &r2);
	IOMMU_DOMAIN_UNLOCK(domain);
	iommu_gas_remove_cleanup(domain, &gc, &r1, &r2);
}

int
iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
	struct iommu_gas_match_args a;
	struct iommu_map_entry *entry;
	int error;

	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
	    ("invalid flags 0x%x", flags));

	a.size = size;
	a.offset = offset;
	a.common = common;
	a.gas_flags = flags;
	entry = iommu_gas_alloc_entry(domain,
	    (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0);
	if (entry == NULL)
		return (ENOMEM);
	a.entry = entry;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_find_space(domain, &a);
	if (error == ENOMEM) {
		IOMMU_DOMAIN_UNLOCK(domain);
		iommu_gas_free_entry(entry);
		return (error);
	}
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
	KASSERT(error == 0,
	    ("unexpected error %d from iommu_gas_find_entry", error));
	KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
	    (uintmax_t)entry->end, (uintmax_t)domain->end));
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);

	error = domain->ops->map(domain, entry, ma, eflags,
	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, true,
		    (flags & IOMMU_MF_CANWAIT) != 0);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	*res = entry;
	return (0);
}

int
iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	iommu_gaddr_t start;
	int error;

	KASSERT(entry->domain == domain,
	    ("mismatched domain %p entry %p entry->domain %p", domain,
	    entry, entry->domain));
	KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
	    entry, entry->flags));
	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
	    ("invalid flags 0x%x", flags));

	start = entry->start;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_alloc_region(domain, entry, flags);
	if (error != 0) {
		IOMMU_DOMAIN_UNLOCK(domain);
		return (error);
	}
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);
	if (entry->end == entry->start)
		return (0);

	error = domain->ops->map(domain, entry,
	    ma + OFF_TO_IDX(start - entry->start), eflags,
	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, false,
		    (flags & IOMMU_MF_CANWAIT) != 0);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	return (0);
}

static int
iommu_gas_reserve_region_locked(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry)
{
	int error;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	entry->start = start;
	entry->end = end;
	error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
	if (error == 0)
		entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
	return (error);
}

int
iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **entry0)
{
	struct iommu_map_entry *entry;
	int error;

	entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_reserve_region_locked(domain, start, end, entry);
	IOMMU_DOMAIN_UNLOCK(domain);
	if (error != 0)
		iommu_gas_free_entry(entry);
	else if (entry0 != NULL)
		*entry0 = entry;
	return (error);
}

/*
 * As in iommu_gas_reserve_region, reserve [start, end), but allow for existing
 * entries.
 */
int
iommu_gas_reserve_region_extend(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end)
{
	struct iommu_map_entry *entry, *next, *prev, key = {};
	iommu_gaddr_t entry_start, entry_end;
	int error;

	error = 0;
	entry = NULL;
	end = ummin(end, domain->end);
	while (start < end) {
		/* Preallocate an entry. */
		if (entry == NULL)
			entry = iommu_gas_alloc_entry(domain,
			    IOMMU_PGF_WAITOK);
		/* Calculate the free region from here to the next entry. */
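		/*
		 * [entry_start, entry_end) below is the part of [start, end)
		 * that lies between the previous entry and 'next'; 'start'
		 * then jumps past 'next' so each existing entry is stepped
		 * over exactly once.
		 */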
		key.start = key.end = start;
		IOMMU_DOMAIN_LOCK(domain);
		next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &key);
		KASSERT(next != NULL, ("domain %p with end %#jx has no entry "
		    "after %#jx", domain, (uintmax_t)domain->end,
		    (uintmax_t)start));
		entry_end = ummin(end, next->start);
		prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
		if (prev != NULL)
			entry_start = ummax(start, prev->end);
		else
			entry_start = start;
		start = next->end;
		/* Reserve the region if non-empty. */
		if (entry_start != entry_end) {
			error = iommu_gas_reserve_region_locked(domain,
			    entry_start, entry_end, entry);
			if (error != 0) {
				IOMMU_DOMAIN_UNLOCK(domain);
				break;
			}
			entry = NULL;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}
	/* Release a preallocated entry if it was not used. */
	if (entry != NULL)
		iommu_gas_free_entry(entry);
	return (error);
}

void
iommu_unmap_msi(struct iommu_ctx *ctx)
{
	struct iommu_map_entry *entry;
	struct iommu_domain *domain;

	domain = ctx->domain;
	entry = domain->msi_entry;
	if (entry == NULL)
		return;

	domain->ops->unmap(domain, entry, IOMMU_PGF_WAITOK);

	iommu_gas_free_space(entry);

	iommu_gas_free_entry(entry);

	domain->msi_entry = NULL;
	domain->msi_base = 0;
	domain->msi_phys = 0;
}

int
iommu_map_msi(struct iommu_ctx *ctx, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	struct iommu_domain *domain;
	struct iommu_map_entry *entry;
	int error;

	error = 0;
	domain = ctx->domain;

	/* Check if there is already an MSI page allocated */
	IOMMU_DOMAIN_LOCK(domain);
	entry = domain->msi_entry;
	IOMMU_DOMAIN_UNLOCK(domain);

	if (entry == NULL) {
		error = iommu_gas_map(domain, &ctx->tag->common, size, offset,
		    eflags, flags, ma, &entry);
		IOMMU_DOMAIN_LOCK(domain);
		if (error == 0) {
			if (domain->msi_entry == NULL) {
				MPASS(domain->msi_base == 0);
				MPASS(domain->msi_phys == 0);

				domain->msi_entry = entry;
				domain->msi_base = entry->start;
				domain->msi_phys = VM_PAGE_TO_PHYS(ma[0]);
			} else {
				/*
				 * We lost the race and already have an
				 * MSI page allocated. Free the unneeded entry.
				 */
				iommu_gas_free_entry(entry);
			}
		} else if (domain->msi_entry != NULL) {
			/*
			 * The allocation failed, but another succeeded.
			 * Return success as there is a valid MSI page.
			 */
			error = 0;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}

	return (error);
}

void
iommu_translate_msi(struct iommu_domain *domain, uint64_t *addr)
{

	*addr = (*addr - domain->msi_phys) + domain->msi_base;

	KASSERT(*addr >= domain->msi_entry->start,
	    ("%s: Address is below the MSI entry start address (%jx < %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->start));

	KASSERT(*addr + sizeof(*addr) <= domain->msi_entry->end,
	    ("%s: Address is above the MSI entry end address (%jx < %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->end));
}

SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "");

#ifdef INVARIANTS
SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN,
    &iommu_check_free, 0,
    "Check the GPA RBtree for free_down and free_after validity");
#endif

#include "opt_ddb.h"
#ifdef DDB

#include <ddb/ddb.h>

static void
iommu_debug_dump_gas(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry;

	db_printf("iommu_domain %p tree %p iommu %p fl %#x\n", domain,
	    &domain->rb_root, domain->iommu, domain->flags);
	db_printf("iommu_domain %p tree %p\n", domain, &domain->rb_root);
	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
		db_printf(
		    "  e %p [%#jx %#jx] fl %#x first %#jx last %#jx free_down %#jx",
		    entry, (uintmax_t)entry->start, (uintmax_t)entry->end,
		    entry->flags,
		    (uintmax_t)entry->first, (uintmax_t)entry->last,
		    (uintmax_t)entry->free_down);
		if (entry == domain->start_gap)
			db_printf(" start_gap");
		if (entry == domain->first_place)
			db_printf(" first_place");
		if (entry == domain->last_place)
			db_printf(" last_place");
		db_printf("\n");
	}
}

DB_SHOW_COMMAND(iommu_domain, iommu_domain_show)
{
	struct iommu_domain *domain;

	if (!have_addr) {
		db_printf("show iommu_domain addr\n");
		return;
	}

	domain = (void *)addr;
	iommu_debug_dump_gas(domain);
}

#endif