/*	$NetBSD: iommu.c,v 1.108 2014/08/24 19:09:43 palle Exp $	*/

/*
 * Copyright (c) 1999, 2000 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001, 2002 Eduardo Horvath
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * UltraSPARC IOMMU support; used by both the sbus and pci code.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iommu.c,v 1.108 2014/08/24 19:09:43 palle Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#include <sys/bus.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/iommuvar.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>

#ifdef DEBUG
#define IDB_BUSDMA	0x1
#define IDB_IOMMU	0x2
#define IDB_INFO	0x4
#define IDB_SYNC	0x8
int iommudebug = 0x0;
#define DPRINTF(l, s)	do { if (iommudebug & l) printf s; } while (0)
#define IOTTE_DEBUG(n)	(n)
#else
#define DPRINTF(l, s)
#define IOTTE_DEBUG(n)	0
#endif

#define iommu_strbuf_flush(i, v) do {					\
	if ((i)->sb_flush)						\
		bus_space_write_8((i)->sb_is->is_bustag, (i)->sb_sb,	\
		    STRBUFREG(strbuf_pgflush), (v));			\
	} while (0)

static	int iommu_strbuf_flush_done(struct strbuf_ctl *);
static	void _iommu_dvmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	bus_size_t, int);

/*
 * initialise the UltraSPARC IOMMU (SBUS or PCI):
 *	- allocate and setup the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(char *name, struct iommu_state *is, int tsbsize, uint32_t iovabase)
{
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;
	struct pglist pglist;

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS or PCI controller so we will
	 * deal with it here.
	 *
	 * For sysio and psycho/psycho+ the IOMMU address space always ends at
	 * 0xffffe000, but the starting address depends on the size of the
	 * map.  The map size is 1024 * 2 ^ is->is_tsbsize entries, where each
	 * entry is 8 bytes.  The start of the map can be calculated by
	 * (0xffffe000 << (8 + is->is_tsbsize)).
	 *
	 * But sabre and hummingbird use a different scheme that seems to
	 * be hard-wired, so we read the start and size from the PROM and
	 * just use those values.
	 */
	if (strncmp(name, "pyro", 4) == 0) {
		is->is_cr = IOMMUREG_READ(is, iommu_cr);
		is->is_cr &= ~IOMMUCR_FIRE_BE;
		is->is_cr |= (IOMMUCR_FIRE_SE | IOMMUCR_FIRE_CM_EN |
		    IOMMUCR_FIRE_TE);
	} else
		is->is_cr = IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	if (iovabase == -1) {
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);
		is->is_dvmaend = IOTSB_VEND - 1;
	} else {
		is->is_dvmabase = iovabase;
		is->is_dvmaend = iovabase + IOTSB_VSIZE(tsbsize) - 1;
	}

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */
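	/*
	 * With 8KB pages, PAGE_SIZE << is_tsbsize bytes is exactly
	 * 1024 * 2^is_tsbsize eight-byte TTEs, the map size described
	 * in the layout comment above.
	 */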

	size = PAGE_SIZE << is->is_tsbsize;
	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
	    (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
		panic("iommu_init: no memory");

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("iommu_init: no memory");
	is->is_tsb = (int64_t *)va;

	is->is_ptsb = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));

	/* Map the pages */
	TAILQ_FOREACH(pg, &pglist, pageq.queue) {
		pa = VM_PAGE_TO_PHYS(pg);
		pmap_kenter_pa(va, pa | PMAP_NVC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(is->is_tsb, 0, size);

#ifdef DEBUG
	if (iommudebug & IDB_INFO)
	{
		/* Probe the iommu */

		printf("iommu cr=%llx tsb=%llx\n",
		    (unsigned long long)bus_space_read_8(is->is_bustag,
			is->is_iommu,
			offsetof(struct iommureg, iommu_cr)),
		    (unsigned long long)bus_space_read_8(is->is_bustag,
			is->is_iommu,
			offsetof(struct iommureg, iommu_tsb)));
		printf("TSB base %p phys %llx\n", (void *)is->is_tsb,
		    (unsigned long long)is->is_ptsb);
		delay(1000000); /* 1 s */
	}
#endif

	/*
	 * Now that the hardware is set up, allocate the DVMA map.
	 */
	aprint_debug("DVMA map: %x to %x\n",
	    (unsigned int)is->is_dvmabase,
	    (unsigned int)is->is_dvmaend);
	aprint_debug("IOTSB: %llx to %llx\n",
	    (unsigned long long)is->is_ptsb,
	    (unsigned long long)(is->is_ptsb + size - 1));
	is->is_dvmamap = extent_create(name,
	    is->is_dvmabase, is->is_dvmaend,
	    0, 0, EX_NOWAIT);
	/* XXXMRG Check is_dvmamap is valid. */

	mutex_init(&is->is_lock, MUTEX_DEFAULT, IPL_HIGH);

	/*
	 * Set the TSB size.  The relevant bits were moved to the TSB
	 * base register in the PCIe host bridges.
	 */
	if (is->is_flags & IOMMU_TSBSIZE_IN_PTSB)
		is->is_ptsb |= is->is_tsbsize;
	else
		is->is_cr |= (is->is_tsbsize << 16);

	/*
	 * now actually start up the IOMMU
	 */
	iommu_reset(is);
}

/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them.  If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not remain.
 */
void
iommu_reset(struct iommu_state *is)
{
	int i;
	struct strbuf_ctl *sb;

	IOMMUREG_WRITE(is, iommu_tsb, is->is_ptsb);

	/* Enable IOMMU in diagnostic mode */
	IOMMUREG_WRITE(is, iommu_cr, is->is_cr|IOMMUCR_DE);

	for (i = 0; i < 2; i++) {
		if ((sb = is->is_sb[i])) {

			/* Enable diagnostics mode? */
			bus_space_write_8(is->is_bustag, is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl), STRBUF_EN);

			membar_Lookaside();

			/* No streaming buffers? Disable them */
			if (bus_space_read_8(is->is_bustag,
			    is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl)) == 0) {
				is->is_sb[i]->sb_flush = NULL;
			} else {

				/*
				 * locate the pa of the flush buffer.
				 */
				if (pmap_extract(pmap_kernel(),
				    (vaddr_t)is->is_sb[i]->sb_flush,
				    &is->is_sb[i]->sb_flushpa) == FALSE)
					is->is_sb[i]->sb_flush = NULL;
			}
		}
	}

	if (is->is_flags & IOMMU_FLUSH_CACHE)
		IOMMUREG_WRITE(is, iommu_cache_invalidate, -1ULL);
}

/*
 * Here are the iommu control routines.
 */
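/*
 * iommu_enter: install the TTE mapping DVMA address va to physical
 * address pa.  If the parent bridge has a usable streaming buffer, the
 * streaming buffer state for va is flushed first and the entry may be
 * marked streamable (BUS_DMA_STREAMING); without a working streaming
 * buffer the streamable bit is never set.  The IOMMU TLB entry for va
 * is then invalidated through the flush register.
 */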
void
iommu_enter(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	struct iommu_state *is = sb->sb_is;
	int strbuf = (flags & BUS_DMA_STREAMING);
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_enter: va %#lx not in DVMA space", va);
#endif

	/* Is the streamcache flush really needed? */
	if (sb->sb_flush)
		iommu_strbuf_flush(sb, va);
	else
		/* If we can't flush the strbuf don't enable it. */
		strbuf = 0;

	tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
	    !(flags & BUS_DMA_NOCACHE), (strbuf));
#ifdef DEBUG
	tte |= (flags & 0xff000LL)<<(4*8);
#endif

	is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = tte;
	bus_space_write_8(is->is_bustag, is->is_iommu,
	    IOMMUREG(iommu_flush), va);
	DPRINTF(IDB_IOMMU, ("iommu_enter: slot %d va %lx pa %lx "
	    "TSB[%lx]@%p=%lx\n", (int)IOTSBSLOT(va,is->is_tsbsize),
	    va, (long)pa, (u_long)IOTSBSLOT(va,is->is_tsbsize),
	    (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
	    (u_long)tte));
}

/*
 * Find the value of a DVMA address (debug routine).
 */
paddr_t
iommu_extract(struct iommu_state *is, vaddr_t dva)
{
	int64_t tte = 0;

	if (dva >= is->is_dvmabase && dva <= is->is_dvmaend)
		tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)];

	if ((tte & IOTTE_V) == 0)
		return ((paddr_t)-1L);
	return (tte & IOTTE_PAMASK);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 *
 * Only demap from IOMMU if flag is set.
 *
 * XXX: this function needs better internal error checking.
 */
void
iommu_remove(struct iommu_state *is, vaddr_t va, size_t len)
{
	int slot;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if ((long)(va + len) < (long)va)
		panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
		    (long) va, (long) len);
	if (len & ~0xfffffff)
		panic("iommu_remove: ridiculous len 0x%lx", (u_long)len);
#endif

	va = trunc_page(va);
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
	    va, (u_long)IOTSBSLOT(va, is->is_tsbsize),
	    &is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)]));
	while (len > 0) {
		DPRINTF(IDB_IOMMU, ("iommu_remove: clearing TSB slot %d "
		    "for va %p size %lx\n",
		    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va,
		    (u_long)len));
		if (len <= PAGE_SIZE)
			len = 0;
		else
			len -= PAGE_SIZE;

#if 0
		/*
		 * XXX Zero-ing the entry would not require RMW
		 *
		 * Disabling valid bit while a page is used by a device
		 * causes an uncorrectable DMA error.
		 * Workaround to avoid an uncorrectable DMA error is
		 * eliminating the next line, but the page is mapped
		 * until the next iommu_enter call.
		 */
		is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] &= ~IOTTE_V;
		membar_StoreStore();
#endif
		IOMMUREG_WRITE(is, iommu_flush, va);

		/* Flush cache if necessary. */
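		/*
		 * The TTE cache in the newer host bridges is flushed by
		 * writing the physical address of a TTE to the cache flush
		 * register; doing so once per eight consecutive slots, or
		 * at the end of the range, appears to be sufficient
		 * (presumably one flush covers a 64-byte line of eight
		 * TTEs).
		 */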
		slot = IOTSBSLOT(trunc_page(va), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (len == 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		va += PAGE_SIZE;
	}
}

static int
iommu_strbuf_flush_done(struct strbuf_ctl *sb)
{
	struct iommu_state *is = sb->sb_is;
	struct timeval cur, flushtimeout;

#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

	if (!sb->sb_flush)
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
	 *     we're not on a cache line boundary (64-bits):
	 *   2 Store 0 in flag
	 *   3 Store pointer to flag in flushsync
	 *   4 wait till flushsync becomes 0x1
	 *
	 * If it takes more than .5 sec, something
	 * went wrong.
	 */

	*sb->sb_flush = 0;
	bus_space_write_8(is->is_bustag, sb->sb_sb,
	    STRBUFREG(strbuf_flushsync), sb->sb_flushpa);

	microtime(&flushtimeout);
	cur = flushtimeout;
	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */

	DPRINTF(IDB_IOMMU, ("%s: flush = %lx at va = %lx pa = %lx now="
	    "%"PRIx64":%"PRIx32" until = %"PRIx64":%"PRIx32"\n", __func__,
	    (long)*sb->sb_flush, (long)sb->sb_flush, (long)sb->sb_flushpa,
	    cur.tv_sec, cur.tv_usec,
	    flushtimeout.tv_sec, flushtimeout.tv_usec));

	/* Bypass non-coherent D$ */
	while ((!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) &&
	    timercmp(&cur, &flushtimeout, <=))
		microtime(&cur);

#ifdef DIAGNOSTIC
	if (!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) {
		printf("%s: flush timeout %p, at %p\n", __func__,
		    (void *)(u_long)*sb->sb_flush,
		    (void *)(u_long)sb->sb_flushpa); /* panic? */
#ifdef DDB
		Debugger();
#endif
	}
#endif
	DPRINTF(IDB_IOMMU, ("%s: flushed\n", __func__));
	return (*sb->sb_flush);
}

/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
int
iommu_dvmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int err, needsflush;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long dvmaddr, sgstart, sgend, bmask;
	bus_size_t align, boundary, len;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	struct pmap *pmap;
	int slot;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load(): error %d > %d -- "
		     "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = max(map->dm_segs[0]._ds_align, PAGE_SIZE);

	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
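	/*
	 * extent_alloc() enforces the boundary for us; when a single
	 * allocation is larger than the boundary we pass 0 instead and
	 * split the resulting DVMA range into segments below.
	 */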
	KASSERT(is->is_dvmamap);
	mutex_enter(&is->is_lock);
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    EX_NOWAIT|EX_BOUNDZERO, &dvmaddr);
	mutex_exit(&is->is_lock);

#ifdef DEBUG
	if (err || (dvmaddr == (u_long)-1)) {
		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (err != 0)
		return (err);

	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	/*
	 * Now split the DVMA range into segments, not crossing
	 * the boundary.
	 */
	seg = 0;
	sgstart = dvmaddr + (vaddr & PGOFSET);
	sgend = sgstart + buflen - 1;
	map->dm_segs[seg].ds_addr = sgstart;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary - 1 %lx "
	    "~(boundary - 1) %lx\n", (long)boundary, (long)(boundary - 1),
	    (long)~(boundary - 1)));
	bmask = ~(boundary - 1);
	while ((sgstart & bmask) != (sgend & bmask) ||
	    sgend - sgstart + 1 > map->dm_maxsegsz) {
		/* Oops.  We crossed a boundary or large seg.  Split the xfer. */
		len = map->dm_maxsegsz;
		if ((sgstart & bmask) != (sgend & bmask))
			len = min(len, boundary - (sgstart & (boundary - 1)));
		map->dm_segs[seg].ds_len = len;
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
		    "seg %d start %lx size %lx\n", seg,
		    (long)map->dm_segs[seg].ds_addr,
		    (long)map->dm_segs[seg].ds_len));
		if (++seg >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
			    "too many segments %d\n", seg));
			mutex_enter(&is->is_lock);
			err = extent_free(is->is_dvmamap,
			    dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			mutex_exit(&is->is_lock);
			if (err != 0)
				printf("warning: %s: %" PRId64
				    " of DVMA space lost\n", __func__, sgsize);
			return (EFBIG);
		}
		sgstart += len;
		map->dm_segs[seg].ds_addr = sgstart;
	}
	map->dm_segs[seg].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
	    "seg %d start %lx size %lx\n", seg,
	    (long)map->dm_segs[seg].ds_addr, (long)map->dm_segs[seg].ds_len));
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = buflen;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	needsflush = 0;
	for (; buflen > 0; ) {

		/*
		 * Get the physical address for this page.
		 */
		if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
#ifdef DIAGNOSTIC
			printf("iommu_dvmamap_load: pmap_extract failed %lx\n", vaddr);
#endif
			bus_dmamap_unload(t, map);
			return (-1);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
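		/*
		 * Use what is left of the current page starting at vaddr,
		 * capped at the remaining buffer length.
		 */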
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load: map %p loading va %p "
		     "dva %lx at pa %lx\n",
		     map, (void *)vaddr, (long)dvmaddr,
		     (long)trunc_page(curaddr)));
		iommu_enter(sb, trunc_page(dvmaddr), trunc_page(curaddr),
		    flags | IOTTE_DEBUG(0x4000));
		needsflush = 1;

		vaddr += sgsize;
		buflen -= sgsize;

		/* Flush cache if necessary. */
		slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (buflen <= 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		dvmaddr += PAGE_SIZE;
	}
	if (needsflush)
		iommu_strbuf_flush_done(sb);
#ifdef DIAGNOSTIC
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
#endif
	return (0);
}


void
iommu_dvmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int error;
	bus_size_t sgsize = map->_dm_dvmasize;

	/* Flush the iommu */
#ifdef DEBUG
	if (!map->_dm_dvmastart) {
		printf("iommu_dvmamap_unload: _dm_dvmastart is zero\n");
#ifdef DDB
		Debugger();
#endif
	}
#endif
	iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

	/* Flush the caches */
	bus_dmamap_unload(t->_parent, map);

	mutex_enter(&is->is_lock);
	error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
	    map->_dm_dvmasize, EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	mutex_exit(&is->is_lock);
	if (error != 0)
		printf("warning: %s: %" PRId64 " of DVMA space lost\n",
		    __func__, sgsize);

	/* Clear the map */
}


int
iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	struct vm_page *pg;
	int i, j;
	int left;
	int err, needsflush;
	bus_size_t sgsize;
	paddr_t pa;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend, bmask;
	struct pglist *pglist;
	const int pagesz = PAGE_SIZE;
	int slot;
#ifdef DEBUG
	int npg = 0;
#endif

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_raw: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = max(segs[0]._ds_align, pagesz);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	/* Count up the total number of pages we need */
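	/*
	 * A segment that picks up in the same page where the previous one
	 * ended shares that page; otherwise the running total is rounded
	 * up to a page boundary and the new segment's page offset is added
	 * before its length.
	 */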
	pa = trunc_page(segs[0].ds_addr);
	sgsize = 0;
	left = size;
	for (i = 0; left > 0 && i < nsegs; i++) {
		if (round_page(pa) != round_page(segs[i].ds_addr))
			sgsize = round_page(sgsize) +
			    (segs[i].ds_addr & PGOFSET);
		sgsize += min(left, segs[i].ds_len);
		left -= segs[i].ds_len;
		pa = segs[i].ds_addr + segs[i].ds_len;
	}
	sgsize = round_page(sgsize);

	mutex_enter(&is->is_lock);
	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT) |
	    EX_BOUNDZERO, &dvmaddr);
	mutex_exit(&is->is_lock);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (u_long)-1)
	{
		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	bmask = ~(boundary - 1);
	if ((pglist = segs[0]._ds_mlist) == NULL) {
		u_long prev_va = 0UL, last_va = dvmaddr;
		paddr_t prev_pa = 0;
		int end = 0, offset;
		bus_size_t len = size;

		/*
		 * This segment list is made up of individual physical
		 * segments, probably by _bus_dmamap_load_uio() or
		 * _bus_dmamap_load_mbuf().  Ignore the mlist and
		 * load each one individually.
		 */
		j = 0;
		needsflush = 0;
		for (i = 0; i < nsegs ; i++) {

			pa = segs[i].ds_addr;
			offset = (pa & PGOFSET);
			pa = trunc_page(pa);
			dvmaddr = trunc_page(dvmaddr);
			left = min(len, segs[i].ds_len);

			DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: converting "
			    "physseg %d start %lx size %lx\n", i,
			    (long)segs[i].ds_addr, (long)segs[i].ds_len));

			if ((pa == prev_pa) &&
			    ((offset != 0) || (end != offset))) {
				/* We can re-use this mapping */
				dvmaddr = prev_va;
			}

			sgstart = dvmaddr + offset;
			sgend = sgstart + left - 1;

			/* Are the segments virtually adjacent? */
			if ((j > 0) && (end == offset) &&
			    ((offset == 0) || (pa == prev_pa)) &&
			    (map->dm_segs[j-1].ds_len + left <=
			     map->dm_maxsegsz)) {
				/* Just append to the previous segment. */
				map->dm_segs[--j].ds_len += left;
				/* Restore sgstart for boundary check */
				sgstart = map->dm_segs[j].ds_addr;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "appending seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			} else {
				if (j >= map->_dm_segcnt) {
					iommu_remove(is, map->_dm_dvmastart,
					    last_va - map->_dm_dvmastart);
					goto fail;
				}
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			}
			end = (offset + left) & PGOFSET;

			/* Check for boundary issues */
			while ((sgstart & bmask) != (sgend & bmask)) {
				/* Need a new segment. */
				map->dm_segs[j].ds_len =
				    boundary - (sgstart & (boundary - 1));
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
				if (++j >= map->_dm_segcnt) {
					iommu_remove(is, map->_dm_dvmastart,
					    last_va - map->_dm_dvmastart);
					goto fail;
				}
				sgstart += map->dm_segs[j-1].ds_len;
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = sgend - sgstart + 1;
			}

			if (sgsize == 0)
				panic("iommu_dmamap_load_raw: size botch");

			/* Now map a series of pages. */
			while (dvmaddr <= sgend) {
				DPRINTF(IDB_BUSDMA,
				    ("iommu_dvmamap_load_raw: map %p "
				     "loading va %lx at pa %lx\n",
				     map, (long)dvmaddr,
				     (long)(pa)));
				/* Enter it if we haven't before. */
				if (prev_va != dvmaddr) {
					iommu_enter(sb, prev_va = dvmaddr,
					    prev_pa = pa,
					    flags | IOTTE_DEBUG(++npg << 12));
					needsflush = 1;

					/* Flush cache if necessary. */
					slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
					if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
					    ((dvmaddr + pagesz) > sgend || (slot % 8) == 7))
						IOMMUREG_WRITE(is, iommu_cache_flush,
						    is->is_ptsb + slot * 8);
				}

				dvmaddr += pagesz;
				pa += pagesz;
				last_va = dvmaddr;
			}

			len -= left;
			++j;
		}
		if (needsflush)
			iommu_strbuf_flush_done(sb);

		map->dm_mapsize = size;
		map->dm_nsegs = j;
#ifdef DIAGNOSTIC
		{ int seg;
		for (seg = 0; seg < map->dm_nsegs; seg++) {
			if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
			    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
				printf("seg %d dvmaddr %lx out of range %x - %x\n",
				    seg, (long)map->dm_segs[seg].ds_addr,
				    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
				Debugger();
#endif
			}
		}
		}
#endif
		return (0);
	}

	/*
	 * This was allocated with bus_dmamem_alloc.
	 * The pages are on a `pglist'.
	 */
	i = 0;
	sgstart = dvmaddr;
	sgend = sgstart + size - 1;
	map->dm_segs[i].ds_addr = sgstart;
	while ((sgstart & bmask) != (sgend & bmask)) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		map->dm_segs[i].ds_len = boundary - (sgstart & (boundary - 1));
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
		    "seg %d start %lx size %lx\n", i,
		    (long)map->dm_segs[i].ds_addr,
		    (long)map->dm_segs[i].ds_len));
		if (++i >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			goto fail;
		}
		sgstart += map->dm_segs[i-1].ds_len;
		map->dm_segs[i].ds_addr = sgstart;
	}
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
	    "seg %d start %lx size %lx\n", i,
	    (long)map->dm_segs[i].ds_addr, (long)map->dm_segs[i].ds_len));
	map->dm_segs[i].ds_len = sgend - sgstart + 1;

	needsflush = 0;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(pg);

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load_raw: map %p loading va %lx at pa %lx\n",
		     map, (long)dvmaddr, (long)(pa)));
		iommu_enter(sb, dvmaddr, pa, flags | IOTTE_DEBUG(0x8000));
		needsflush = 1;

		sgsize -= pagesz;

		/* Flush cache if necessary. */
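		/*
		 * Same TTE cache flush rule as in iommu_remove() above:
		 * once per eight slots or at the end of the range.
		 */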
		slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (sgsize == 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		dvmaddr += pagesz;
	}
	if (needsflush)
		iommu_strbuf_flush_done(sb);
	map->dm_mapsize = size;
	map->dm_nsegs = i+1;
#ifdef DIAGNOSTIC
	{ int seg;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
	}
#endif
	return (0);

fail:
	mutex_enter(&is->is_lock);
	err = extent_free(is->is_dvmamap, map->_dm_dvmastart, sgsize,
	    EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	mutex_exit(&is->is_lock);
	if (err != 0)
		printf("warning: %s: %" PRId64 " of DVMA space lost\n",
		    __func__, sgsize);
	return (EFBIG);
}


/*
 * Flush an individual dma segment, returns non-zero if the streaming
 * buffers need flushing afterwards.
 */
static int
iommu_dvmamap_sync_range(struct strbuf_ctl *sb, vaddr_t va, bus_size_t len)
{
	vaddr_t vaend;
	struct iommu_state *is = sb->sb_is;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("invalid va: %llx", (long long)va);
#endif

	if ((is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] & IOTTE_STREAM) == 0) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync_range: attempting to flush "
		     "non-streaming entry\n"));
		return (0);
	}

	vaend = round_page(va + len) - 1;
	va = trunc_page(va);

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || vaend > is->is_dvmaend)
		panic("invalid va range: %llx to %llx (%x to %x)",
		    (long long)va, (long long)vaend,
		    is->is_dvmabase,
		    is->is_dvmaend);
#endif

	for ( ; va <= vaend; va += PAGE_SIZE) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync_range: flushing va %p\n",
		     (void *)(u_long)va));
		iommu_strbuf_flush(sb, va);
	}

	return (1);
}

static void
_iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	bus_size_t count;
	int i, needsflush = 0;

	if (!sb->sb_flush)
		return;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (offset < map->dm_segs[i].ds_len)
			break;
		offset -= map->dm_segs[i].ds_len;
	}

	if (i == map->dm_nsegs)
		panic("%s: segment too short %llu", __func__,
		    (unsigned long long)offset);

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Nothing to do */;
	}

	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE)) {

		for (; len > 0 && i < map->dm_nsegs; i++) {
			count = MIN(map->dm_segs[i].ds_len - offset, len);
			if (count > 0 &&
			    iommu_dvmamap_sync_range(sb,
				map->dm_segs[i].ds_addr + offset, count))
				needsflush = 1;
			offset = 0;
			len -= count;
		}
#ifdef DIAGNOSTIC
		if (i == map->dm_nsegs && len > 0)
			panic("%s: leftover %llu", __func__,
			    (unsigned long long)len);
#endif

		if (needsflush)
			iommu_strbuf_flush_done(sb);
	}
}

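/*
 * iommu_dvmamap_sync: for the "pre" operations sync the CPU cache first
 * and then the IOMMU streaming buffers; for the "post" operations flush
 * the streaming buffers first and then sync the CPU cache.
 */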
void
iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

	/* If len is 0, then there is nothing to do */
	if (len == 0)
		return;

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) {
		/* Flush the CPU then the IOMMU */
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
		_iommu_dvmamap_sync(t, map, offset, len, ops);
	}
	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Flush the IOMMU then the CPU */
		_iommu_dvmamap_sync(t, map, offset, len, ops);
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
	}
}

int
iommu_dvmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_alloc: sz %llx align %llx bound %llx "
	    "segp %p flags %d\n", (unsigned long long)size,
	    (unsigned long long)alignment, (unsigned long long)boundary,
	    segs, flags));
	return (bus_dmamem_alloc(t->_parent, size, alignment, boundary,
	    segs, nsegs, rsegs, flags|BUS_DMA_DVMA));
}

void
iommu_dvmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_free: segp %p nsegs %d\n",
	    segs, nsegs));
	bus_dmamem_free(t->_parent, segs, nsegs);
}

/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
iommu_dvmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	struct vm_page *pg;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *pglist;
	int cbit;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	/*
	 * digest flags:
	 */
	cbit = 0;
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	pglist = segs[0]._ds_mlist;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(pg);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va, (unsigned long long)addr | cbit));
		pmap_kenter_pa(va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
iommu_dvmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
	    kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("iommu_dvmamem_unmap");
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}