/*	$NetBSD: iommu.c,v 1.115 2019/02/09 11:27:05 mrg Exp $	*/

/*
 * Copyright (c) 1999, 2000 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001, 2002 Eduardo Horvath
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * UltraSPARC IOMMU support; used by both the sbus and pci code.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iommu.c,v 1.115 2019/02/09 11:27:05 mrg Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#include <sys/bus.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/iommuvar.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/hypervisor.h>

#ifdef DEBUG
#define IDB_BUSDMA	0x1
#define IDB_IOMMU	0x2
#define IDB_INFO	0x4
#define IDB_SYNC	0x8
int iommudebug = 0x0;
#define DPRINTF(l, s)	do { if (iommudebug & l) printf s; } while (0)
#define IOTTE_DEBUG(n)	(n)
#else
#define DPRINTF(l, s)
#define IOTTE_DEBUG(n)	0
#endif

#define iommu_strbuf_flush(i, v)	do {				\
	if ((i)->sb_flush)						\
		bus_space_write_8((i)->sb_is->is_bustag, (i)->sb_sb,	\
			STRBUFREG(strbuf_pgflush), (v));		\
	} while (0)

static int iommu_strbuf_flush_done(struct strbuf_ctl *);
static void _iommu_dvmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	bus_size_t, int);
static void iommu_enter_sun4u(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags);
static void iommu_enter_sun4v(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags);
static void iommu_remove_sun4u(struct iommu_state *is, vaddr_t va, size_t len);
static void iommu_remove_sun4v(struct iommu_state *is, vaddr_t va, size_t len);

/*
 * initialise the UltraSPARC IOMMU (SBUS or PCI):
 *	- allocate and setup the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(char *name, struct iommu_state *is, int tsbsize, uint32_t iovabase)
{
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;
	struct pglist pglist;

	DPRINTF(IDB_INFO, ("iommu_init: tsbsize %x iovabase %x\n", tsbsize, iovabase));

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS or PCI controller so we
	 * will deal with it here.
	 *
	 * For sysio and psycho/psycho+ the IOMMU address space always ends at
	 * 0xffffe000, but the starting address depends on the size of the
	 * map.  The map size is 1024 * 2 ^ is->is_tsbsize entries, where each
	 * entry is 8 bytes.  The start of the map can be calculated by
	 * (0xffffe000 << (8 + is->is_tsbsize)).
	 *
	 * But sabre and hummingbird use a different scheme that seems to
	 * be hard-wired, so we read the start and size from the PROM and
	 * just use those values.
	 */
	if (strncmp(name, "pyro", 4) == 0) {
		is->is_cr = IOMMUREG_READ(is, iommu_cr);
		is->is_cr &= ~IOMMUCR_FIRE_BE;
		is->is_cr |= (IOMMUCR_FIRE_SE | IOMMUCR_FIRE_CM_EN |
		    IOMMUCR_FIRE_TE);
	} else
		is->is_cr = IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	if (iovabase == -1) {
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);
		is->is_dvmaend = IOTSB_VEND - 1;
	} else {
		is->is_dvmabase = iovabase;
		is->is_dvmaend = iovabase + IOTSB_VSIZE(tsbsize) - 1;
	}

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */
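	/*
	 * Added sizing illustration (not from the original source; it
	 * assumes the standard 8KB sparc64 PAGE_SIZE): tsbsize == 3 gives
	 * a TSB of PAGE_SIZE << 3 = 64KB, i.e. 8192 eight-byte IOTTEs.
	 * Since each IOTTE translates one 8KB DVMA page, such a TSB covers
	 * 8192 * 8KB = 64MB of DVMA space ending at IOTSB_VEND.
	 */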

	size = PAGE_SIZE << is->is_tsbsize;
	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
	    (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
		panic("iommu_init: no memory");

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("iommu_init: no memory");
	is->is_tsb = (int64_t *)va;

	is->is_ptsb = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));

	/* Map the pages */
	TAILQ_FOREACH(pg, &pglist, pageq.queue) {
		pa = VM_PAGE_TO_PHYS(pg);
		pmap_kenter_pa(va, pa | PMAP_NVC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(is->is_tsb, 0, size);

#ifdef DEBUG
	if (iommudebug & IDB_INFO)
	{
		/* Probe the iommu */
		if (!CPU_ISSUN4V) {
			printf("iommu cr=%llx tsb=%llx\n",
			    (unsigned long long)bus_space_read_8(is->is_bustag,
				is->is_iommu,
				offsetof(struct iommureg, iommu_cr)),
			    (unsigned long long)bus_space_read_8(is->is_bustag,
				is->is_iommu,
				offsetof(struct iommureg, iommu_tsb)));
			printf("TSB base %p phys %llx\n", (void *)is->is_tsb,
			    (unsigned long long)is->is_ptsb);
			delay(1000000); /* 1 s */
		}
	}
#endif

	/*
	 * Now that the hardware is working, we need to allocate a DVMA map.
	 */
	aprint_debug("DVMA map: %x to %x\n",
	    (unsigned int)is->is_dvmabase,
	    (unsigned int)is->is_dvmaend);
	aprint_debug("IOTSB: %llx to %llx\n",
	    (unsigned long long)is->is_ptsb,
	    (unsigned long long)(is->is_ptsb + size - 1));
	is->is_dvmamap = extent_create(name,
	    is->is_dvmabase, is->is_dvmaend,
	    0, 0, EX_NOWAIT);
	if (!is->is_dvmamap)
		panic("iommu_init: extent_create() failed");

	mutex_init(&is->is_lock, MUTEX_DEFAULT, IPL_HIGH);

	/*
	 * Set the TSB size.  The relevant bits were moved to the TSB
	 * base register in the PCIe host bridges.
	 */
	if (is->is_flags & IOMMU_TSBSIZE_IN_PTSB)
		is->is_ptsb |= is->is_tsbsize;
	else
		is->is_cr |= (is->is_tsbsize << 16);

	/*
	 * now actually start up the IOMMU
	 */
	iommu_reset(is);
}

/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them.  If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not remain set.
 */
void
iommu_reset(struct iommu_state *is)
{
	int i;
	struct strbuf_ctl *sb;

	if (CPU_ISSUN4V)
		return;

	IOMMUREG_WRITE(is, iommu_tsb, is->is_ptsb);

	/* Enable IOMMU in diagnostic mode */
	IOMMUREG_WRITE(is, iommu_cr, is->is_cr|IOMMUCR_DE);

	for (i = 0; i < 2; i++) {
		if ((sb = is->is_sb[i])) {

			/* Enable diagnostics mode? */
			bus_space_write_8(is->is_bustag, is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl), STRBUF_EN);

			membar_Lookaside();

			/* No streaming buffers? Disable them */
			if (bus_space_read_8(is->is_bustag,
			    is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl)) == 0) {
				is->is_sb[i]->sb_flush = NULL;
			} else {

				/*
				 * locate the pa of the flush buffer.
				 */
				if (pmap_extract(pmap_kernel(),
				    (vaddr_t)is->is_sb[i]->sb_flush,
				    &is->is_sb[i]->sb_flushpa) == FALSE)
					is->is_sb[i]->sb_flush = NULL;
			}
		}
	}

	if (is->is_flags & IOMMU_FLUSH_CACHE)
		IOMMUREG_WRITE(is, iommu_cache_invalidate, -1ULL);
}

/*
 * Here are the iommu control routines.
 */
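/*
 * Added usage sketch (derived from iommu_dvmamap_load()/_unload() below,
 * not part of the original comments): mappings are entered one 8KB page
 * at a time and torn down as a whole DVMA range, roughly:
 *
 *	iommu_enter(sb, trunc_page(dvmaddr), trunc_page(pa), flags);
 *	...
 *	iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);
 */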

void
iommu_enter(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	DPRINTF(IDB_IOMMU, ("iommu_enter: va %lx pa %lx flags %x\n",
	    va, (long)pa, flags));
	if (!CPU_ISSUN4V)
		iommu_enter_sun4u(sb, va, pa, flags);
	else
		iommu_enter_sun4v(sb, va, pa, flags);
}


void
iommu_enter_sun4u(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	struct iommu_state *is = sb->sb_is;
	int strbuf = (flags & BUS_DMA_STREAMING);
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_enter: va %#lx not in DVMA space", va);
#endif

	/* Is the streamcache flush really needed? */
	if (sb->sb_flush)
		iommu_strbuf_flush(sb, va);
	else
		/* If we can't flush the strbuf don't enable it. */
		strbuf = 0;

	tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
	    !(flags & BUS_DMA_NOCACHE), (strbuf));
#ifdef DEBUG
	tte |= (flags & 0xff000LL)<<(4*8);
#endif

	is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = tte;
	bus_space_write_8(is->is_bustag, is->is_iommu,
	    IOMMUREG(iommu_flush), va);
	DPRINTF(IDB_IOMMU, ("iommu_enter: slot %d va %lx pa %lx "
	    "TSB[%lx]@%p=%lx\n", (int)IOTSBSLOT(va,is->is_tsbsize),
	    va, (long)pa, (u_long)IOTSBSLOT(va,is->is_tsbsize),
	    (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
	    (u_long)tte));
}

void
iommu_enter_sun4v(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	struct iommu_state *is = sb->sb_is;
	u_int64_t tsbid = IOTSBSLOT(va, is->is_tsbsize);
	paddr_t page_list[1], addr;
	u_int64_t attr, nmapped;
	int err;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || (va + PAGE_MASK) > is->is_dvmaend)
		panic("viommu_enter: va %#lx not in DVMA space", va);
#endif

	attr = PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE;
	if (flags & BUS_DMA_READ)
		attr &= ~PCI_MAP_ATTR_READ;
	if (flags & BUS_DMA_WRITE)
		attr &= ~PCI_MAP_ATTR_WRITE;

	page_list[0] = trunc_page(pa);
	if (!pmap_extract(pmap_kernel(), (vaddr_t)page_list, &addr))
		panic("viommu_enter: pmap_extract failed");
	err = hv_pci_iommu_map(is->is_devhandle, tsbid, 1, attr,
	    addr, &nmapped);
	if (err != H_EOK || nmapped != 1)
		panic("hv_pci_iommu_map: err=%d, nmapped=%lu", err,
		    (long unsigned int)nmapped);
}

/*
 * Find the value of a DVMA address (debug routine).
 */
paddr_t
iommu_extract(struct iommu_state *is, vaddr_t dva)
{
	int64_t tte = 0;

	if (dva >= is->is_dvmabase && dva <= is->is_dvmaend)
		tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)];

	if ((tte & IOTTE_V) == 0)
		return ((paddr_t)-1L);
	return (tte & IOTTE_PAMASK);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 *
 * Only demap from IOMMU if flag is set.
 *
 * XXX: this function needs better internal error checking.
 */
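/*
 * Added note (not in the original comments): the ordering constraint
 * lives in iommu_dvmamap_unload() below -- on hardware that sets
 * IOMMU_SYNC_BEFORE_UNMAP the parent map is unloaded (flushing the
 * caches) before iommu_remove() is called, otherwise afterwards.
 */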


void
iommu_remove(struct iommu_state *is, vaddr_t va, size_t len)
{
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx len %zu\n", va, len));
	if (!CPU_ISSUN4V)
		iommu_remove_sun4u(is, va, len);
	else
		iommu_remove_sun4v(is, va, len);
}

void
iommu_remove_sun4u(struct iommu_state *is, vaddr_t va, size_t len)
{

	int slot;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if ((long)(va + len) < (long)va)
		panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
		    (long) va, (long) len);
	if (len & ~0xfffffff)
		panic("iommu_remove: ridiculous len 0x%lx", (u_long)len);
#endif

	va = trunc_page(va);
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
	    va, (u_long)IOTSBSLOT(va, is->is_tsbsize),
	    &is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)]));
	while (len > 0) {
		DPRINTF(IDB_IOMMU, ("iommu_remove: clearing TSB slot %d "
		    "for va %p size %lx\n",
		    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va,
		    (u_long)len));
		if (len <= PAGE_SIZE)
			len = 0;
		else
			len -= PAGE_SIZE;

#if 0
		/*
		 * XXX Zeroing the entry would not require RMW.
		 *
		 * Disabling the valid bit while a page is still in use
		 * by a device causes an uncorrectable DMA error.  The
		 * workaround is to eliminate the next line, but then the
		 * page stays mapped until the next iommu_enter call.
		 */
		is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] &= ~IOTTE_V;
		membar_StoreStore();
#endif
		IOMMUREG_WRITE(is, iommu_flush, va);

		/* Flush cache if necessary. */
		slot = IOTSBSLOT(trunc_page(va), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (len == 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		va += PAGE_SIZE;
	}
}

void
iommu_remove_sun4v(struct iommu_state *is, vaddr_t va, size_t len)
{
	u_int64_t tsbid = IOTSBSLOT(va, is->is_tsbsize);
	u_int64_t ndemapped;
	int err;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || (va + PAGE_MASK) > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if (va != trunc_page(va)) {
		printf("iommu_remove: unaligned va: %lx\n", va);
		va = trunc_page(va);
	}
#endif

	err = hv_pci_iommu_demap(is->is_devhandle, tsbid, 1, &ndemapped);
	if (err != H_EOK || ndemapped != 1)
		panic("hv_pci_iommu_unmap: err=%d", err);
}

static int
iommu_strbuf_flush_done(struct strbuf_ctl *sb)
{
	struct iommu_state *is = sb->sb_is;
	struct timeval cur, flushtimeout;

#define BUMPTIME(t, usec) {					\
	register volatile struct timeval *tp = (t);		\
	register long us;					\
								\
	tp->tv_usec = us = tp->tv_usec + (usec);		\
	if (us >= 1000000) {					\
		tp->tv_usec = us - 1000000;			\
		tp->tv_sec++;					\
	}							\
}

	if (!sb->sb_flush)
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
	 *     we're not on a cache line boundary (64-bits):
	 *   2 Store 0 in flag
	 *   3 Store pointer to flag in flushsync
	 *   4 wait till flushsync becomes 0x1
	 *
	 * If it takes more than .5 sec, something
	 * went wrong.
	 */

	*sb->sb_flush = 0;
	bus_space_write_8(is->is_bustag, sb->sb_sb,
	    STRBUFREG(strbuf_flushsync), sb->sb_flushpa);

	microtime(&flushtimeout);
	cur = flushtimeout;
	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */

	DPRINTF(IDB_IOMMU, ("%s: flush = %lx at va = %lx pa = %lx now="
	    "%"PRIx64":%"PRIx32" until = %"PRIx64":%"PRIx32"\n", __func__,
	    (long)*sb->sb_flush, (long)sb->sb_flush, (long)sb->sb_flushpa,
	    cur.tv_sec, cur.tv_usec,
	    flushtimeout.tv_sec, flushtimeout.tv_usec));

	/* Bypass non-coherent D$ */
	while ((!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) &&
	    timercmp(&cur, &flushtimeout, <=))
		microtime(&cur);

#ifdef DIAGNOSTIC
	if (!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) {
		printf("%s: flush timeout %p, at %p\n", __func__,
		    (void *)(u_long)*sb->sb_flush,
		    (void *)(u_long)sb->sb_flushpa);	/* panic? */
#ifdef DDB
		Debugger();
#endif
	}
#endif
	DPRINTF(IDB_IOMMU, ("%s: flushed\n", __func__));
	return (*sb->sb_flush);
}

/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
int
iommu_dvmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	bus_size_t buflen, struct proc *p, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int err, needsflush;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long dvmaddr, sgstart, sgend, bmask;
	bus_size_t align, boundary, len;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	struct pmap *pmap;
	int slot;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * Make sure that on an error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load(): error %d > %d -- "
		    "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = uimax(map->dm_segs[0]._ds_align, PAGE_SIZE);

	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	KASSERT(is->is_dvmamap);
	mutex_enter(&is->is_lock);
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    EX_NOWAIT|EX_BOUNDZERO, &dvmaddr);
	mutex_exit(&is->is_lock);

#ifdef DEBUG
	if (err || (dvmaddr == (u_long)-1)) {
		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (err != 0)
		return (err);

	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	/*
	 * Now split the DVMA range into segments, not crossing
	 * the boundary.
	 */
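	/*
	 * Added worked example (illustrative numbers only, not from the
	 * original source): with boundary = 0x10000, sgstart = 0xff00f000
	 * and buflen = 0x3000, sgend = 0xff011fff lies in the next 64KB
	 * window, so the first segment gets
	 * boundary - (sgstart & (boundary - 1)) = 0x1000 bytes and the
	 * remaining 0x2000 bytes start a new segment at 0xff010000.
	 */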
	seg = 0;
	sgstart = dvmaddr + (vaddr & PGOFSET);
	sgend = sgstart + buflen - 1;
	map->dm_segs[seg].ds_addr = sgstart;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary - 1 %lx "
	    "~(boundary - 1) %lx\n", (long)boundary, (long)(boundary - 1),
	    (long)~(boundary - 1)));
	bmask = ~(boundary - 1);
	while ((sgstart & bmask) != (sgend & bmask) ||
	    sgend - sgstart + 1 > map->dm_maxsegsz) {
		/* Oops. We crossed a boundary or large seg. Split the xfer. */
		len = map->dm_maxsegsz;
		if ((sgstart & bmask) != (sgend & bmask))
			len = uimin(len, boundary - (sgstart & (boundary - 1)));
		map->dm_segs[seg].ds_len = len;
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
		    "seg %d start %lx size %lx\n", seg,
		    (long)map->dm_segs[seg].ds_addr,
		    (long)map->dm_segs[seg].ds_len));
		if (++seg >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
			    "too many segments %d\n", seg));
			mutex_enter(&is->is_lock);
			err = extent_free(is->is_dvmamap,
			    dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			mutex_exit(&is->is_lock);
			if (err != 0)
				printf("warning: %s: %" PRId64
				    " of DVMA space lost\n", __func__, sgsize);
			return (EFBIG);
		}
		sgstart += len;
		map->dm_segs[seg].ds_addr = sgstart;
	}
	map->dm_segs[seg].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
	    "seg %d start %lx size %lx\n", seg,
	    (long)map->dm_segs[seg].ds_addr, (long)map->dm_segs[seg].ds_len));
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = buflen;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	needsflush = 0;
	for (; buflen > 0; ) {

		/*
		 * Get the physical address for this page.
		 */
		if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
#ifdef DIAGNOSTIC
			printf("iommu_dvmamap_load: pmap_extract failed %lx\n", vaddr);
#endif
			bus_dmamap_unload(t, map);
			return (-1);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load: map %p loading va %p "
		    "dva %lx at pa %lx\n",
		    map, (void *)vaddr, (long)dvmaddr,
		    (long)trunc_page(curaddr)));
		iommu_enter(sb, trunc_page(dvmaddr), trunc_page(curaddr),
		    flags | IOTTE_DEBUG(0x4000));
		needsflush = 1;

		vaddr += sgsize;
		buflen -= sgsize;

		/* Flush cache if necessary. */
		slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (buflen <= 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		dvmaddr += PAGE_SIZE;
	}
	if (needsflush)
		iommu_strbuf_flush_done(sb);
#ifdef DIAGNOSTIC
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
#endif
	return (0);
}


void
iommu_dvmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int error;
	bus_size_t sgsize = map->_dm_dvmasize;

	/* Flush the iommu */
	if (!map->_dm_dvmastart)
		panic("%s: error dvmastart is zero!\n", __func__);

	if (is->is_flags & IOMMU_SYNC_BEFORE_UNMAP) {

		/* Flush the caches */
		bus_dmamap_unload(t->_parent, map);

		iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

	} else {

		iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

		/* Flush the caches */
		bus_dmamap_unload(t->_parent, map);
	}

	mutex_enter(&is->is_lock);
	error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
	    map->_dm_dvmasize, EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	mutex_exit(&is->is_lock);
	if (error != 0)
		printf("warning: %s: %" PRId64 " of DVMA space lost\n",
		    __func__, sgsize);

	/* Clear the map */
}


int
iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
	bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	struct vm_page *pg;
	int i, j;
	int left;
	int err, needsflush;
	bus_size_t sgsize;
	paddr_t pa;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend, bmask;
	struct pglist *pglist;
	const int pagesz = PAGE_SIZE;
	int slot;
#ifdef DEBUG
	int npg = 0;
#endif

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_raw: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = uimax(segs[0]._ds_align, pagesz);

	/*
	 * Make sure that on an error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	/* Count up the total number of pages we need */
	pa = trunc_page(segs[0].ds_addr);
	sgsize = 0;
	left = size;
	for (i = 0; left > 0 && i < nsegs; i++) {
		if (round_page(pa) != round_page(segs[i].ds_addr))
			sgsize = round_page(sgsize) +
			    (segs[i].ds_addr & PGOFSET);
		sgsize += uimin(left, segs[i].ds_len);
		left -= segs[i].ds_len;
		pa = segs[i].ds_addr + segs[i].ds_len;
	}
	sgsize = round_page(sgsize);

	mutex_enter(&is->is_lock);
	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
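	/*
	 * Added note (not in the original comments): unlike
	 * iommu_dvmamap_load() above, which always allocates DVMA space
	 * with EX_NOWAIT, this path may sleep for space unless the caller
	 * passed BUS_DMA_NOWAIT.
	 */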
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT) |
	    EX_BOUNDZERO, &dvmaddr);
	mutex_exit(&is->is_lock);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (u_long)-1)
	{
		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	bmask = ~(boundary - 1);
	if ((pglist = segs[0]._ds_mlist) == NULL) {
		u_long prev_va = 0UL, last_va = dvmaddr;
		paddr_t prev_pa = 0;
		int end = 0, offset;
		bus_size_t len = size;

		/*
		 * This segs is made up of individual physical
		 * segments, probably by _bus_dmamap_load_uio() or
		 * _bus_dmamap_load_mbuf().  Ignore the mlist and
		 * load each one individually.
		 */
		j = 0;
		needsflush = 0;
		for (i = 0; i < nsegs ; i++) {

			pa = segs[i].ds_addr;
			offset = (pa & PGOFSET);
			pa = trunc_page(pa);
			dvmaddr = trunc_page(dvmaddr);
			left = uimin(len, segs[i].ds_len);

			DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: converting "
			    "physseg %d start %lx size %lx\n", i,
			    (long)segs[i].ds_addr, (long)segs[i].ds_len));

			if ((pa == prev_pa) &&
			    ((offset != 0) || (end != offset))) {
				/* We can re-use this mapping */
				dvmaddr = prev_va;
			}

			sgstart = dvmaddr + offset;
			sgend = sgstart + left - 1;

			/* Are the segments virtually adjacent? */
			if ((j > 0) && (end == offset) &&
			    ((offset == 0) || (pa == prev_pa)) &&
			    (map->dm_segs[j-1].ds_len + left <=
			    map->dm_maxsegsz)) {
				/* Just append to the previous segment. */
				map->dm_segs[--j].ds_len += left;
				/* Restore sgstart for boundary check */
				sgstart = map->dm_segs[j].ds_addr;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "appending seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			} else {
				if (j >= map->_dm_segcnt) {
					iommu_remove(is, map->_dm_dvmastart,
					    last_va - map->_dm_dvmastart);
					goto fail;
				}
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			}
			end = (offset + left) & PGOFSET;

			/* Check for boundary issues */
			while ((sgstart & bmask) != (sgend & bmask)) {
				/* Need a new segment. */
				map->dm_segs[j].ds_len =
				    boundary - (sgstart & (boundary - 1));
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
				if (++j >= map->_dm_segcnt) {
					iommu_remove(is, map->_dm_dvmastart,
					    last_va - map->_dm_dvmastart);
					goto fail;
				}
				sgstart += map->dm_segs[j-1].ds_len;
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = sgend - sgstart + 1;
			}

			if (sgsize == 0)
				panic("iommu_dmamap_load_raw: size botch");

			/* Now map a series of pages. */
			while (dvmaddr <= sgend) {
				DPRINTF(IDB_BUSDMA,
				    ("iommu_dvmamap_load_raw: map %p "
				    "loading va %lx at pa %lx\n",
				    map, (long)dvmaddr,
				    (long)(pa)));
				/* Enter it if we haven't before. */
				if (prev_va != dvmaddr) {
					iommu_enter(sb, prev_va = dvmaddr,
					    prev_pa = pa,
					    flags | IOTTE_DEBUG(++npg << 12));
					needsflush = 1;

					/* Flush cache if necessary. */
					slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
					if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
					    ((dvmaddr + pagesz) > sgend || (slot % 8) == 7))
						IOMMUREG_WRITE(is, iommu_cache_flush,
						    is->is_ptsb + slot * 8);
				}

				dvmaddr += pagesz;
				pa += pagesz;
				last_va = dvmaddr;
			}

			len -= left;
			++j;
		}
		if (needsflush)
			iommu_strbuf_flush_done(sb);

		map->dm_mapsize = size;
		map->dm_nsegs = j;
#ifdef DIAGNOSTIC
		{ int seg;
		for (seg = 0; seg < map->dm_nsegs; seg++) {
			if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
			    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
				printf("seg %d dvmaddr %lx out of range %x - %x\n",
				    seg, (long)map->dm_segs[seg].ds_addr,
				    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
				Debugger();
#endif
			}
		}
		}
#endif
		return (0);
	}

	/*
	 * This was allocated with bus_dmamem_alloc.
	 * The pages are on a `pglist'.
	 */
	i = 0;
	sgstart = dvmaddr;
	sgend = sgstart + size - 1;
	map->dm_segs[i].ds_addr = sgstart;
	while ((sgstart & bmask) != (sgend & bmask)) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		map->dm_segs[i].ds_len = boundary - (sgstart & (boundary - 1));
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
		    "seg %d start %lx size %lx\n", i,
		    (long)map->dm_segs[i].ds_addr,
		    (long)map->dm_segs[i].ds_len));
		if (++i >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			goto fail;
		}
		sgstart += map->dm_segs[i-1].ds_len;
		map->dm_segs[i].ds_addr = sgstart;
	}
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
	    "seg %d start %lx size %lx\n", i,
	    (long)map->dm_segs[i].ds_addr, (long)map->dm_segs[i].ds_len));
	map->dm_segs[i].ds_len = sgend - sgstart + 1;

	needsflush = 0;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(pg);

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load_raw: map %p loading va %lx at pa %lx\n",
		    map, (long)dvmaddr, (long)(pa)));
		iommu_enter(sb, dvmaddr, pa, flags | IOTTE_DEBUG(0x8000));
		needsflush = 1;

		sgsize -= pagesz;

		/* Flush cache if necessary. */
		slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (sgsize == 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		dvmaddr += pagesz;
	}
	if (needsflush)
		iommu_strbuf_flush_done(sb);
	map->dm_mapsize = size;
	map->dm_nsegs = i+1;
#ifdef DIAGNOSTIC
	{ int seg;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
	}
#endif
	return (0);

fail:
	mutex_enter(&is->is_lock);
	err = extent_free(is->is_dvmamap, map->_dm_dvmastart, sgsize,
	    EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	mutex_exit(&is->is_lock);
	if (err != 0)
		printf("warning: %s: %" PRId64 " of DVMA space lost\n",
		    __func__, sgsize);
	return (EFBIG);
}


/*
 * Flush an individual dma segment, returns non-zero if the streaming buffers
 * need flushing afterwards.
 */
static int
iommu_dvmamap_sync_range(struct strbuf_ctl *sb, vaddr_t va, bus_size_t len)
{
	vaddr_t vaend;
	struct iommu_state *is = sb->sb_is;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("invalid va: %llx", (long long)va);
#endif

	if ((is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] & IOTTE_STREAM) == 0) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync_range: attempting to flush "
		    "non-streaming entry\n"));
		return (0);
	}

	vaend = round_page(va + len) - 1;
	va = trunc_page(va);

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || vaend > is->is_dvmaend)
		panic("invalid va range: %llx to %llx (%x to %x)",
		    (long long)va, (long long)vaend,
		    is->is_dvmabase,
		    is->is_dvmaend);
#endif

	for ( ; va <= vaend; va += PAGE_SIZE) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync_range: flushing va %p\n",
		    (void *)(u_long)va));
		iommu_strbuf_flush(sb, va);
	}

	return (1);
}

static void
_iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	bus_size_t count;
	int i, needsflush = 0;

	if (!sb->sb_flush)
		return;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (offset < map->dm_segs[i].ds_len)
			break;
		offset -= map->dm_segs[i].ds_len;
	}

	if (i == map->dm_nsegs)
		panic("%s: segment too short %llu", __func__,
		    (unsigned long long)offset);

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Nothing to do */;
	}

	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE)) {

		for (; len > 0 && i < map->dm_nsegs; i++) {
			count = MIN(map->dm_segs[i].ds_len - offset, len);
			if (count > 0 &&
			    iommu_dvmamap_sync_range(sb,
				map->dm_segs[i].ds_addr + offset, count))
				needsflush = 1;
			offset = 0;
			len -= count;
		}
#ifdef DIAGNOSTIC
		if (i == map->dm_nsegs && len > 0)
			panic("%s: leftover %llu", __func__,
			    (unsigned long long)len);
#endif

		if (needsflush)
			iommu_strbuf_flush_done(sb);
	}
}

void
iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{

	/* If len is 0, then there is nothing to do */
	if (len == 0)
		return;

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) {
		/* Flush the CPU then the IOMMU */
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
		_iommu_dvmamap_sync(t, map, offset, len, ops);
	}
	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Flush the IOMMU then the CPU */
		_iommu_dvmamap_sync(t, map, offset, len, ops);
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
	}
}

int
iommu_dvmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_alloc: sz %llx align %llx bound %llx "
	    "segp %p flags %d\n", (unsigned long long)size,
	    (unsigned long long)alignment, (unsigned long long)boundary,
	    segs, flags));
	return (bus_dmamem_alloc(t->_parent, size, alignment, boundary,
	    segs, nsegs, rsegs, flags|BUS_DMA_DVMA));
}

void
iommu_dvmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_free: segp %p nsegs %d\n",
	    segs, nsegs));
	bus_dmamem_free(t->_parent, segs, nsegs);
}

/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
iommu_dvmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	struct vm_page *pg;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *pglist;
	int cbit;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	/*
	 * digest flags:
	 */
	cbit = 0;
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	pglist = segs[0]._ds_mlist;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(pg);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va, (unsigned long long)addr | cbit));
		pmap_kenter_pa(va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
iommu_dvmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
	    kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("iommu_dvmamem_unmap");
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
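
/*
 * Added end-to-end sketch (illustrative only; "sc" and its members are
 * hypothetical driver state, not part of this file).  A driver using an
 * IOMMU-backed DMA tag goes through the standard bus_dma(9) calls, which
 * the sbus/pci bus_dma tags route to the iommu_dvma* hooks above:
 *
 *	bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap);
 *	bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, size,
 *	    NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, size,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *	... start the DMA and wait for it to complete ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, size,
 *	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 */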