/*	$NetBSD: iommu.c,v 1.75 2004/07/01 06:40:36 petrov Exp $	*/

/*
 * Copyright (c) 2001, 2002 Eduardo Horvath
 * Copyright (c) 1999, 2000 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * UltraSPARC IOMMU support; used by both the sbus and pci code.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iommu.c,v 1.75 2004/07/01 06:40:36 petrov Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <sparc64/sparc64/cache.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/iommuvar.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>

#ifdef DEBUG
#define IDB_BUSDMA	0x1
#define IDB_IOMMU	0x2
#define IDB_INFO	0x4
#define IDB_SYNC	0x8
int iommudebug = 0x0;
#define DPRINTF(l, s)	do { if (iommudebug & l) printf s; } while (0)
#else
#define DPRINTF(l, s)
#endif

#define iommu_strbuf_flush(i, v) do {					\
	if ((i)->sb_flush)						\
		bus_space_write_8((i)->sb_is->is_bustag, (i)->sb_sb,	\
		    STRBUFREG(strbuf_pgflush), (v));			\
	} while (0)

static int iommu_strbuf_flush_done __P((struct strbuf_ctl *));

/*
 * initialise the UltraSPARC IOMMU (SBUS or PCI):
 *	- allocate and setup the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(name, is, tsbsize, iovabase)
	char *name;
	struct iommu_state *is;
	int tsbsize;
	u_int32_t iovabase;
{
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;
	struct pglist pglist;

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS or PCI controller so we will
	 * deal with it here..
	 *
	 * For sysio and psycho/psycho+ the IOMMU address space always ends at
	 * 0xffffe000, but the starting address depends on the size of the
	 * map.  The map size is 1024 * 2 ^ is->is_tsbsize entries, where each
	 * entry is 8 bytes.  The start of the map can be calculated by
	 * (0xffffe000 << (8 + is->is_tsbsize)).
	 *
	 * But sabre and hummingbird use a different scheme that seems to
	 * be hard-wired, so we read the start and size from the PROM and
	 * just use those values.
	 */
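
	/*
	 * Illustrative numbers (assuming the 8KB page size used below):
	 * for tsbsize == 3 the table holds 1024 << 3 == 8192 IOTTEs of
	 * 8 bytes each, i.e. 64KB of TSB (PAGE_SIZE << tsbsize), and it
	 * maps 8192 * 8KB == 64MB of DVMA space.  Each increment of
	 * tsbsize doubles both figures.
	 */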
	is->is_cr = (tsbsize << 16) | IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	if (iovabase == -1) {
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);
		is->is_dvmaend = IOTSB_VEND;
	} else {
		is->is_dvmabase = iovabase;
		is->is_dvmaend = iovabase + IOTSB_VSIZE(tsbsize);
	}

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

	size = PAGE_SIZE << is->is_tsbsize;
	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
	    (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
		panic("iommu_init: no memory");

	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		panic("iommu_init: no memory");
	is->is_tsb = (int64_t *)va;

	is->is_ptsb = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));

	/* Map the pages */
	TAILQ_FOREACH(pg, &pglist, pageq) {
		pa = VM_PAGE_TO_PHYS(pg);
		pmap_kenter_pa(va, pa | PMAP_NVC, VM_PROT_READ | VM_PROT_WRITE);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(is->is_tsb, 0, size);

#ifdef DEBUG
	if (iommudebug & IDB_INFO)
	{
		/* Probe the iommu */

		printf("iommu regs at: cr=%lx tsb=%lx flush=%lx\n",
		    (u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
			offsetof (struct iommureg, iommu_cr)),
		    (u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
			offsetof (struct iommureg, iommu_tsb)),
		    (u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
			offsetof (struct iommureg, iommu_flush)));
		printf("iommu cr=%llx tsb=%llx\n",
		    (unsigned long long)bus_space_read_8(is->is_bustag,
			is->is_iommu,
			offsetof (struct iommureg, iommu_cr)),
		    (unsigned long long)bus_space_read_8(is->is_bustag,
			is->is_iommu,
			offsetof (struct iommureg, iommu_tsb)));
		printf("TSB base %p phys %llx\n", (void *)is->is_tsb,
		    (unsigned long long)is->is_ptsb);
		delay(1000000); /* 1 s */
	}
#endif

	/*
	 * now actually start up the IOMMU
	 */
	iommu_reset(is);

	/*
	 * Now all the hardware's working we need to allocate a dvma map.
	 */
	printf("DVMA map: %x to %x\n",
	    (unsigned int)is->is_dvmabase,
	    (unsigned int)is->is_dvmaend);
	printf("IOTSB: %llx to %llx\n",
	    (unsigned long long)is->is_ptsb,
	    (unsigned long long)(is->is_ptsb + size));
	is->is_dvmamap = extent_create(name,
	    is->is_dvmabase, is->is_dvmaend - PAGE_SIZE,
	    M_DEVBUF, 0, 0, EX_NOWAIT);
}

/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them.  If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not remain.
 */
void
iommu_reset(is)
	struct iommu_state *is;
{
	int i;
	struct strbuf_ctl *sb;

	/* Need to do 64-bit stores */
	bus_space_write_8(is->is_bustag, is->is_iommu, IOMMUREG(iommu_tsb),
	    is->is_ptsb);

	/* Enable IOMMU in diagnostic mode */
	bus_space_write_8(is->is_bustag, is->is_iommu, IOMMUREG(iommu_cr),
	    is->is_cr|IOMMUCR_DE);

	for (i = 0; i < 2; i++) {
		if ((sb = is->is_sb[i])) {

			/* Enable diagnostics mode? */
			bus_space_write_8(is->is_bustag, is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl), STRBUF_EN);

			/* No streaming buffers? Disable them */
			if (bus_space_read_8(is->is_bustag,
			    is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl)) == 0) {
				is->is_sb[i]->sb_flush = NULL;
			} else {

				/*
				 * locate the pa of the flush buffer.
				 */
				(void)pmap_extract(pmap_kernel(),
				    (vaddr_t)is->is_sb[i]->sb_flush,
				    &is->is_sb[i]->sb_flushpa);
			}
		}
	}
}
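
/*
 * Illustrative example (using only routines defined in this file): to
 * hand one 8KB page at physical address "pa" to a device at DVMA
 * address "dva" for a streaming transfer, the map-load code below ends
 * up doing the equivalent of
 *
 *	iommu_enter(sb, trunc_page(dva), trunc_page(pa), BUS_DMA_STREAMING);
 *	...
 *	iommu_remove(is, trunc_page(dva), PAGE_SIZE);
 *
 * iommu_enter() builds the IOTTE with MAKEIOTTE(), stores it into
 * is->is_tsb[] and writes the IOMMU flush register so stale entries
 * for that virtual address are discarded.
 */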

/*
 * Here are the iommu control routines.
 */
void
iommu_enter(sb, va, pa, flags)
	struct strbuf_ctl *sb;
	vaddr_t va;
	int64_t pa;
	int flags;
{
	struct iommu_state *is = sb->sb_is;
	int strbuf = (flags & BUS_DMA_STREAMING);
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_enter: va %#lx not in DVMA space", va);
#endif

	/* Is the streamcache flush really needed? */
	if (sb->sb_flush) {
		iommu_strbuf_flush(sb, va);
		iommu_strbuf_flush_done(sb);
	} else
		/* If we can't flush the strbuf don't enable it. */
		strbuf = 0;

	tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
	    !(flags & BUS_DMA_NOCACHE), (strbuf));
#ifdef DEBUG
	tte |= (flags & 0xff000LL)<<(4*8);
#endif

	DPRINTF(IDB_IOMMU, ("Clearing TSB slot %d for va %p\n",
	    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va));
	is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = tte;
	bus_space_write_8(is->is_bustag, is->is_iommu,
	    IOMMUREG(iommu_flush), va);
	DPRINTF(IDB_IOMMU, ("iommu_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
	    va, (long)pa, (u_long)IOTSBSLOT(va,is->is_tsbsize),
	    (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
	    (u_long)tte));
}

/*
 * Find the value of a DVMA address (debug routine).
 */
paddr_t
iommu_extract(is, dva)
	struct iommu_state *is;
	vaddr_t dva;
{
	int64_t tte = 0;

	if (dva >= is->is_dvmabase && dva < is->is_dvmaend)
		tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)];

	if ((tte & IOTTE_V) == 0)
		return ((paddr_t)-1L);
	return (tte & IOTTE_PAMASK);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 *
 * Only demap from IOMMU if flag is set.
 *
 * XXX: this function needs better internal error checking.
 */
void
iommu_remove(is, va, len)
	struct iommu_state *is;
	vaddr_t va;
	size_t len;
{

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if ((long)(va + len) < (long)va)
		panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
		    (long) va, (long) len);
	if (len & ~0xfffffff)
		panic("iommu_remove: ridiculous len 0x%lx", (u_long)len);
#endif

	va = trunc_page(va);
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
	    va, (u_long)IOTSBSLOT(va, is->is_tsbsize),
	    &is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)]));
	while (len > 0) {
		DPRINTF(IDB_IOMMU, ("iommu_remove: clearing TSB slot %d "
		    "for va %p size %lx\n",
		    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va,
		    (u_long)len));
		if (len <= PAGE_SIZE)
			len = 0;
		else
			len -= PAGE_SIZE;

		/* XXX Zero-ing the entry would not require RMW */
		is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] &= ~IOTTE_V;
		bus_space_write_8(is->is_bustag, is->is_iommu,
		    IOMMUREG(iommu_flush), va);
		va += PAGE_SIZE;
	}
}

static int
iommu_strbuf_flush_done(sb)
	struct strbuf_ctl *sb;
{
	struct iommu_state *is = sb->sb_is;
	struct timeval cur, flushtimeout;

#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

	if (!sb->sb_flush)
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
	 *     we're not on a cache line boundary (64-bits):
	 *   2 Store 0 in flag
	 *   3 Store pointer to flag in flushsync
	 *   4 wait till flushsync becomes 0x1
	 *
	 * If it takes more than .5 sec, something
	 * went wrong.
	 */

	*sb->sb_flush = 0;
	bus_space_write_8(is->is_bustag, sb->sb_sb,
	    STRBUFREG(strbuf_flushsync), sb->sb_flushpa);

	microtime(&flushtimeout);
	cur = flushtimeout;
	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */

	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flush = %lx "
	    "at va = %lx pa = %lx now=%lx:%lx until = %lx:%lx\n",
	    (long)*sb->sb_flush, (long)sb->sb_flush, (long)sb->sb_flushpa,
	    cur.tv_sec, cur.tv_usec,
	    flushtimeout.tv_sec, flushtimeout.tv_usec));

	/* Bypass non-coherent D$ */
	while ((!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) &&
	    timercmp(&cur, &flushtimeout, <=))
		microtime(&cur);

#ifdef DIAGNOSTIC
	if (!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) {
		printf("iommu_strbuf_flush_done: flush timeout %p, at %p\n",
		    (void *)(u_long)*sb->sb_flush,
		    (void *)(u_long)sb->sb_flushpa); /* panic? */
#ifdef DDB
		Debugger();
#endif
	}
#endif
	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flushed\n"));
	return (*sb->sb_flush);
}
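
/*
 * The SBUS and PCI bus_dma front ends wrap the operations below; as a
 * rough illustration, a single transfer runs through them in this order:
 *
 *	iommu_dvmamap_load(t, sb, map, buf, buflen, p, flags);
 *	iommu_dvmamap_sync(t, sb, map, off, len, BUS_DMASYNC_PREWRITE);
 *	... device performs the DMA ...
 *	iommu_dvmamap_sync(t, sb, map, off, len, BUS_DMASYNC_POSTREAD);
 *	iommu_dvmamap_unload(t, sb, map);
 *
 * load sets up the IOTTEs and reserves DVMA space from the extent map,
 * sync flushes the streaming buffers when they are in use, and unload
 * tears the mappings down and returns the DVMA range to the extent map.
 */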

/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
int
iommu_dvmamap_load(t, sb, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	struct strbuf_ctl *sb;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct iommu_state *is = sb->sb_is;
	int s;
	int err;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long dvmaddr, sgstart, sgend;
	bus_size_t align, boundary, len;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	struct pmap *pmap;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	if (buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load(): error %d > %d -- "
		    "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = max(map->dm_segs[0]._ds_align, PAGE_SIZE);

	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	s = splhigh();
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    EX_NOWAIT|EX_BOUNDZERO, &dvmaddr);
	splx(s);

#ifdef DEBUG
	if (err || (dvmaddr == (u_long)-1)) {
		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (err != 0)
		return (err);

	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	/*
	 * Now split the DVMA range into segments, not crossing
	 * the boundary.
	 */
	seg = 0;
	sgstart = dvmaddr + (vaddr & PGOFSET);
	sgend = sgstart + buflen - 1;
	map->dm_segs[seg].ds_addr = sgstart;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary - 1 %lx "
	    "~(boundary - 1) %lx\n", (long)boundary, (long)(boundary - 1),
	    (long)~(boundary - 1)));
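
	/*
	 * Worked example (illustrative): with a 64KB boundary, a request
	 * spanning DVMA 0x1f000-0x21fff crosses the 0x20000 line, so the
	 * loop below emits a 0x1000-byte segment ending at 0x1ffff and
	 * leaves 0x20000-0x21fff for the final segment stored after the
	 * loop.
	 */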
	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		len = boundary - (sgstart & (boundary - 1));
		map->dm_segs[seg].ds_len = len;
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
		    "seg %d start %lx size %lx\n", seg,
		    (long)map->dm_segs[seg].ds_addr,
		    (long)map->dm_segs[seg].ds_len));
		if (++seg >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
			    "too many segments %d\n", seg));
			s = splhigh();
			/* How can this fail?  And if it does what can we do? */
			err = extent_free(is->is_dvmamap,
			    dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			splx(s);
			return (E2BIG);
		}
		sgstart += len;
		map->dm_segs[seg].ds_addr = sgstart;
	}
	map->dm_segs[seg].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
	    "seg %d start %lx size %lx\n", seg,
	    (long)map->dm_segs[seg].ds_addr, (long)map->dm_segs[seg].ds_len));
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = buflen;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {

		/*
		 * Get the physical address for this page.
		 */
		if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
#ifdef DIAGNOSTIC
			printf("iommu_dvmamap_load: pmap_extract failed %lx\n", vaddr);
#endif
			bus_dmamap_unload(t, map);
			return (-1);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load: map %p loading va %p "
		    "dva %lx at pa %lx\n",
		    map, (void *)vaddr, (long)dvmaddr,
		    (long)(curaddr & ~(PAGE_SIZE-1))));
		iommu_enter(sb, trunc_page(dvmaddr), trunc_page(curaddr),
		    flags|0x4000);

		dvmaddr += PAGE_SIZE;
		vaddr += sgsize;
		buflen -= sgsize;
	}
#ifdef DIAGNOSTIC
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
#endif
	return (0);
}


void
iommu_dvmamap_unload(t, sb, map)
	bus_dma_tag_t t;
	struct strbuf_ctl *sb;
	bus_dmamap_t map;
{
	struct iommu_state *is = sb->sb_is;
	int error, s;
	bus_size_t sgsize = map->_dm_dvmasize;

	/* Flush the iommu */
#ifdef DEBUG
	if (!map->_dm_dvmastart) {
		printf("iommu_dvmamap_unload: dvmastart is zero\n");
#ifdef DDB
		Debugger();
#endif
	}
#endif
	iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

	/* Flush the caches */
	bus_dmamap_unload(t->_parent, map);

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	s = splhigh();
	error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
	    map->_dm_dvmasize, EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	splx(s);
	if (error != 0)
		printf("warning: %qd of DVMA space lost\n", (long long)sgsize);

	/* Clear the map */
}
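
/*
 * Rough sketch of how iommu_dvmamap_load_raw() below is reached: memory
 * obtained via iommu_dvmamem_alloc() keeps its pages on segs[0]._ds_mlist,
 * so such memory typically flows through
 *
 *	iommu_dvmamem_alloc(t, sb, size, align, boundary, segs, nsegs,
 *	    &rsegs, flags);
 *	iommu_dvmamem_map(t, sb, segs, rsegs, size, &kva, flags);
 *	iommu_dvmamap_load_raw(t, sb, map, segs, rsegs, flags, size);
 *
 * while segment lists built by _bus_dmamap_load_uio()/_bus_dmamap_load_mbuf()
 * arrive without an mlist and are converted page by page below.
 */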


int
iommu_dvmamap_load_raw(t, sb, map, segs, nsegs, flags, size)
	bus_dma_tag_t t;
	struct strbuf_ctl *sb;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	int flags;
	bus_size_t size;
{
	struct iommu_state *is = sb->sb_is;
	struct vm_page *pg;
	int i, j, s;
	int left;
	int err;
	bus_size_t sgsize;
	paddr_t pa;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend;
	struct pglist *pglist;
	int pagesz = PAGE_SIZE;
	int npg = 0; /* DEBUG */

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_raw: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = max(segs[0]._ds_align, pagesz);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	/* Count up the total number of pages we need */
	pa = segs[0].ds_addr;
	sgsize = 0;
	left = size;
	for (i = 0; left && i < nsegs; i++) {
		if (round_page(pa) != round_page(segs[i].ds_addr))
			sgsize = round_page(sgsize);
		sgsize += min(left, segs[i].ds_len);
		left -= segs[i].ds_len;
		pa = segs[i].ds_addr + segs[i].ds_len;
	}
	sgsize = round_page(sgsize) + PAGE_SIZE; /* XXX reserve extra dvma page */

	s = splhigh();
	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT) |
	    EX_BOUNDZERO, &dvmaddr);
	splx(s);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (u_long)-1)
	{
		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	if ((pglist = segs[0]._ds_mlist) == NULL) {
		u_long prev_va = 0UL;
		paddr_t prev_pa = 0;
		int end = 0, offset;

		/*
		 * This segs is made up of individual physical
		 * segments, probably by _bus_dmamap_load_uio() or
		 * _bus_dmamap_load_mbuf().  Ignore the mlist and
		 * load each one individually.
		 */
		map->dm_mapsize = size;

		j = 0;
		for (i = 0; i < nsegs ; i++) {

			pa = segs[i].ds_addr;
			offset = (pa & PGOFSET);
			pa = trunc_page(pa);
			dvmaddr = trunc_page(dvmaddr);
			left = min(size, segs[i].ds_len);

			DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: converting "
			    "physseg %d start %lx size %lx\n", i,
			    (long)segs[i].ds_addr, (long)segs[i].ds_len));

			if ((pa == prev_pa) &&
			    ((offset != 0) || (end != offset))) {
				/* We can re-use this mapping */
				dvmaddr = prev_va;
			}

			sgstart = dvmaddr + offset;
			sgend = sgstart + left - 1;

			/* Are the segments virtually adjacent? */
			if ((j > 0) && (end == offset) &&
			    ((offset == 0) || (pa == prev_pa))) {
				/* Just append to the previous segment. */
				map->dm_segs[--j].ds_len += left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "appending seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			} else {
				if (j >= map->_dm_segcnt) {
					iommu_dvmamap_unload(t, sb, map);
					return (E2BIG);
				}
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			}
			end = (offset + left) & PGOFSET;

			/* Check for boundary issues */
			while ((sgstart & ~(boundary - 1)) !=
			    (sgend & ~(boundary - 1))) {
				/* Need a new segment. */
				map->dm_segs[j].ds_len =
				    boundary - (sgstart & (boundary - 1));
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
				if (++j >= map->_dm_segcnt) {
					iommu_dvmamap_unload(t, sb, map);
					return (E2BIG);
				}
				sgstart = roundup(sgstart, boundary);
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = sgend - sgstart + 1;
			}

			if (sgsize == 0)
				panic("iommu_dmamap_load_raw: size botch");

			/* Now map a series of pages. */
			while (dvmaddr <= sgend) {
				DPRINTF(IDB_BUSDMA,
				    ("iommu_dvmamap_load_raw: map %p "
				    "loading va %lx at pa %lx\n",
				    map, (long)dvmaddr,
				    (long)(pa)));
				/* Enter it if we haven't before. */
				if (prev_va != dvmaddr)
					iommu_enter(sb, prev_va = dvmaddr,
					    prev_pa = pa,
					    flags | (++npg << 12));
				dvmaddr += pagesz;
				pa += pagesz;
			}

			size -= left;
			++j;
		}

		map->dm_nsegs = j;
#ifdef DIAGNOSTIC
		{ int seg;
		for (seg = 0; seg < map->dm_nsegs; seg++) {
			if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
			    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
				printf("seg %d dvmaddr %lx out of range %x - %x\n",
				    seg, (long)map->dm_segs[seg].ds_addr,
				    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
				Debugger();
#endif
			}
		}
		}
#endif
		return (0);
	}

	/*
	 * This was allocated with bus_dmamem_alloc.
	 * The pages are on a `pglist'.
	 */
	map->dm_mapsize = size;
	i = 0;
	sgstart = dvmaddr;
	sgend = sgstart + size - 1;
	map->dm_segs[i].ds_addr = sgstart;
	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		map->dm_segs[i].ds_len = boundary - (sgstart & (boundary - 1));
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
		    "seg %d start %lx size %lx\n", i,
		    (long)map->dm_segs[i].ds_addr,
		    (long)map->dm_segs[i].ds_len));
		if (++i >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			s = splhigh();
			/* How can this fail?  And if it does what can we do? */
			err = extent_free(is->is_dvmamap,
			    dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			splx(s);
			return (E2BIG);
		}
		sgstart = roundup(sgstart, boundary);
		map->dm_segs[i].ds_addr = sgstart;
	}
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
	    "seg %d start %lx size %lx\n", i,
	    (long)map->dm_segs[i].ds_addr, (long)map->dm_segs[i].ds_len));
	map->dm_segs[i].ds_len = sgend - sgstart + 1;

	TAILQ_FOREACH(pg, pglist, pageq) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(pg);

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load_raw: map %p loading va %lx at pa %lx\n",
		    map, (long)dvmaddr, (long)(pa)));
		iommu_enter(sb, dvmaddr, pa, flags|0x8000);

		dvmaddr += pagesz;
		sgsize -= pagesz;
	}
	map->dm_mapsize = size;
	map->dm_nsegs = i+1;
#ifdef DIAGNOSTIC
	{ int seg;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
	}
#endif
	return (0);
}


/*
 * Flush an individual dma segment, returns non-zero if the streaming buffers
 * need flushing afterwards.
 */
static int
iommu_dvmamap_sync_range(struct strbuf_ctl *sb, vaddr_t va, bus_size_t len)
{
	vaddr_t vaend;
	struct iommu_state *is = sb->sb_is;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("invalid va: %llx", (long long)va);
#endif

	if ((is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] & IOTTE_STREAM) == 0) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_sync_range: attempting to flush "
		    "non-streaming entry\n"));
		return (0);
	}

	vaend = (va + len + PGOFSET) & ~PGOFSET;
	va &= ~PGOFSET;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || vaend > is->is_dvmaend)
		panic("invalid va range: %llx to %llx (%x to %x)",
		    (long long)va, (long long)vaend,
		    is->is_dvmabase,
		    is->is_dvmaend);
#endif

	for ( ; va <= vaend; va += PAGE_SIZE) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_sync_range: flushing va %p\n",
		    (void *)(u_long)va));
		iommu_strbuf_flush(sb, va);
	}

	return (1);
}
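
/*
 * Sync a DVMA map for the given operations.  Only BUS_DMASYNC_POSTREAD
 * and BUS_DMASYNC_PREWRITE require the streaming buffers to be flushed
 * here; BUS_DMASYNC_PREREAD and BUS_DMASYNC_POSTWRITE are no-ops.  A
 * typical call (illustrative only) covers the whole map:
 *
 *	iommu_dvmamap_sync(t, sb, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_POSTREAD);
 */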
void
iommu_dvmamap_sync(t, sb, map, offset, len, ops)
	bus_dma_tag_t t;
	struct strbuf_ctl *sb;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	bus_size_t count;
	int i, needsflush = 0;

	if (!sb->sb_flush)
		return;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (offset < map->dm_segs[i].ds_len)
			break;
		offset -= map->dm_segs[i].ds_len;
	}

	if (i == map->dm_nsegs)
		panic("iommu_dvmamap_sync: segment too short %llu",
		    (unsigned long long)offset);

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Nothing to do */;
	}

	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE)) {

		for (; len > 0 && i < map->dm_nsegs; i++) {
			count = MIN(map->dm_segs[i].ds_len - offset, len);
			if (count > 0 &&
			    iommu_dvmamap_sync_range(sb,
				map->dm_segs[i].ds_addr + offset, count))
				needsflush = 1;
			offset = 0;
			len -= count;
		}
#ifdef DIAGNOSTIC
		if (i == map->dm_nsegs && len > 0)
			panic("iommu_dvmamap_sync: leftover %llu",
			    (unsigned long long)len);
#endif

		if (needsflush)
			iommu_strbuf_flush_done(sb);
	}
}

int
iommu_dvmamem_alloc(t, sb, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	struct strbuf_ctl *sb;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_alloc: sz %llx align %llx bound %llx "
	    "segp %p flags %d\n", (unsigned long long)size,
	    (unsigned long long)alignment, (unsigned long long)boundary,
	    segs, flags));
	return (bus_dmamem_alloc(t->_parent, size, alignment, boundary,
	    segs, nsegs, rsegs, flags|BUS_DMA_DVMA));
}

void
iommu_dvmamem_free(t, sb, segs, nsegs)
	bus_dma_tag_t t;
	struct strbuf_ctl *sb;
	bus_dma_segment_t *segs;
	int nsegs;
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_free: segp %p nsegs %d\n",
	    segs, nsegs));
	bus_dmamem_free(t->_parent, segs, nsegs);
}

/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
iommu_dvmamem_map(t, sb, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	struct strbuf_ctl *sb;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct vm_page *pg;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *pglist;
	int cbit;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	/*
	 * digest flags:
	 */
	cbit = 0;
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	pglist = segs[0]._ds_mlist;
	TAILQ_FOREACH(pg, pglist, pageq) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(pg);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va, (unsigned long long)addr | cbit));
		pmap_kenter_pa(va, addr | cbit, VM_PROT_READ | VM_PROT_WRITE);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
iommu_dvmamem_unmap(t, sb, kva, size)
	bus_dma_tag_t t;
	struct strbuf_ctl *sb;
	caddr_t kva;
	size_t size;
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
	    kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("iommu_dvmamem_unmap");
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}