/*	$NetBSD: iommu.c,v 1.52 2002/06/02 14:44:41 drochner Exp $	*/

/*
 * Copyright (c) 2001, 2002 Eduardo Horvath
 * Copyright (c) 1999, 2000 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * UltraSPARC IOMMU support; used by both the sbus and pci code.
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <sparc64/sparc64/cache.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/iommuvar.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>

#ifdef DEBUG
#define IDB_BUSDMA	0x1
#define IDB_IOMMU	0x2
#define IDB_INFO	0x4
#define IDB_SYNC	0x8
int iommudebug = 0x0;
#define DPRINTF(l, s)	do { if (iommudebug & l) printf s; } while (0)
#else
#define DPRINTF(l, s)
#endif

#define iommu_strbuf_flush(i,v) do {					\
	if ((i)->is_sbvalid[0])						\
		bus_space_write_8((i)->is_bustag, (i)->is_sb[0],	\
			STRBUFREG(strbuf_pgflush), (v));		\
	if ((i)->is_sbvalid[1])						\
		bus_space_write_8((i)->is_bustag, (i)->is_sb[1],	\
			STRBUFREG(strbuf_pgflush), (v));		\
	} while (0)

static	int iommu_strbuf_flush_done __P((struct iommu_state *));
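/*
 * Illustrative sketch (not part of the driver proper):
 * iommu_strbuf_flush() above only posts a page flush to each present
 * streaming buffer; a caller that needs the flush to have completed
 * must pair it with iommu_strbuf_flush_done(), e.g.:
 *
 *	if (is->is_sbvalid[0] || is->is_sbvalid[1]) {
 *		iommu_strbuf_flush(is, va);
 *		(void)iommu_strbuf_flush_done(is);
 *	}
 *
 * This is the pattern iommu_enter(), iommu_remove() and
 * iommu_dvmamap_sync() below follow.
 */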
/*
 * initialise the UltraSPARC IOMMU (SBUS or PCI):
 *	- allocate and setup the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(name, is, tsbsize, iovabase)
	char *name;
	struct iommu_state *is;
	int tsbsize;
	u_int32_t iovabase;
{
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *m;
	struct pglist mlist;

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS or PCI controller so we
	 * will deal with it here.
	 *
	 * For sysio and psycho/psycho+ the IOMMU address space always ends at
	 * 0xffffe000, but the starting address depends on the size of the
	 * map.  The map size is 1024 * 2 ^ is->is_tsbsize entries, where each
	 * entry is 8 bytes.  The start of the map can be calculated by
	 * (0xffffe000 << (8 + is->is_tsbsize)).
	 *
	 * But sabre and hummingbird use a different scheme that seems to
	 * be hard-wired, so we read the start and size from the PROM and
	 * just use those values.
	 */
	is->is_cr = (tsbsize << 16) | IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	if (iovabase == -1) {
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);
		is->is_dvmaend = IOTSB_VEND;
	} else {
		is->is_dvmabase = iovabase;
		is->is_dvmaend = iovabase + IOTSB_VSIZE(tsbsize);
	}

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

	size = NBPG << (is->is_tsbsize);
	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
		(paddr_t)NBPG, (paddr_t)0, &mlist, 1, 0) != 0)
		panic("iommu_init: no memory");

	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		panic("iommu_init: no memory");
	is->is_tsb = (int64_t *)va;

	m = TAILQ_FIRST(&mlist);
	is->is_ptsb = VM_PAGE_TO_PHYS(m);

	/* Map the pages */
	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, pa | PMAP_NVC,
			VM_PROT_READ|VM_PROT_WRITE,
			VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		va += NBPG;
	}
	pmap_update(pmap_kernel());
	bzero(is->is_tsb, size);

#ifdef DEBUG
	if (iommudebug & IDB_INFO) {
		/* Probe the iommu */
		printf("iommu regs at: cr=%lx tsb=%lx flush=%lx\n",
			(u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
				offsetof(struct iommureg, iommu_cr)),
			(u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
				offsetof(struct iommureg, iommu_tsb)),
			(u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
				offsetof(struct iommureg, iommu_flush)));
		printf("iommu cr=%llx tsb=%llx\n",
			(unsigned long long)bus_space_read_8(is->is_bustag,
				is->is_iommu,
				offsetof(struct iommureg, iommu_cr)),
			(unsigned long long)bus_space_read_8(is->is_bustag,
				is->is_iommu,
				offsetof(struct iommureg, iommu_tsb)));
		printf("TSB base %p phys %llx\n", (void *)is->is_tsb,
			(unsigned long long)is->is_ptsb);
		delay(1000000); /* 1 s */
	}
#endif

	/*
	 * Initialize streaming buffer, if it is there.
	 */
	if (is->is_sbvalid[0] || is->is_sbvalid[1])
		(void)pmap_extract(pmap_kernel(), (vaddr_t)&is->is_flush[0],
			(paddr_t *)&is->is_flushpa);

	/*
	 * now actually start up the IOMMU
	 */
	iommu_reset(is);

	/*
	 * Now all the hardware's working we need to allocate a dvma map.
	 */
	printf("DVMA map: %x to %x\n",
		(unsigned int)is->is_dvmabase,
		(unsigned int)is->is_dvmaend);
	printf("IOTSB: %llx to %llx\n",
		(unsigned long long)is->is_ptsb,
		(unsigned long long)(is->is_ptsb + size));
	is->is_dvmamap = extent_create(name,
		is->is_dvmabase, is->is_dvmaend - NBPG,
		M_DEVBUF, 0, 0, EX_NOWAIT);
}
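/*
 * Worked example of the sizing above, assuming 8KB pages (NBPG ==
 * 8192 on sparc64): the TSB occupies NBPG << tsbsize bytes, i.e.
 * 1024 << tsbsize eight-byte entries, and each entry maps one 8KB
 * page of DVMA space:
 *
 *	tsbsize		TSB size	entries		DVMA window
 *	   0		   8KB		   1K		    8MB
 *	   3		  64KB		   8K		   64MB
 *	   5		 256KB		  32K		  256MB
 *
 * With iovabase == -1 the window is placed so that it ends at
 * IOTSB_VEND, per the comment in iommu_init() above.
 */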
/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them.  If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not remain set.
 */
void
iommu_reset(is)
	struct iommu_state *is;
{
	int i;

	/* Need to do 64-bit stores */
	bus_space_write_8(is->is_bustag, is->is_iommu, IOMMUREG(iommu_tsb),
		is->is_ptsb);

	/* Enable IOMMU in diagnostic mode */
	bus_space_write_8(is->is_bustag, is->is_iommu, IOMMUREG(iommu_cr),
		is->is_cr|IOMMUCR_DE);

	for (i = 0; i < 2; i++) {
		if (is->is_sbvalid[i]) {

			/* Enable diagnostics mode? */
			bus_space_write_8(is->is_bustag, is->is_sb[i],
				STRBUFREG(strbuf_ctl), STRBUF_EN);

			/* No streaming buffers? Disable them */
			if (bus_space_read_8(is->is_bustag, is->is_sb[i],
				STRBUFREG(strbuf_ctl)) == 0)
				is->is_sbvalid[i] = 0;
		}
	}
}

/*
 * Here are the iommu control routines.
 */
void
iommu_enter(is, va, pa, flags)
	struct iommu_state *is;
	vaddr_t va;
	int64_t pa;
	int flags;
{
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_enter: va %#lx not in DVMA space", va);
#endif

	tte = MAKEIOTTE(pa, !(flags&BUS_DMA_NOWRITE), !(flags&BUS_DMA_NOCACHE),
		(flags&BUS_DMA_STREAMING));
#ifdef DEBUG
	tte |= (flags & 0xff000LL)<<(4*8);
#endif

	/* Is the streamcache flush really needed? */
	if (is->is_sbvalid[0] || is->is_sbvalid[1]) {
		iommu_strbuf_flush(is, va);
		iommu_strbuf_flush_done(is);
	}
	DPRINTF(IDB_IOMMU, ("Setting TSB slot %d for va %p\n",
		(int)IOTSBSLOT(va, is->is_tsbsize), (void *)(u_long)va));
	is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] = tte;
	bus_space_write_8(is->is_bustag, is->is_iommu,
		IOMMUREG(iommu_flush), va);
	DPRINTF(IDB_IOMMU, ("iommu_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
		va, (long)pa, (u_long)IOTSBSLOT(va, is->is_tsbsize),
		(void *)(u_long)&is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)],
		(u_long)tte));
}


/*
 * Translate a DVMA address to its physical address (debug routine).
 */
paddr_t
iommu_extract(is, dva)
	struct iommu_state *is;
	vaddr_t dva;
{
	int64_t tte = 0;

	if (dva >= is->is_dvmabase && dva < is->is_dvmaend)
		tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)];

	if ((tte & IOTTE_V) == 0)
		return ((paddr_t)-1L);
	return (tte & IOTTE_PAMASK);
}
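/*
 * A minimal sketch of what iommu_enter() above actually stores
 * (illustrative only): the DVMA page number selects the TSB slot via
 * IOTSBSLOT(), and the slot receives an IOTTE built by MAKEIOTTE()
 * from the physical address and the BUS_DMA_* flags.  For a
 * writeable, cacheable, non-streaming page:
 *
 *	int64_t tte = MAKEIOTTE(pa, 1, 1, 0);
 *
 *	is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] = tte;
 *	bus_space_write_8(is->is_bustag, is->is_iommu,
 *	    IOMMUREG(iommu_flush), va);
 *
 * iommu_enter() additionally flushes the streaming buffers first when
 * they are present.
 */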
/*
 * iommu_remove: removes mappings created by iommu_enter
 *
 * Only demap from IOMMU if flag is set.
 *
 * XXX: this function needs better internal error checking.
 */
void
iommu_remove(is, va, len)
	struct iommu_state *is;
	vaddr_t va;
	size_t len;
{

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if ((long)(va + len) < (long)va)
		panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
			(long)va, (long)len);
	if (len & ~0xfffffff)
		panic("iommu_remove: ridiculous len 0x%lx", (u_long)len);
#endif

	va = trunc_page(va);
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
		va, (u_long)IOTSBSLOT(va, is->is_tsbsize),
		&is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)]));
	while (len > 0) {
		DPRINTF(IDB_IOMMU, ("iommu_remove: clearing TSB slot %d "
			"for va %p size %lx\n",
			(int)IOTSBSLOT(va, is->is_tsbsize), (void *)(u_long)va,
			(u_long)len));
		if (is->is_sbvalid[0] || is->is_sbvalid[1]) {
			DPRINTF(IDB_IOMMU, ("iommu_remove: flushing va %p "
				"TSB[%lx]@%p=%lx, %lu bytes left\n",
				(void *)(u_long)va,
				(long)IOTSBSLOT(va, is->is_tsbsize),
				(void *)(u_long)&is->is_tsb[IOTSBSLOT(va,
					is->is_tsbsize)],
				(long)(is->is_tsb[IOTSBSLOT(va,
					is->is_tsbsize)]),
				(u_long)len));
			iommu_strbuf_flush(is, va);
			if (len <= NBPG)
				iommu_strbuf_flush_done(is);
			DPRINTF(IDB_IOMMU, ("iommu_remove: flushed va %p "
				"TSB[%lx]@%p=%lx, %lu bytes left\n",
				(void *)(u_long)va,
				(long)IOTSBSLOT(va, is->is_tsbsize),
				(void *)(u_long)&is->is_tsb[IOTSBSLOT(va,
					is->is_tsbsize)],
				(long)(is->is_tsb[IOTSBSLOT(va,
					is->is_tsbsize)]),
				(u_long)len));
		}

		if (len <= NBPG)
			len = 0;
		else
			len -= NBPG;

		/* XXX Zero-ing the entry would not require RMW */
		is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] &= ~IOTTE_V;
		bus_space_write_8(is->is_bustag, is->is_iommu,
			IOMMUREG(iommu_flush), va);
		va += NBPG;
	}
}
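/*
 * Sketch of how iommu_enter() and iommu_remove() pair up over a
 * multi-page range (this is essentially what iommu_dvmamap_load()
 * and iommu_dvmamap_unload() below do, minus the bookkeeping);
 * va/pa/len are assumed page-aligned here:
 *
 *	for (; len > 0; va += NBPG, pa += NBPG, len -= NBPG)
 *		iommu_enter(is, va, pa, flags);
 *	...
 *	iommu_remove(is, origva, origlen);
 */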
static int
iommu_strbuf_flush_done(is)
	struct iommu_state *is;
{
	struct timeval cur, flushtimeout;

#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

	if (!is->is_sbvalid[0] && !is->is_sbvalid[1])
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *	1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
	 *	  we're not on a cache line boundary (64-bits):
	 *	2 Store 0 in flag
	 *	3 Store pointer to flag in flushsync
	 *	4 wait till the flag becomes 0x1
	 *
	 * If it takes more than .5 sec, something went wrong.
	 */

	is->is_flush[0] = 1;
	is->is_flush[1] = 1;
	if (is->is_sbvalid[0]) {
		is->is_flush[0] = 0;
		bus_space_write_8(is->is_bustag, is->is_sb[0],
			STRBUFREG(strbuf_flushsync), is->is_flushpa);
	}
	if (is->is_sbvalid[1]) {
		is->is_flush[1] = 0;
		bus_space_write_8(is->is_bustag, is->is_sb[1],
			STRBUFREG(strbuf_flushsync), is->is_flushpa + 8);
	}

	microtime(&flushtimeout);
	cur = flushtimeout;
	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */

	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flush = %lx,%lx "
		"at va = %lx pa = %lx now=%lx:%lx until = %lx:%lx\n",
		(long)is->is_flush[0], (long)is->is_flush[1],
		(long)&is->is_flush[0], (long)is->is_flushpa,
		cur.tv_sec, cur.tv_usec,
		flushtimeout.tv_sec, flushtimeout.tv_usec));

	/* Bypass non-coherent D$ */
	while ((!ldxa(is->is_flushpa, ASI_PHYS_CACHED) ||
		!ldxa(is->is_flushpa + 8, ASI_PHYS_CACHED)) &&
		((cur.tv_sec < flushtimeout.tv_sec) ||
		 ((cur.tv_sec == flushtimeout.tv_sec) &&
		  (cur.tv_usec <= flushtimeout.tv_usec))))
		microtime(&cur);

#ifdef DIAGNOSTIC
	if (!ldxa(is->is_flushpa, ASI_PHYS_CACHED) ||
		!ldxa(is->is_flushpa + 8, ASI_PHYS_CACHED)) {
		printf("iommu_strbuf_flush_done: flush timeout %p,%p at %p\n",
			(void *)(u_long)is->is_flush[0],
			(void *)(u_long)is->is_flush[1],
			(void *)(u_long)is->is_flushpa); /* panic? */
#ifdef DDB
		Debugger();
#endif
	}
#endif
	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flushed\n"));
	return (is->is_flush[0] && is->is_flush[1]);
}
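/*
 * The flush-sync handshake implemented above, in outline (for one
 * streaming buffer):
 *
 *	is->is_flush[0] = 0;
 *	bus_space_write_8(is->is_bustag, is->is_sb[0],
 *	    STRBUFREG(strbuf_flushsync), is->is_flushpa);
 *	while (!ldxa(is->is_flushpa, ASI_PHYS_CACHED))
 *		;	(poll, with a timeout)
 *
 * The flag is read back with ldxa() from its physical address so
 * that a non-coherent D$ cannot hide the hardware's store of 0x1.
 */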
/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
int
iommu_dvmamap_load(t, is, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	int s;
	int err;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long dvmaddr, sgstart, sgend;
	bus_size_t align, boundary;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
			("iommu_dvmamap_load(): error %d > %d -- "
			"map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = max(map->dm_segs[0]._ds_align, NBPG);
	s = splhigh();
	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc(is->is_dvmamap, sgsize, align,
		(sgsize > boundary) ? 0 : boundary,
		EX_NOWAIT|EX_BOUNDZERO, (u_long *)&dvmaddr);
	splx(s);

#ifdef DEBUG
	if (err || (dvmaddr == (bus_addr_t)-1)) {
		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
			(int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (err != 0)
		return (err);

	if (dvmaddr == (bus_addr_t)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	/*
	 * Now split the DVMA range into segments, not crossing
	 * the boundary.
	 */
	seg = 0;
	sgstart = dvmaddr + (vaddr & PGOFSET);
	sgend = sgstart + buflen - 1;
	map->dm_segs[seg].ds_addr = sgstart;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary-1 %lx "
		"~(boundary-1) %lx\n", boundary, (boundary - 1), ~(boundary - 1)));
	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
			"seg %d start %lx size %lx\n", seg,
			(long)map->dm_segs[seg].ds_addr,
			map->dm_segs[seg].ds_len));
		map->dm_segs[seg].ds_len =
			boundary - (sgstart & (boundary - 1));
		if (++seg >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
				"too many segments %d\n", seg));
			s = splhigh();
			/* How can this fail?  And if it does what can we do? */
			err = extent_free(is->is_dvmamap,
				dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			splx(s);
			return (E2BIG);
		}
		sgstart = roundup(sgstart, boundary);
		map->dm_segs[seg].ds_addr = sgstart;
	}
	map->dm_segs[seg].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
		"seg %d start %lx size %lx\n", seg,
		(long)map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len));
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = buflen;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {
		/*
		 * Get the physical address for this page.
		 */
		if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
			bus_dmamap_unload(t, map);
			return (EFAULT);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		DPRINTF(IDB_BUSDMA,
			("iommu_dvmamap_load: map %p loading va %p "
			"dva %lx at pa %lx\n",
			map, (void *)vaddr, (long)dvmaddr,
			(long)(curaddr & ~(NBPG - 1))));
		iommu_enter(is, trunc_page(dvmaddr), trunc_page(curaddr),
			flags|0x4000);

		dvmaddr += PAGE_SIZE;
		vaddr += sgsize;
		buflen -= sgsize;
	}
#ifdef DIAGNOSTIC
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
			map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
				seg, (long)map->dm_segs[seg].ds_addr,
				is->is_dvmabase, is->is_dvmaend);
			Debugger();
		}
	}
#endif
	return (0);
}
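/*
 * Worked example of the boundary splitting above (hypothetical
 * numbers): with boundary == 0x10000, sgstart == 0x1c000 and
 * buflen == 0x8000, sgend == 0x23fff lies beyond the 64KB boundary
 * at 0x20000, so the transfer is split into two segments:
 *
 *	seg 0: ds_addr 0x1c000, ds_len 0x4000	(up to the boundary)
 *	seg 1: ds_addr 0x20000, ds_len 0x4000	(the remainder)
 */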
void
iommu_dvmamap_unload(t, is, map)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dmamap_t map;
{
	int error, s;
	bus_size_t sgsize;

	/* Flush the iommu */
#ifdef DEBUG
	if (!map->_dm_dvmastart) {
		printf("iommu_dvmamap_unload: _dm_dvmastart is zero\n");
#ifdef DDB
		Debugger();
#endif
	}
#endif
	iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

	/* Flush the caches */
	bus_dmamap_unload(t->_parent, map);

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/* Clear the map and release the DVMA range. */
	s = splhigh();
	sgsize = map->_dm_dvmasize;
	error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
		map->_dm_dvmasize, EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	splx(s);
	if (error != 0)
		printf("warning: %qd of DVMA space lost\n", (long long)sgsize);
}
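/*
 * Sketch of the overall map lifecycle from a hypothetical driver's
 * point of view (sc_dmat, sc_is and map are assumptions made for
 * illustration; real drivers normally reach these through the
 * sbus/pci bus_dma tag methods rather than calling them directly):
 *
 *	if (iommu_dvmamap_load(sc->sc_dmat, sc->sc_is, map, buf, len,
 *	    NULL, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	iommu_dvmamap_sync(sc->sc_dmat, sc->sc_is, map, 0, len,
 *	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 *	... run the DMA and wait for it to complete ...
 *	iommu_dvmamap_sync(sc->sc_dmat, sc->sc_is, map, 0, len,
 *	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 *	iommu_dvmamap_unload(sc->sc_dmat, sc->sc_is, map);
 */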
int
iommu_dvmamap_load_raw(t, is, map, segs, nsegs, flags, size)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	int flags;
	bus_size_t size;
{
	struct vm_page *m;
	int i, j, s;
	int left;
	int err;
	bus_size_t sgsize;
	paddr_t pa;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int npg = 0; /* DEBUG */

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_raw: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = max(segs[0]._ds_align, pagesz);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	/* Count up the total number of pages we need */
	pa = segs[0].ds_addr;
	sgsize = 0;
	left = size;
	for (i = 0; left && i < nsegs; i++) {
		if (round_page(pa) != round_page(segs[i].ds_addr))
			sgsize = round_page(sgsize);
		sgsize += min(left, segs[i].ds_len);
		left -= segs[i].ds_len;
		pa = segs[i].ds_addr + segs[i].ds_len;
	}
	sgsize = round_page(sgsize);

	s = splhigh();
	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc(is->is_dvmamap, sgsize, align,
		(sgsize > boundary) ? 0 : boundary,
		((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT) |
		EX_BOUNDZERO, (u_long *)&dvmaddr);
	splx(s);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (bus_addr_t)-1) {
		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) "
			"failed!\n", (int)sgsize, flags);
		Debugger();
	}
#endif
	if (dvmaddr == (bus_addr_t)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	if ((mlist = segs[0]._ds_mlist) == NULL) {
		u_long prev_va = 0;
		paddr_t prev_pa = 0;
		int end = 0, offset;

		/*
		 * This segs is made up of individual physical
		 * segments, probably by _bus_dmamap_load_uio() or
		 * _bus_dmamap_load_mbuf().  Ignore the mlist and
		 * load each one individually.
		 */
		map->dm_mapsize = size;

		j = 0;
		for (i = 0; i < nsegs; i++) {

			pa = segs[i].ds_addr;
			offset = (pa & PGOFSET);
			pa = trunc_page(pa);
			dvmaddr = trunc_page(dvmaddr);
			left = min(size, segs[i].ds_len);

			DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: converting "
				"physseg %d start %lx size %lx\n", i,
				(long)segs[i].ds_addr, segs[i].ds_len));

			if ((pa == prev_pa) &&
				((offset != 0) || (end != offset))) {
				/* We can re-use this mapping */
#ifdef DEBUG
				if (iommudebug & 0x10)
					printf("reusing dva %lx prev %lx "
						"pa %lx prev %lx\n",
						dvmaddr, prev_va, pa, prev_pa);
#endif
				dvmaddr = prev_va;
			}

			sgstart = dvmaddr + offset;
			sgend = sgstart + left - 1;

			/* Are the segments virtually adjacent? */
			if ((j > 0) && (end == offset) &&
				((offset == 0) || (pa == prev_pa))) {
				/* Just append to the previous segment. */
#ifdef DEBUG
				if (iommudebug & 0x10)
					printf("appending: offset %x pa %lx "
						"prev %lx dva %lx prev %lx\n",
						offset, pa, prev_pa,
						dvmaddr, prev_va);
#endif
				map->dm_segs[--j].ds_len += left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
					"appending seg %d start %lx size %lx\n",
					j, (long)map->dm_segs[j].ds_addr,
					map->dm_segs[j].ds_len));
			} else {
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
					"seg %d start %lx size %lx\n", j,
					(long)map->dm_segs[j].ds_addr,
					map->dm_segs[j].ds_len));
			}
			end = (offset + left) & PGOFSET;

			/* Check for boundary issues */
			while ((sgstart & ~(boundary - 1)) !=
				(sgend & ~(boundary - 1))) {
				/* Need a new segment. */
				map->dm_segs[j].ds_len =
					boundary - (sgstart & (boundary - 1));
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
					"seg %d start %lx size %lx\n", j,
					(long)map->dm_segs[j].ds_addr,
					map->dm_segs[j].ds_len));
				if (++j >= map->_dm_segcnt) {
					iommu_dvmamap_unload(t, is, map);
					return (E2BIG);
				}
				sgstart = roundup(sgstart, boundary);
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = sgend - sgstart + 1;
			}

			if (sgsize == 0)
				panic("iommu_dvmamap_load_raw: size botch");

			/* Now map a series of pages. */
			while (dvmaddr <= sgend) {
				DPRINTF(IDB_BUSDMA,
					("iommu_dvmamap_load_raw: map %p "
					"loading va %lx at pa %lx\n",
					map, (long)dvmaddr, (long)(pa)));
				/* Enter it if we haven't before. */
				if (prev_va != dvmaddr) {
#ifdef DEBUG
					if (iommudebug & 0x10)
						printf("seg %d:%d entering dvma "
							"%lx, prev %lx pa %lx\n",
							i, j, dvmaddr,
							prev_va, pa);
#endif
					iommu_enter(is, prev_va = dvmaddr,
						prev_pa = pa,
						flags|(++npg << 12));
				}
#ifdef DEBUG
				else if (iommudebug & 0x10)
					printf("seg %d:%d skipping dvma %lx, "
						"prev %lx\n", i, j, dvmaddr,
						prev_va);
#endif

				dvmaddr += pagesz;
				pa += pagesz;
			}

			size -= left;
			++j;
		}

		map->dm_nsegs = j;
#ifdef DIAGNOSTIC
		{
			int seg;
			for (seg = 0; seg < map->dm_nsegs; seg++) {
				if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
					map->dm_segs[seg].ds_addr > is->is_dvmaend) {
					printf("seg %d dvmaddr %lx out of "
						"range %x - %x\n", seg,
						(long)map->dm_segs[seg].ds_addr,
						is->is_dvmabase, is->is_dvmaend);
					Debugger();
				}
			}
		}
#endif
		return (0);
	}
	/*
	 * This was allocated with bus_dmamem_alloc.
	 * The pages are on an `mlist'.
	 */
	map->dm_mapsize = size;
	i = 0;
	sgstart = dvmaddr;
	sgend = sgstart + size - 1;
	map->dm_segs[i].ds_addr = sgstart;
	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		map->dm_segs[i].ds_len =
			boundary - (sgstart & (boundary - 1));
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
			"seg %d start %lx size %lx\n", i,
			(long)map->dm_segs[i].ds_addr,
			map->dm_segs[i].ds_len));
		if (++i >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			s = splhigh();
			/* How can this fail?  And if it does what can we do? */
			err = extent_free(is->is_dvmamap,
				dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			splx(s);
			return (E2BIG);
		}
		sgstart = roundup(sgstart, boundary);
		map->dm_segs[i].ds_addr = sgstart;
	}
	map->dm_segs[i].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
		"seg %d start %lx size %lx\n", i,
		(long)map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len));

	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		if (sgsize == 0)
			panic("iommu_dvmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);

		DPRINTF(IDB_BUSDMA,
			("iommu_dvmamap_load_raw: map %p loading va %lx at "
			"pa %lx\n", map, (long)dvmaddr, (long)(pa)));
		iommu_enter(is, dvmaddr, pa, flags|0x8000);

		dvmaddr += pagesz;
		sgsize -= pagesz;
	}
	map->dm_mapsize = size;
	map->dm_nsegs = i + 1;
#ifdef DIAGNOSTIC
	{
		int seg;
		for (seg = 0; seg < map->dm_nsegs; seg++) {
			if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
				map->dm_segs[seg].ds_addr > is->is_dvmaend) {
				printf("seg %d dvmaddr %lx out of range "
					"%x - %x\n", seg,
					(long)map->dm_segs[seg].ds_addr,
					is->is_dvmabase, is->is_dvmaend);
				Debugger();
			}
		}
	}
#endif
	return (0);
}
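/*
 * iommu_dvmamap_load_raw() above serves two kinds of callers: memory
 * from bus_dmamem_alloc() arrives with segs[0]._ds_mlist set and is
 * entered page by page from the mlist, while segment lists built by
 * _bus_dmamap_load_uio() or _bus_dmamap_load_mbuf() arrive without an
 * mlist and are loaded one physical segment at a time.  A minimal
 * sketch of the first case (hypothetical caller):
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(t->_parent, size, NBPG, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT) == 0)
 *		(void)iommu_dvmamap_load_raw(t, is, map, &seg, rseg,
 *		    BUS_DMA_NOWAIT, size);
 */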
void
iommu_dvmamap_sync(t, is, map, offset, len, ops)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	vaddr_t va = map->dm_segs[0].ds_addr + offset;

	/*
	 * We only support one DMA segment; supporting more makes this code
	 * too unwieldy.
	 */

	if (ops & BUS_DMASYNC_PREREAD) {
		DPRINTF(IDB_SYNC,
			("iommu_dvmamap_sync: syncing va %p len %lu "
			"BUS_DMASYNC_PREREAD\n", (void *)(u_long)va,
			(u_long)len));

		/* Nothing to do */;
	}
	if (ops & BUS_DMASYNC_POSTREAD) {
		DPRINTF(IDB_SYNC,
			("iommu_dvmamap_sync: syncing va %p len %lu "
			"BUS_DMASYNC_POSTREAD\n", (void *)(u_long)va,
			(u_long)len));
		/* if we have a streaming buffer, flush it here first */
		if (is->is_sbvalid[0] || is->is_sbvalid[1])
			while (len > 0) {
				DPRINTF(IDB_BUSDMA,
					("iommu_dvmamap_sync: flushing va %p, "
					"%lu bytes left\n", (void *)(u_long)va,
					(u_long)len));
				iommu_strbuf_flush(is, va);
				if (len <= NBPG) {
					iommu_strbuf_flush_done(is);
					len = 0;
				} else
					len -= NBPG;
				va += NBPG;
			}
	}
	if (ops & BUS_DMASYNC_PREWRITE) {
		DPRINTF(IDB_SYNC,
			("iommu_dvmamap_sync: syncing va %p len %lu "
			"BUS_DMASYNC_PREWRITE\n", (void *)(u_long)va,
			(u_long)len));
		/* if we have a streaming buffer, flush it here first */
		if (is->is_sbvalid[0] || is->is_sbvalid[1])
			while (len > 0) {
				DPRINTF(IDB_BUSDMA,
					("iommu_dvmamap_sync: flushing va %p, "
					"%lu bytes left\n", (void *)(u_long)va,
					(u_long)len));
				iommu_strbuf_flush(is, va);
				if (len <= NBPG) {
					iommu_strbuf_flush_done(is);
					len = 0;
				} else
					len -= NBPG;
				va += NBPG;
			}
	}
	if (ops & BUS_DMASYNC_POSTWRITE) {
		DPRINTF(IDB_SYNC,
			("iommu_dvmamap_sync: syncing va %p len %lu "
			"BUS_DMASYNC_POSTWRITE\n", (void *)(u_long)va,
			(u_long)len));
		/* Nothing to do */;
	}
}
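/*
 * Note that only BUS_DMASYNC_POSTREAD and BUS_DMASYNC_PREWRITE do any
 * real work above, and only when streaming buffers are present: the
 * flush walks the range a page at a time and waits for completion
 * once, on the final page.  E.g. after a device-to-memory transfer:
 *
 *	iommu_dvmamap_sync(t, is, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_POSTREAD);
 */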
int
iommu_dvmamem_alloc(t, is, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_alloc: sz %llx align %llx "
		"bound %llx segp %p flags %d\n", (unsigned long long)size,
		(unsigned long long)alignment, (unsigned long long)boundary,
		segs, flags));
	return (bus_dmamem_alloc(t->_parent, size, alignment, boundary,
		segs, nsegs, rsegs, flags|BUS_DMA_DVMA));
}

void
iommu_dvmamem_free(t, is, segs, nsegs)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dma_segment_t *segs;
	int nsegs;
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_free: segp %p nsegs %d\n",
		segs, nsegs));
	bus_dmamem_free(t->_parent, segs, nsegs);
}
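/*
 * Sketch of the dvmamem lifecycle (hypothetical caller), pairing the
 * wrappers above with the map/unmap routines below:
 *
 *	bus_dma_segment_t seg;
 *	caddr_t kva;
 *	int rseg;
 *
 *	if (iommu_dvmamem_alloc(t, is, size, NBPG, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	if (iommu_dvmamem_map(t, is, &seg, rseg, size, &kva,
 *	    BUS_DMA_COHERENT) != 0) {
 *		iommu_dvmamem_free(t, is, &seg, rseg);
 *		return (ENOMEM);
 *	}
 *	... use kva ...
 *	iommu_dvmamem_unmap(t, is, kva, size);
 *	iommu_dvmamem_free(t, is, &seg, rseg);
 */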
/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
iommu_dvmamem_map(t, is, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct vm_page *m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	int cbit;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
		segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	/*
	 * digest flags:
	 */
	cbit = 0;
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(m);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
			"mapping va %lx at %llx\n", va,
			(unsigned long long)addr | cbit));
		pmap_enter(pmap_kernel(), va, addr | cbit,
			VM_PROT_READ | VM_PROT_WRITE,
			VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
iommu_dvmamem_unmap(t, is, kva, size)
	bus_dma_tag_t t;
	struct iommu_state *is;
	caddr_t kva;
	size_t size;
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
		kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("iommu_dvmamem_unmap");
#endif

	size = round_page(size);
	/* pmap_remove() takes a start and an end address, not a length */
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
#if 0
	/*
	 * XXX ? is this necessary? i think so and i think other
	 * implementations are missing it.
	 */
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
#endif
}