1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #include <sys/types.h>
27 #include <sys/param.h>
28 #include <sys/conf.h>
29 #include <sys/ddi.h>
30 #include <sys/sunddi.h>
31 #include <sys/ddi_impldefs.h>
32 #include <sys/cmn_err.h>
33 #include <sys/kmem.h>
34 #include <sys/vmem.h>
35 #include <sys/sysmacros.h>
36
37 #include <sys/ddidmareq.h>
38 #include <sys/sysiosbus.h>
39 #include <sys/iommu.h>
40 #include <sys/iocache.h>
41 #include <sys/dvma.h>
42
43 #include <vm/as.h>
44 #include <vm/hat.h>
45 #include <vm/page.h>
46 #include <vm/hat_sfmmu.h>
47 #include <sys/machparam.h>
48 #include <sys/machsystm.h>
49 #include <sys/vmsystm.h>
50 #include <sys/iommutsb.h>
51
52 /* Useful debugging Stuff */
53 #include <sys/nexusdebug.h>
54 #include <sys/debug.h>
55 /* Bitfield debugging definitions for this file */
56 #define IOMMU_GETDVMAPAGES_DEBUG 0x1
57 #define IOMMU_DMAMAP_DEBUG 0x2
58 #define IOMMU_DMAMCTL_DEBUG 0x4
59 #define IOMMU_DMAMCTL_SYNC_DEBUG 0x8
60 #define IOMMU_DMAMCTL_HTOC_DEBUG 0x10
61 #define IOMMU_DMAMCTL_KVADDR_DEBUG 0x20
62 #define IOMMU_DMAMCTL_NEXTWIN_DEBUG 0x40
63 #define IOMMU_DMAMCTL_NEXTSEG_DEBUG 0x80
64 #define IOMMU_DMAMCTL_MOVWIN_DEBUG 0x100
65 #define IOMMU_DMAMCTL_REPWIN_DEBUG 0x200
66 #define IOMMU_DMAMCTL_GETERR_DEBUG 0x400
67 #define IOMMU_DMAMCTL_COFF_DEBUG 0x800
68 #define IOMMU_DMAMCTL_DMA_FREE_DEBUG 0x1000
69 #define IOMMU_REGISTERS_DEBUG 0x2000
70 #define IOMMU_DMA_SETUP_DEBUG 0x4000
71 #define IOMMU_DMA_UNBINDHDL_DEBUG 0x8000
72 #define IOMMU_DMA_BINDHDL_DEBUG 0x10000
73 #define IOMMU_DMA_WIN_DEBUG 0x20000
74 #define IOMMU_DMA_ALLOCHDL_DEBUG 0x40000
75 #define IOMMU_DMA_LIM_SETUP_DEBUG 0x80000
76 #define IOMMU_FASTDMA_RESERVE 0x100000
77 #define IOMMU_FASTDMA_LOAD 0x200000
78 #define IOMMU_INTER_INTRA_XFER 0x400000
79 #define IOMMU_TTE 0x800000
80 #define IOMMU_TLB 0x1000000
81 #define IOMMU_FASTDMA_SYNC 0x2000000
82
83 /* Turn on if you need to keep track of outstanding IOMMU usage */
84 /* #define IO_MEMUSAGE */
85 /* Turn on to debug IOMMU unmapping code */
86 /* #define IO_MEMDEBUG */
87
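/*
 * Fast DVMA operations vector handed back to leaf drivers through
 * DDI_DMA_RESERVE handles (see the DDI_DMA_RESERVE case in iommu_dma_mctl()).
 */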
88 static struct dvma_ops iommu_dvma_ops = {
89 DVMAO_REV,
90 iommu_dvma_kaddr_load,
91 iommu_dvma_unload,
92 iommu_dvma_sync
93 };
94
95 extern void *sbusp; /* sbus soft state hook */
96
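/*
 * Largest allocation, in bytes, cached by the dvma arena's quantum
 * caches (the qcache_max argument to vmem_create() in iommu_init()).
 */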
97 #define DVMA_MAX_CACHE 65536
98
99 /*
100 * This is the number of pages that a mapping request needs before we force
101 * the TLB flush code to use diagnostic registers. This value was determined
102  * through a series of test runs measuring dma mapping setup performance.
103 */
104 int tlb_flush_using_diag = 16;
105
106 int sysio_iommu_tsb_sizes[] = {
107 IOMMU_TSB_SIZE_8M,
108 IOMMU_TSB_SIZE_16M,
109 IOMMU_TSB_SIZE_32M,
110 IOMMU_TSB_SIZE_64M,
111 IOMMU_TSB_SIZE_128M,
112 IOMMU_TSB_SIZE_256M,
113 IOMMU_TSB_SIZE_512M,
114 IOMMU_TSB_SIZE_1G
115 };
116
117 static int iommu_map_window(ddi_dma_impl_t *, off_t, size_t);
118
119 int
120 iommu_init(struct sbus_soft_state *softsp, caddr_t address)
121 {
122 int i;
123 char name[40];
124
125 #ifdef DEBUG
126 debug_info = 1;
127 #endif
128
129 /*
130  * Simply add each register's offset to the base address
131 * to calculate the already mapped virtual address of
132 * the device register...
133 *
134 * define a macro for the pointer arithmetic; all registers
135 * are 64 bits wide and are defined as uint64_t's.
136 */
137
138 #define REG_ADDR(b, o) (uint64_t *)((caddr_t)(b) + (o))
139
140 softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
141 softsp->tsb_base_addr = REG_ADDR(address, OFF_TSB_BASE_ADDR);
142 softsp->iommu_flush_reg = REG_ADDR(address, OFF_IOMMU_FLUSH_REG);
143 softsp->iommu_tlb_tag = REG_ADDR(address, OFF_IOMMU_TLB_TAG);
144 softsp->iommu_tlb_data = REG_ADDR(address, OFF_IOMMU_TLB_DATA);
145
146 #undef REG_ADDR
147
148 mutex_init(&softsp->dma_pool_lock, NULL, MUTEX_DEFAULT, NULL);
149 mutex_init(&softsp->intr_poll_list_lock, NULL, MUTEX_DEFAULT, NULL);
150
151 /* Set up the DVMA resource sizes */
152 if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
153 IOMMU_TSB_COOKIE_NONE) {
154 cmn_err(CE_WARN, "%s%d: Unable to retrieve IOMMU array.",
155 ddi_driver_name(softsp->dip),
156 ddi_get_instance(softsp->dip));
157 return (DDI_FAILURE);
158 }
159 softsp->soft_tsb_base_addr =
160 iommu_tsb_cookie_to_va(softsp->iommu_tsb_cookie);
161 softsp->iommu_dvma_size =
162 iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie) <<
163 IOMMU_TSB_TO_RNG;
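	/*
	 * Place the DVMA space at the top of the 32-bit IO address range:
	 * iommu_dvma_base = 2^32 - iommu_dvma_size.
	 */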
164 softsp->iommu_dvma_base = (ioaddr_t)
165 (0 - (ioaddr_t)softsp->iommu_dvma_size);
166
167 (void) snprintf(name, sizeof (name), "%s%d_dvma",
168 ddi_driver_name(softsp->dip), ddi_get_instance(softsp->dip));
169
170 /*
171 * Initialize the DVMA vmem arena.
172 */
173 softsp->dvma_arena = vmem_create(name,
174 (void *)(uintptr_t)softsp->iommu_dvma_base,
175 softsp->iommu_dvma_size, PAGESIZE, NULL, NULL, NULL,
176 DVMA_MAX_CACHE, VM_SLEEP);
177
178 /* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
179 softsp->dma_reserve = iommu_btop(softsp->iommu_dvma_size >> 1);
180
181 #if defined(DEBUG) && defined(IO_MEMUSAGE)
182 mutex_init(&softsp->iomemlock, NULL, MUTEX_DEFAULT, NULL);
183 softsp->iomem = (struct io_mem_list *)0;
184 #endif /* DEBUG && IO_MEMUSAGE */
185 /*
186 * Get the base address of the TSB table and store it in the hardware
187 */
188
189 /*
190 * We plan on the PROM flushing all TLB entries. If this is not the
191 * case, this is where we should flush the hardware TLB.
192 */
193
194 /* Set the IOMMU registers */
195 (void) iommu_resume_init(softsp);
196
197 /* check the convenient copy of TSB base, and flush write buffers */
198 if (*softsp->tsb_base_addr !=
199 va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
200 iommu_tsb_free(softsp->iommu_tsb_cookie);
201 return (DDI_FAILURE);
202 }
203
204 softsp->sbus_io_lo_pfn = UINT32_MAX;
205 softsp->sbus_io_hi_pfn = 0;
206 for (i = 0; i < sysio_pd_getnrng(softsp->dip); i++) {
207 struct rangespec *rangep;
208 uint64_t addr;
209 pfn_t hipfn, lopfn;
210
211 rangep = sysio_pd_getrng(softsp->dip, i);
212 addr = (uint64_t)((uint64_t)rangep->rng_bustype << 32);
213 addr |= (uint64_t)rangep->rng_offset;
214 lopfn = (pfn_t)(addr >> MMU_PAGESHIFT);
215 addr += (uint64_t)(rangep->rng_size - 1);
216 hipfn = (pfn_t)(addr >> MMU_PAGESHIFT);
217
218 softsp->sbus_io_lo_pfn = (lopfn < softsp->sbus_io_lo_pfn) ?
219 lopfn : softsp->sbus_io_lo_pfn;
220
221 softsp->sbus_io_hi_pfn = (hipfn > softsp->sbus_io_hi_pfn) ?
222 hipfn : softsp->sbus_io_hi_pfn;
223 }
224
225 DPRINTF(IOMMU_REGISTERS_DEBUG, ("IOMMU Control reg: %p IOMMU TSB "
226 "base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
227 (void *)softsp->iommu_ctrl_reg, (void *)softsp->tsb_base_addr,
228 (void *)softsp->iommu_flush_reg,
229 (void *)softsp->soft_tsb_base_addr));
230
231 return (DDI_SUCCESS);
232 }
233
234 /*
235  * Uninitialize the IOMMU and release the TSB back to
236  * the spare pool. See startup.c for TSB spare management.
237 */
238
239 int
240 iommu_uninit(struct sbus_soft_state *softsp)
241 {
242 vmem_destroy(softsp->dvma_arena);
243
244 /* flip off the IOMMU enable switch */
245 *softsp->iommu_ctrl_reg &=
246 (TSB_SIZE << TSB_SIZE_SHIFT | IOMMU_DISABLE);
247
248 iommu_tsb_free(softsp->iommu_tsb_cookie);
249
250 return (DDI_SUCCESS);
251 }
252
253 /*
254 * Initialize iommu hardware registers when the system is being resumed.
255 * (Subset of iommu_init())
256 */
257 int
258 iommu_resume_init(struct sbus_soft_state *softsp)
259 {
260 int i;
261 uint_t tsb_size;
262 uint_t tsb_bytes;
263
264 /*
265 * Reset the base address of the TSB table in the hardware
266 */
267 *softsp->tsb_base_addr = va_to_pa((caddr_t)softsp->soft_tsb_base_addr);
268
269 /*
270  * Figure out the correct size encoding for the IOMMU TSB. If we
271 * end up with a size smaller than that needed for 8M of IOMMU
272 * space, default the size to 8M. XXX We could probably panic here
273 */
274 i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
275 - 1;
276
277 tsb_bytes = iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie);
278
279 while (i > 0) {
280 if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
281 break;
282 i--;
283 }
284
285 tsb_size = i;
286
287  /* OK, let's flip the "on" switch of the IOMMU */
288 *softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
289 | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);
290
291 return (DDI_SUCCESS);
292 }
293
294 void
295 iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
296 {
297 volatile uint64_t tmpreg;
298 volatile uint64_t *vaddr_reg, *valid_bit_reg;
299 ioaddr_t hiaddr, ioaddr;
300 int i, do_flush = 0;
301
302 if (npages == 1) {
303 *softsp->iommu_flush_reg = (uint64_t)addr;
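		/*
		 * Reading back a SYSIO register forces the posted flush
		 * write to complete before we return.
		 */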
304 tmpreg = *softsp->sbus_ctrl_reg;
305 return;
306 }
307
308 hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);
309 for (i = 0, vaddr_reg = softsp->iommu_tlb_tag,
310 valid_bit_reg = softsp->iommu_tlb_data;
311 i < IOMMU_TLB_ENTRIES; i++, vaddr_reg++, valid_bit_reg++) {
312 tmpreg = *vaddr_reg;
313 ioaddr = (ioaddr_t)((tmpreg & IOMMU_TLBTAG_VA_MASK) <<
314 IOMMU_TLBTAG_VA_SHIFT);
315
316 DPRINTF(IOMMU_TLB, ("Vaddr reg 0x%p, "
317 "TLB vaddr reg %lx, IO addr 0x%x "
318 "Base addr 0x%x, Hi addr 0x%x\n",
319 (void *)vaddr_reg, tmpreg, ioaddr, addr, hiaddr));
320
321 if (ioaddr >= addr && ioaddr <= hiaddr) {
322 tmpreg = *valid_bit_reg;
323
324 DPRINTF(IOMMU_TLB, ("Valid reg addr 0x%p, "
325 "TLB valid reg %lx\n",
326 (void *)valid_bit_reg, tmpreg));
327
328 if (tmpreg & IOMMU_TLB_VALID) {
329 *softsp->iommu_flush_reg = (uint64_t)ioaddr;
330 do_flush = 1;
331 }
332 }
333 }
334
335 if (do_flush)
336 tmpreg = *softsp->sbus_ctrl_reg;
337 }
338
339
340 /*
341 * Shorthand defines
342 */
343
344 #define ALO dma_lim->dlim_addr_lo
345 #define AHI dma_lim->dlim_addr_hi
346 #define OBJSIZE dmareq->dmar_object.dmao_size
347 #define IOTTE_NDX(vaddr, base) (base + \
348 (int)(iommu_btop((vaddr & ~IOMMU_PAGEMASK) - \
349 softsp->iommu_dvma_base)))
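/*
 * IOTTE_NDX(vaddr, base) returns a pointer to the IOTTE within the TSB
 * (starting at 'base') that maps the DVMA address 'vaddr'.
 */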
350 /*
351 * If DDI_DMA_PARTIAL flag is set and the request is for
352 * less than MIN_DVMA_WIN_SIZE, it's not worth the hassle so
353 * we turn off the DDI_DMA_PARTIAL flag
354 */
355 #define MIN_DVMA_WIN_SIZE (128)
356
357 /* ARGSUSED */
358 void
359 iommu_remove_mappings(ddi_dma_impl_t *mp)
360 {
361 #if defined(DEBUG) && defined(IO_MEMDEBUG)
362 pgcnt_t npages;
363 ioaddr_t ioaddr;
364 volatile uint64_t *iotte_ptr;
367 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
368 struct sbus_soft_state *softsp = mppriv->softsp;
369
370 #if defined(IO_MEMUSAGE)
371 struct io_mem_list **prevp, *walk;
372 #endif /* IO_MEMUSAGE */
373
374 ASSERT(softsp != NULL);
375 /*
376 * Run thru the mapped entries and free 'em
377 */
378
379 ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET;
380 npages = mp->dmai_ndvmapages;
381
382 #if defined(IO_MEMUSAGE)
383 mutex_enter(&softsp->iomemlock);
384 prevp = &softsp->iomem;
385 walk = softsp->iomem;
386
387 while (walk) {
388 if (walk->ioaddr == ioaddr) {
389 *prevp = walk->next;
390 break;
391 }
392
393 prevp = &walk->next;
394 walk = walk->next;
395 }
396 mutex_exit(&softsp->iomemlock);
397
398 kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
399 kmem_free(walk, sizeof (struct io_mem_list));
400 #endif /* IO_MEMUSAGE */
401
402 iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
403
404 while (npages) {
405 DPRINTF(IOMMU_DMAMCTL_DEBUG,
406 ("dma_mctl: freeing ioaddr %x iotte %p\n",
407 ioaddr, iotte_ptr));
408 *iotte_ptr = (uint64_t)0; /* unload tte */
409 iommu_tlb_flush(softsp, ioaddr, 1);
410 npages--;
411 ioaddr += IOMMU_PAGESIZE;
412 iotte_ptr++;
413 }
414 #endif /* DEBUG && IO_MEMDEBUG */
415 }
416
417
418 int
419 iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr)
420 {
421 pfn_t pfn;
422 struct as *as = NULL;
423 pgcnt_t npages;
424 ioaddr_t ioaddr;
425 uint_t offset;
426 volatile uint64_t *iotte_ptr;
427 uint64_t tmp_iotte_flag;
428 int rval = DDI_DMA_MAPPED;
429 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
430 struct sbus_soft_state *softsp = mppriv->softsp;
431 int diag_tlb_flush;
432 #if defined(DEBUG) && defined(IO_MEMUSAGE)
433 struct io_mem_list *iomemp;
434 pfn_t *pfnp;
435 #endif /* DEBUG && IO_MEMUSAGE */
436
437 ASSERT(softsp != NULL);
438
439 /* Set Valid and Cache for mem xfer */
440 tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
441
442 offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
443 npages = iommu_btopr(mp->dmai_size + offset);
444 ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
445 iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
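	/*
	 * For requests larger than tlb_flush_using_diag pages it is cheaper
	 * to flush the affected range once through the TLB diag registers
	 * than to flush page by page while loading TTEs.
	 */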
446 diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
447
448 as = mp->dmai_object.dmao_obj.virt_obj.v_as;
449 if (as == NULL)
450 as = &kas;
451
452 /*
453 * Set the per object bits of the TTE here. We optimize this for
454 * the memory case so that the while loop overhead is minimal.
455 */
456 /* Turn on NOSYNC if we need consistent mem */
457 if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
458 mp->dmai_rflags |= DMP_NOSYNC;
459 tmp_iotte_flag ^= IOTTE_STREAM;
460 /* Set streaming mode if not consistent mem */
461 } else if (softsp->stream_buf_off) {
462 tmp_iotte_flag ^= IOTTE_STREAM;
463 }
464
465 #if defined(DEBUG) && defined(IO_MEMUSAGE)
466 iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
467 iomemp->rdip = mp->dmai_rdip;
468 iomemp->ioaddr = ioaddr;
469 iomemp->addr = addr;
470 iomemp->npages = npages;
471 pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
472 KM_SLEEP);
473 #endif /* DEBUG && IO_MEMUSAGE */
474 /*
475 * Grab the mappings from the dmmu and stick 'em into the
476 * iommu.
477 */
478 ASSERT(npages != 0);
479
480 /* If we're going to flush the TLB using diag mode, do it now. */
481 if (diag_tlb_flush)
482 iommu_tlb_flush(softsp, ioaddr, npages);
483
484 do {
485 uint64_t iotte_flag = tmp_iotte_flag;
486
487 /*
488 * Fetch the pfn for the DMA object
489 */
490
491 ASSERT(as);
492 pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
493 ASSERT(pfn != PFN_INVALID);
494
495 if (!pf_is_memory(pfn)) {
496 /* DVMA'ing to IO space */
497
498 /* Turn off cache bit if set */
499 if (iotte_flag & IOTTE_CACHE)
500 iotte_flag ^= IOTTE_CACHE;
501
502 /* Turn off stream bit if set */
503 if (iotte_flag & IOTTE_STREAM)
504 iotte_flag ^= IOTTE_STREAM;
505
506 if (IS_INTRA_SBUS(softsp, pfn)) {
507 /* Intra sbus transfer */
508
509 /* Turn on intra flag */
510 iotte_flag |= IOTTE_INTRA;
511
512 DPRINTF(IOMMU_INTER_INTRA_XFER, (
513 "Intra xfer pfnum %lx TTE %lx\n",
514 pfn, iotte_flag));
515 } else {
516 if (pf_is_dmacapable(pfn) == 1) {
517 /*EMPTY*/
518 DPRINTF(IOMMU_INTER_INTRA_XFER,
519 ("Inter xfer pfnum %lx "
520 "tte hi %lx\n",
521 pfn, iotte_flag));
522 } else {
523 rval = DDI_DMA_NOMAPPING;
524 #if defined(DEBUG) && defined(IO_MEMDEBUG)
525 goto bad;
526 #endif /* DEBUG && IO_MEMDEBUG */
527 }
528 }
529 }
530 addr += IOMMU_PAGESIZE;
531
532 DPRINTF(IOMMU_TTE, ("vaddr mapping: tte index %p pfn %lx "
533 "tte flag %lx addr %lx ioaddr %x\n",
534 (void *)iotte_ptr, pfn, iotte_flag, addr, ioaddr));
535
536 /* Flush the IOMMU TLB before loading a new mapping */
537 if (!diag_tlb_flush)
538 iommu_tlb_flush(softsp, ioaddr, 1);
539
540 /* Set the hardware IO TTE */
541 *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
542
543 ioaddr += IOMMU_PAGESIZE;
544 npages--;
545 iotte_ptr++;
546 #if defined(DEBUG) && defined(IO_MEMUSAGE)
547 *pfnp = pfn;
548 pfnp++;
549 #endif /* DEBUG && IO_MEMUSAGE */
550 } while (npages != 0);
551
552 #if defined(DEBUG) && defined(IO_MEMUSAGE)
553 mutex_enter(&softsp->iomemlock);
554 iomemp->next = softsp->iomem;
555 softsp->iomem = iomemp;
556 mutex_exit(&softsp->iomemlock);
557 #endif /* DEBUG && IO_MEMUSAGE */
558
559 return (rval);
560
561 #if defined(DEBUG) && defined(IO_MEMDEBUG)
562 bad:
563 /* If we fail a mapping, free up any mapping resources used */
564 iommu_remove_mappings(mp);
565 return (rval);
566 #endif /* DEBUG && IO_MEMDEBUG */
567 }
568
569
570 int
571 iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
572 {
573 pfn_t pfn;
574 pgcnt_t npages;
575 ioaddr_t ioaddr;
576 uint_t offset;
577 volatile uint64_t *iotte_ptr;
578 uint64_t tmp_iotte_flag;
579 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
580 struct sbus_soft_state *softsp = mppriv->softsp;
581 int diag_tlb_flush;
582 #if defined(DEBUG) && defined(IO_MEMUSAGE)
583 struct io_mem_list *iomemp;
584 pfn_t *pfnp;
585 #endif /* DEBUG && IO_MEMUSAGE */
586 int rval = DDI_DMA_MAPPED;
587
588 /* Set Valid and Cache for mem xfer */
589 tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
590
591 ASSERT(softsp != NULL);
592
593 offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
594 npages = iommu_btopr(mp->dmai_size + offset);
595 ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
596 iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
597 diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
598
599 /*
600 * Set the per object bits of the TTE here. We optimize this for
601 * the memory case so that the while loop overhead is minimal.
602 */
603 if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
604 /* Turn on NOSYNC if we need consistent mem */
605 mp->dmai_rflags |= DMP_NOSYNC;
606 tmp_iotte_flag ^= IOTTE_STREAM;
607 } else if (softsp->stream_buf_off) {
608 /* Set streaming mode if not consistent mem */
609 tmp_iotte_flag ^= IOTTE_STREAM;
610 }
611
612 #if defined(DEBUG) && defined(IO_MEMUSAGE)
613 iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
614 iomemp->rdip = mp->dmai_rdip;
615 iomemp->ioaddr = ioaddr;
616 iomemp->npages = npages;
617 pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
618 KM_SLEEP);
619 #endif /* DEBUG && IO_MEMUSAGE */
620 /*
621 * Grab the mappings from the dmmu and stick 'em into the
622 * iommu.
623 */
624 ASSERT(npages != 0);
625
626 /* If we're going to flush the TLB using diag mode, do it now. */
627 if (diag_tlb_flush)
628 iommu_tlb_flush(softsp, ioaddr, npages);
629
630 do {
631 uint64_t iotte_flag;
632
633 iotte_flag = tmp_iotte_flag;
634
635 if (pp != NULL) {
636 pfn = pp->p_pagenum;
637 pp = pp->p_next;
638 } else {
639 pfn = (*pplist)->p_pagenum;
640 pplist++;
641 }
642
643 DPRINTF(IOMMU_TTE, ("pp mapping TTE index %p pfn %lx "
644 "tte flag %lx ioaddr %x\n", (void *)iotte_ptr,
645 pfn, iotte_flag, ioaddr));
646
647 /* Flush the IOMMU TLB before loading a new mapping */
648 if (!diag_tlb_flush)
649 iommu_tlb_flush(softsp, ioaddr, 1);
650
651 /* Set the hardware IO TTE */
652 *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
653
654 ioaddr += IOMMU_PAGESIZE;
655 npages--;
656 iotte_ptr++;
657
658 #if defined(DEBUG) && defined(IO_MEMUSAGE)
659 *pfnp = pfn;
660 pfnp++;
661 #endif /* DEBUG && IO_MEMUSAGE */
662
663 } while (npages != 0);
664
665 #if defined(DEBUG) && defined(IO_MEMUSAGE)
666 mutex_enter(&softsp->iomemlock);
667 iomemp->next = softsp->iomem;
668 softsp->iomem = iomemp;
669 mutex_exit(&softsp->iomemlock);
670 #endif /* DEBUG && IO_MEMUSAGE */
671
672 return (rval);
673 }
674
675
676 int
677 iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip,
678 struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64,
679 uint_t *minxferp, uint_t dma_flags)
680 {
681 struct regspec *rp;
682
683 /* Take care of 64 bit limits. */
684 if (!(dma_flags & DDI_DMA_SBUS_64BIT)) {
685 /*
686 * return burst size for 32-bit mode
687 */
688 *burstsizep &= softsp->sbus_burst_sizes;
689 return (DDI_FAILURE);
690 }
691
692 /*
693  * Check whether the SBus supports 64-bit transfers and whether the
694  * caller is a direct child of the SBus; there is no support through bridges.
695 */
696 if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) {
697  * The SBus doesn't support 64-bit transfers, or the caller sits
698  * behind a bridge. Do 32-bit xfers.
699 * xfers
700 */
701 *burstsizep &= softsp->sbus_burst_sizes;
702 return (DDI_FAILURE);
703 }
704
705 rp = ddi_rnumber_to_regspec(rdip, 0);
706 if (rp == NULL) {
707 *burstsizep &= softsp->sbus_burst_sizes;
708 return (DDI_FAILURE);
709 }
710
711 /* Check for old-style 64 bit burstsizes */
712 if (burstsize64 & SYSIO64_BURST_MASK) {
713  /* Scale back burstsizes if necessary */
714 *burstsizep &= (softsp->sbus64_burst_sizes |
715 softsp->sbus_burst_sizes);
716 } else {
717 /* Get the 64 bit burstsizes. */
718 *burstsizep = burstsize64;
719
720  /* Scale back burstsizes if necessary */
721 *burstsizep &= (softsp->sbus64_burst_sizes >>
722 SYSIO64_BURST_SHIFT);
723 }
724
725 /*
726  * Set minxfer to the larger of the device's minimum transfer size
727  * and the smallest burstsize the bus can manage.
728 */
729 *minxferp = MAX(*minxferp,
730 (1 << (ddi_ffs(softsp->sbus64_burst_sizes) - 1)));
731
732 return (DDI_SUCCESS);
733 }
734
735
736 int
737 iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
738 ddi_dma_attr_t *dma_attr, int (*waitfp)(caddr_t), caddr_t arg,
739 ddi_dma_handle_t *handlep)
740 {
741 ioaddr_t addrlow, addrhigh, segalign;
742 ddi_dma_impl_t *mp;
743 struct dma_impl_priv *mppriv;
744 struct sbus_soft_state *softsp = (struct sbus_soft_state *)
745 ddi_get_soft_state(sbusp, ddi_get_instance(dip));
746
747 /*
748  * Set up DMA burstsizes and min-xfer counts.
749 */
750 (void) iommu_dma_lim_setup(dip, rdip, softsp,
751 &dma_attr->dma_attr_burstsizes,
752 dma_attr->dma_attr_burstsizes, &dma_attr->dma_attr_minxfer,
753 dma_attr->dma_attr_flags);
754
755 if (dma_attr->dma_attr_burstsizes == 0)
756 return (DDI_DMA_BADATTR);
757
758 addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
759 addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
760 segalign = (ioaddr_t)dma_attr->dma_attr_seg;
761
762 /*
763 * Check sanity for hi and lo address limits
764 */
765 if ((addrhigh <= addrlow) ||
766 (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
767 return (DDI_DMA_BADATTR);
768 }
769 if (dma_attr->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL)
770 return (DDI_DMA_BADATTR);
771
772 mppriv = kmem_zalloc(sizeof (*mppriv),
773 (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
774
775 if (mppriv == NULL) {
776 if (waitfp != DDI_DMA_DONTWAIT) {
777 ddi_set_callback(waitfp, arg,
778 &softsp->dvma_call_list_id);
779 }
780 return (DDI_DMA_NORESOURCES);
781 }
782 mp = (ddi_dma_impl_t *)mppriv;
783
784 DPRINTF(IOMMU_DMA_ALLOCHDL_DEBUG, ("dma_allochdl: (%s) handle %p "
785 "hi %x lo %x min %x burst %x\n",
786 ddi_get_name(dip), (void *)mp, addrhigh, addrlow,
787 dma_attr->dma_attr_minxfer, dma_attr->dma_attr_burstsizes));
788
789 mp->dmai_rdip = rdip;
790 mp->dmai_minxfer = (uint_t)dma_attr->dma_attr_minxfer;
791 mp->dmai_burstsizes = (uint_t)dma_attr->dma_attr_burstsizes;
792 mp->dmai_attr = *dma_attr;
793 /* See if the DMA engine has any limit restrictions. */
794 if (segalign == (ioaddr_t)UINT32_MAX &&
795 addrhigh == (ioaddr_t)UINT32_MAX &&
796 (dma_attr->dma_attr_align <= IOMMU_PAGESIZE) && addrlow == 0) {
797 mp->dmai_rflags |= DMP_NOLIMIT;
798 }
799 mppriv->softsp = softsp;
800 mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
801
802 *handlep = (ddi_dma_handle_t)mp;
803 return (DDI_SUCCESS);
804 }
805
806 /*ARGSUSED*/
807 int
808 iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
809 {
810 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)handle;
811 struct sbus_soft_state *softsp = mppriv->softsp;
812 ASSERT(softsp != NULL);
813
814 kmem_free(mppriv, sizeof (*mppriv));
815
816 if (softsp->dvma_call_list_id != 0) {
817 ddi_run_callback(&softsp->dvma_call_list_id);
818 }
819 return (DDI_SUCCESS);
820 }
821
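/*
 * Clamp the request size against the DMA engine's segment boundary and
 * addressable range.  Without DDI_DMA_PARTIAL an oversized request fails
 * with DDI_DMA_TOOBIG; otherwise *size is trimmed to what fits.
 */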
822 static int
823 check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr,
824 uint32_t *size)
825 {
826 ioaddr_t addrlow;
827 ioaddr_t addrhigh;
828 uint32_t segalign;
829 uint32_t smask;
830
831 smask = *size - 1;
832 segalign = dma_attr->dma_attr_seg;
833 if (smask > segalign) {
834 if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
835 return (DDI_DMA_TOOBIG);
836 *size = segalign + 1;
837 }
838 addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
839 addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
840 if (addrlow + smask > addrhigh || addrlow + smask < addrlow) {
841 if (!((addrlow + dmareq->dmar_object.dmao_size == 0) &&
842 (addrhigh == (ioaddr_t)-1))) {
843 if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
844 return (DDI_DMA_TOOBIG);
845 *size = MIN(addrhigh - addrlow + 1, *size);
846 }
847 }
848 return (DDI_DMA_MAPOK);
849 }
850
851 int
852 iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
853 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
854 ddi_dma_cookie_t *cp, uint_t *ccountp)
855 {
856 page_t *pp;
857 uint32_t size;
858 ioaddr_t ioaddr;
859 uint_t offset;
860 uintptr_t addr = 0;
861 pgcnt_t npages;
862 int rval;
863 ddi_dma_attr_t *dma_attr;
864 struct sbus_soft_state *softsp;
865 struct page **pplist = NULL;
866 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
867 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
868
869 #ifdef lint
870 dip = dip;
871 rdip = rdip;
872 #endif
873
874 if (mp->dmai_inuse)
875 return (DDI_DMA_INUSE);
876
877 dma_attr = &mp->dmai_attr;
878 size = (uint32_t)dmareq->dmar_object.dmao_size;
879 if (!(mp->dmai_rflags & DMP_NOLIMIT)) {
880 rval = check_dma_attr(dmareq, dma_attr, &size);
881 if (rval != DDI_DMA_MAPOK)
882 return (rval);
883 }
884 mp->dmai_inuse = 1;
885 mp->dmai_offset = 0;
886 mp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
887 (mp->dmai_rflags & DMP_NOLIMIT);
888
889 switch (dmareq->dmar_object.dmao_type) {
890 case DMA_OTYP_VADDR:
891 case DMA_OTYP_BUFVADDR:
892 addr = (uintptr_t)dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
893 offset = addr & IOMMU_PAGEOFFSET;
894 pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
895 npages = iommu_btopr(OBJSIZE + offset);
896
897 DPRINTF(IOMMU_DMAMAP_DEBUG, ("dma_map vaddr: %lx pages "
898 "req addr %lx off %x OBJSIZE %x\n",
899 npages, addr, offset, OBJSIZE));
900
901 /* We don't need the addr anymore if we have a shadow list */
902 if (pplist != NULL)
903 			addr = 0;
904 pp = NULL;
905 break;
906
907 case DMA_OTYP_PAGES:
908 pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
909 offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
910 npages = iommu_btopr(OBJSIZE + offset);
911 break;
912
913 case DMA_OTYP_PADDR:
914 default:
915 /*
916 * Not a supported type for this implementation
917 */
918 rval = DDI_DMA_NOMAPPING;
919 goto bad;
920 }
921
922 /* Get our soft state once we know we're mapping an object. */
923 softsp = mppriv->softsp;
924 ASSERT(softsp != NULL);
925
926 if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
927 if (size != OBJSIZE) {
928 /*
929 * If the request is for partial mapping arrangement,
930 * the device has to be able to address at least the
931 * size of the window we are establishing.
932 */
933 if (size < iommu_ptob(MIN_DVMA_WIN_SIZE)) {
934 rval = DDI_DMA_NOMAPPING;
935 goto bad;
936 }
937 npages = iommu_btopr(size + offset);
938 }
939 /*
940  * If the size requested is less than a moderate amount,
941  * skip the partial mapping; it's not worth the
942 * effort.
943 */
944 if (npages > MIN_DVMA_WIN_SIZE) {
945 npages = MIN_DVMA_WIN_SIZE + iommu_btopr(offset);
946 size = iommu_ptob(MIN_DVMA_WIN_SIZE);
947 DPRINTF(IOMMU_DMA_SETUP_DEBUG, ("dma_setup: SZ %x pg "
948 "%lx sz %x\n", OBJSIZE, npages, size));
949 if (pplist != NULL) {
950 mp->dmai_minfo = (void *)pplist;
951 mp->dmai_rflags |= DMP_SHADOW;
952 }
953 } else {
954 mp->dmai_rflags ^= DDI_DMA_PARTIAL;
955 }
956 } else {
957 if (npages >= iommu_btop(softsp->iommu_dvma_size) -
958 MIN_DVMA_WIN_SIZE) {
959 rval = DDI_DMA_TOOBIG;
960 goto bad;
961 }
962 }
963
964 /*
965  * Save the dmareq object, size, and npages into mp.
966 */
967 mp->dmai_object = dmareq->dmar_object;
968 mp->dmai_size = size;
969 mp->dmai_ndvmapages = npages;
970
971 if (mp->dmai_rflags & DMP_NOLIMIT) {
972 ioaddr = (ioaddr_t)(uintptr_t)vmem_alloc(softsp->dvma_arena,
973 iommu_ptob(npages),
974 dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
975 if (ioaddr == 0) {
976 rval = DDI_DMA_NORESOURCES;
977 goto bad;
978 }
979
980 /*
981 * If we have a 1 page request and we're working with a page
982 * list, we're going to speed load an IOMMU entry.
983 */
984 if (npages == 1 && !addr) {
985 uint64_t iotte_flag = IOTTE_VALID | IOTTE_CACHE |
986 IOTTE_WRITE | IOTTE_STREAM;
987 volatile uint64_t *iotte_ptr;
988 pfn_t pfn;
989 #if defined(DEBUG) && defined(IO_MEMUSAGE)
990 struct io_mem_list *iomemp;
991 pfn_t *pfnp;
992 #endif /* DEBUG && IO_MEMUSAGE */
993
994 iotte_ptr = IOTTE_NDX(ioaddr,
995 softsp->soft_tsb_base_addr);
996
997 if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
998 mp->dmai_rflags |= DMP_NOSYNC;
999 iotte_flag ^= IOTTE_STREAM;
1000 } else if (softsp->stream_buf_off)
1001 iotte_flag ^= IOTTE_STREAM;
1002
1003 mp->dmai_rflags ^= DDI_DMA_PARTIAL;
1004
1005 if (pp != NULL)
1006 pfn = pp->p_pagenum;
1007 else
1008 pfn = (*pplist)->p_pagenum;
1009
1010 iommu_tlb_flush(softsp, ioaddr, 1);
1011
1012 *iotte_ptr =
1013 ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
1014
1015 mp->dmai_mapping = (ioaddr_t)(ioaddr + offset);
1016 mp->dmai_nwin = 0;
1017 if (cp != NULL) {
1018 cp->dmac_notused = 0;
1019 cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1020 cp->dmac_size = mp->dmai_size;
1021 cp->dmac_type = 0;
1022 *ccountp = 1;
1023 }
1024
1025 DPRINTF(IOMMU_TTE, ("speed loading: TTE index %p "
1026 "pfn %lx tte flag %lx addr %lx ioaddr %x\n",
1027 (void *)iotte_ptr, pfn, iotte_flag, addr, ioaddr));
1028
1029 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1030 iomemp = kmem_alloc(sizeof (struct io_mem_list),
1031 KM_SLEEP);
1032 iomemp->rdip = mp->dmai_rdip;
1033 iomemp->ioaddr = ioaddr;
1034 iomemp->addr = addr;
1035 iomemp->npages = npages;
1036 pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) *
1037 (npages + 1), KM_SLEEP);
1038 *pfnp = pfn;
1039 mutex_enter(&softsp->iomemlock);
1040 iomemp->next = softsp->iomem;
1041 softsp->iomem = iomemp;
1042 mutex_exit(&softsp->iomemlock);
1043 #endif /* DEBUG && IO_MEMUSAGE */
1044
1045 return (DDI_DMA_MAPPED);
1046 }
1047 } else {
1048 ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
1049 iommu_ptob(npages),
1050 MAX((uint_t)dma_attr->dma_attr_align, IOMMU_PAGESIZE), 0,
1051 (uint_t)dma_attr->dma_attr_seg + 1,
1052 (void *)(uintptr_t)(ioaddr_t)dma_attr->dma_attr_addr_lo,
1053 (void *)(uintptr_t)
1054 ((ioaddr_t)dma_attr->dma_attr_addr_hi + 1),
1055 dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
1056 }
1057
1058 if (ioaddr == 0) {
1059 if (dmareq->dmar_fp == DDI_DMA_SLEEP)
1060 rval = DDI_DMA_NOMAPPING;
1061 else
1062 rval = DDI_DMA_NORESOURCES;
1063 goto bad;
1064 }
1065
1066 mp->dmai_mapping = ioaddr + offset;
1067 ASSERT(mp->dmai_mapping >= softsp->iommu_dvma_base);
1068
1069 /*
1070  * At this point we have a range of DVMA addresses allocated;
1071  * now map it to the requested object.
1072 */
1073 if (addr) {
1074 rval = iommu_create_vaddr_mappings(mp,
1075 addr & ~IOMMU_PAGEOFFSET);
1076 if (rval == DDI_DMA_NOMAPPING)
1077 goto bad_nomap;
1078 } else {
1079 rval = iommu_create_pp_mappings(mp, pp, pplist);
1080 if (rval == DDI_DMA_NOMAPPING)
1081 goto bad_nomap;
1082 }
1083
1084 if (cp) {
1085 cp->dmac_notused = 0;
1086 cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1087 cp->dmac_size = mp->dmai_size;
1088 cp->dmac_type = 0;
1089 *ccountp = 1;
1090 }
1091 if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
1092 size = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
1093 mp->dmai_nwin =
1094 (dmareq->dmar_object.dmao_size + (size - 1)) / size;
1095 return (DDI_DMA_PARTIAL_MAP);
1096 } else {
1097 mp->dmai_nwin = 0;
1098 return (DDI_DMA_MAPPED);
1099 }
1100
1101 bad_nomap:
1102 /*
1103 * Could not create mmu mappings.
1104 */
1105 if (mp->dmai_rflags & DMP_NOLIMIT) {
1106 vmem_free(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
1107 iommu_ptob(npages));
1108 } else {
1109 vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
1110 iommu_ptob(npages));
1111 }
1112
1113 bad:
1114 if (rval == DDI_DMA_NORESOURCES &&
1115 dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
1116 ddi_set_callback(dmareq->dmar_fp,
1117 dmareq->dmar_arg, &softsp->dvma_call_list_id);
1118 }
1119 mp->dmai_inuse = 0;
1120 return (rval);
1121 }
1122
1123 /* ARGSUSED */
1124 int
1125 iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1126 ddi_dma_handle_t handle)
1127 {
1128 ioaddr_t addr;
1129 uint_t npages;
1130 size_t size;
1131 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1132 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1133 struct sbus_soft_state *softsp = mppriv->softsp;
1134 ASSERT(softsp != NULL);
1135
1136 addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
1137 npages = mp->dmai_ndvmapages;
1138 size = iommu_ptob(npages);
1139
1140 DPRINTF(IOMMU_DMA_UNBINDHDL_DEBUG, ("iommu_dma_unbindhdl: "
1141 "unbinding addr %x for %x pages\n", addr, mp->dmai_ndvmapages));
1142
1143 /* sync the entire object */
1144 if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1145 /* flush stream write buffers */
1146 sync_stream_buf(softsp, addr, npages, (int *)&mppriv->sync_flag,
1147 mppriv->phys_sync_flag);
1148 }
1149
1150 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1151 /*
1152 * 'Free' the dma mappings.
1153 */
1154 iommu_remove_mappings(mp);
1155 #endif /* DEBUG && IO_MEMDEBUG */
1156
1157 ASSERT(npages > (uint_t)0);
1158 if (mp->dmai_rflags & DMP_NOLIMIT)
1159 vmem_free(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
1160 else
1161 vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
1162
1163 mp->dmai_ndvmapages = 0;
1164 mp->dmai_inuse = 0;
1165 mp->dmai_minfo = NULL;
1166
1167 if (softsp->dvma_call_list_id != 0)
1168 ddi_run_callback(&softsp->dvma_call_list_id);
1169
1170 return (DDI_SUCCESS);
1171 }
1172
1173 /*ARGSUSED*/
1174 int
1175 iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip,
1176 ddi_dma_handle_t handle, off_t off, size_t len,
1177 uint_t cache_flags)
1178 {
1179 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1180 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1181
1182 if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1183 sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
1184 mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
1185 mppriv->phys_sync_flag);
1186 }
1187 return (DDI_SUCCESS);
1188 }
1189
1190 /*ARGSUSED*/
1191 int
1192 iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
1193 ddi_dma_handle_t handle, uint_t win, off_t *offp,
1194 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1195 {
1196 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1197 off_t offset;
1198 uint_t winsize;
1199 uint_t newoff;
1200 int rval;
1201
1202 offset = mp->dmai_mapping & IOMMU_PAGEOFFSET;
1203 winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
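	/*
	 * Each window covers winsize bytes; iommu_map_window() trims
	 * dmai_size for the final, partial window.
	 */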
1204
1205 DPRINTF(IOMMU_DMA_WIN_DEBUG, ("getwin win %d winsize %x\n", win,
1206 winsize));
1207
1208 /*
1209 * win is in the range [0 .. dmai_nwin-1]
1210 */
1211 if (win >= mp->dmai_nwin)
1212 return (DDI_FAILURE);
1213
1214 newoff = win * winsize;
1215 if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer)
1216 return (DDI_FAILURE);
1217
1218 ASSERT(cookiep);
1219 cookiep->dmac_notused = 0;
1220 cookiep->dmac_type = 0;
1221 cookiep->dmac_address = (ioaddr_t)mp->dmai_mapping;
1222 cookiep->dmac_size = mp->dmai_size;
1223 *ccountp = 1;
1224 *offp = (off_t)newoff;
1225 *lenp = (uint_t)winsize;
1226
1227 if (newoff == mp->dmai_offset) {
1228 /*
1229 * Nothing to do...
1230 */
1231 return (DDI_SUCCESS);
1232 }
1233
1234 if ((rval = iommu_map_window(mp, newoff, winsize)) != DDI_SUCCESS)
1235 return (rval);
1236
1237 /*
1238 * Set this again in case iommu_map_window() has changed it
1239 */
1240 cookiep->dmac_size = mp->dmai_size;
1241
1242 return (DDI_SUCCESS);
1243 }
1244
1245 static int
1246 iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize)
1247 {
1248 uintptr_t addr = 0;
1249 page_t *pp;
1250 uint_t flags;
1251 struct page **pplist = NULL;
1252
1253 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1254 /* Free mappings for current window */
1255 iommu_remove_mappings(mp);
1256 #endif /* DEBUG && IO_MEMDEBUG */
1257
1258 mp->dmai_offset = newoff;
1259 mp->dmai_size = mp->dmai_object.dmao_size - newoff;
1260 mp->dmai_size = MIN(mp->dmai_size, winsize);
1261
1262 if (mp->dmai_object.dmao_type == DMA_OTYP_VADDR ||
1263 mp->dmai_object.dmao_type == DMA_OTYP_BUFVADDR) {
1264 if (mp->dmai_rflags & DMP_SHADOW) {
1265 pplist = (struct page **)mp->dmai_minfo;
1266 ASSERT(pplist != NULL);
1267 pplist = pplist + (newoff >> MMU_PAGESHIFT);
1268 } else {
1269 addr = (uintptr_t)
1270 mp->dmai_object.dmao_obj.virt_obj.v_addr;
1271 addr = (addr + newoff) & ~IOMMU_PAGEOFFSET;
1272 }
1273 pp = NULL;
1274 } else {
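		/*
		 * Page-list object: 'flags' is reused as a byte offset to
		 * walk the page list up to the page that backs 'newoff'.
		 */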
1275 pp = mp->dmai_object.dmao_obj.pp_obj.pp_pp;
1276 flags = 0;
1277 while (flags < newoff) {
1278 pp = pp->p_next;
1279 flags += MMU_PAGESIZE;
1280 }
1281 }
1282
1283 /* Set up mappings for next window */
1284 if (addr) {
1285 if (iommu_create_vaddr_mappings(mp, addr) < 0)
1286 return (DDI_FAILURE);
1287 } else {
1288 if (iommu_create_pp_mappings(mp, pp, pplist) < 0)
1289 return (DDI_FAILURE);
1290 }
1291
1292 /*
1293 * also invalidate read stream buffer
1294 */
1295 if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1296 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1297
1298 sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
1299 mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
1300 mppriv->phys_sync_flag);
1301 }
1302
1303 return (DDI_SUCCESS);
1304
1305 }
1306
1307 int
1308 iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
1309 struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
1310 {
1311 ddi_dma_lim_t *dma_lim = dmareq->dmar_limits;
1312 ddi_dma_impl_t *mp;
1313 ddi_dma_attr_t *dma_attr;
1314 struct dma_impl_priv *mppriv;
1315 ioaddr_t addrlow, addrhigh;
1316 ioaddr_t segalign;
1317 int rval;
1318 struct sbus_soft_state *softsp =
1319 (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
1320 ddi_get_instance(dip));
1321
1322 addrlow = dma_lim->dlim_addr_lo;
1323 addrhigh = dma_lim->dlim_addr_hi;
1324 if ((addrhigh <= addrlow) ||
1325 (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
1326 return (DDI_DMA_NOMAPPING);
1327 }
1328
1329 /*
1330  * Set up DMA burstsizes and min-xfer counts.
1331 */
1332 (void) iommu_dma_lim_setup(dip, rdip, softsp, &dma_lim->dlim_burstsizes,
1333 (uint_t)dma_lim->dlim_burstsizes, &dma_lim->dlim_minxfer,
1334 dmareq->dmar_flags);
1335
1336 if (dma_lim->dlim_burstsizes == 0)
1337 return (DDI_DMA_NOMAPPING);
1338 /*
1339 * If not an advisory call, get a DMA handle
1340 */
1341 if (!handlep) {
1342 return (DDI_DMA_MAPOK);
1343 }
1344
1345 mppriv = kmem_zalloc(sizeof (*mppriv),
1346 (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
1347 if (mppriv == NULL) {
1348 if (dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
1349 ddi_set_callback(dmareq->dmar_fp,
1350 dmareq->dmar_arg, &softsp->dvma_call_list_id);
1351 }
1352 return (DDI_DMA_NORESOURCES);
1353 }
1354 mp = (ddi_dma_impl_t *)mppriv;
1355 mp->dmai_rdip = rdip;
1356 mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1357 mp->dmai_minxfer = dma_lim->dlim_minxfer;
1358 mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
1359 mp->dmai_offset = 0;
1360 mp->dmai_ndvmapages = 0;
1361 mp->dmai_minfo = 0;
1362 mp->dmai_inuse = 0;
1363 segalign = dma_lim->dlim_cntr_max;
1364 /* See if the DMA engine has any limit restrictions. */
1365 if (segalign == UINT32_MAX && addrhigh == UINT32_MAX &&
1366 addrlow == 0) {
1367 mp->dmai_rflags |= DMP_NOLIMIT;
1368 }
1369 mppriv->softsp = softsp;
1370 mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
1371 dma_attr = &mp->dmai_attr;
1372 dma_attr->dma_attr_align = 1;
1373 dma_attr->dma_attr_addr_lo = addrlow;
1374 dma_attr->dma_attr_addr_hi = addrhigh;
1375 dma_attr->dma_attr_seg = segalign;
1376 dma_attr->dma_attr_burstsizes = dma_lim->dlim_burstsizes;
1377 rval = iommu_dma_bindhdl(dip, rdip, (ddi_dma_handle_t)mp,
1378 dmareq, NULL, NULL);
1379 if (rval && (rval != DDI_DMA_PARTIAL_MAP)) {
1380 kmem_free(mppriv, sizeof (*mppriv));
1381 } else {
1382 *handlep = (ddi_dma_handle_t)mp;
1383 }
1384 return (rval);
1385 }
1386
1387 /*ARGSUSED*/
1388 int
1389 iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
1390 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
1391 off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags)
1392 {
1393 ioaddr_t addr;
1394 uint_t offset;
1395 pgcnt_t npages;
1396 size_t size;
1397 ddi_dma_cookie_t *cp;
1398 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1399
1400 DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: handle %p ", (void *)mp));
1401 switch (request) {
1402 case DDI_DMA_FREE:
1403 {
1404 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1405 struct sbus_soft_state *softsp = mppriv->softsp;
1406 ASSERT(softsp != NULL);
1407
1408 /*
1409 * 'Free' the dma mappings.
1410 */
1411 addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
1412 npages = mp->dmai_ndvmapages;
1413 size = iommu_ptob(npages);
1414
1415 DPRINTF(IOMMU_DMAMCTL_DMA_FREE_DEBUG, ("iommu_dma_mctl dmafree:"
1416 "freeing vaddr %x for %x pages.\n", addr,
1417 mp->dmai_ndvmapages));
1418 /* sync the entire object */
1419 if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1420 /* flush stream write buffers */
1421 sync_stream_buf(softsp, addr, npages,
1422 (int *)&mppriv->sync_flag, mppriv->phys_sync_flag);
1423 }
1424
1425 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1426 iommu_remove_mappings(mp);
1427 #endif /* DEBUG && IO_MEMDEBUG */
1428
1429 ASSERT(npages > (uint_t)0);
1430 if (mp->dmai_rflags & DMP_NOLIMIT)
1431 vmem_free(softsp->dvma_arena,
1432 (void *)(uintptr_t)addr, size);
1433 else
1434 vmem_xfree(softsp->dvma_arena,
1435 (void *)(uintptr_t)addr, size);
1436
1437 kmem_free(mppriv, sizeof (*mppriv));
1438
1439 if (softsp->dvma_call_list_id != 0)
1440 ddi_run_callback(&softsp->dvma_call_list_id);
1441
1442 break;
1443 }
1444
1445 case DDI_DMA_SET_SBUS64:
1446 {
1447 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1448
1449 return (iommu_dma_lim_setup(dip, rdip, mppriv->softsp,
1450 &mp->dmai_burstsizes, (uint_t)*lenp, &mp->dmai_minxfer,
1451 DDI_DMA_SBUS_64BIT));
1452 }
1453
1454 case DDI_DMA_HTOC:
1455 DPRINTF(IOMMU_DMAMCTL_HTOC_DEBUG, ("htoc off %lx mapping %lx "
1456 "size %x\n", *offp, mp->dmai_mapping,
1457 mp->dmai_size));
1458
1459 if ((uint_t)(*offp) >= mp->dmai_size)
1460 return (DDI_FAILURE);
1461
1462 cp = (ddi_dma_cookie_t *)objp;
1463 cp->dmac_notused = 0;
1464 cp->dmac_address = (mp->dmai_mapping + (uint_t)(*offp));
1465 cp->dmac_size =
1466 mp->dmai_mapping + mp->dmai_size - cp->dmac_address;
1467 cp->dmac_type = 0;
1468
1469 break;
1470
1471 case DDI_DMA_KVADDR:
1472 /*
1473 * If a physical address mapping has percolated this high,
1474 * that is an error (maybe?).
1475 */
1476 if (mp->dmai_rflags & DMP_PHYSADDR) {
1477 DPRINTF(IOMMU_DMAMCTL_KVADDR_DEBUG, ("kvaddr of phys "
1478 "mapping\n"));
1479 return (DDI_FAILURE);
1480 }
1481
1482 return (DDI_FAILURE);
1483
1484 case DDI_DMA_NEXTWIN:
1485 {
1486 ddi_dma_win_t *owin, *nwin;
1487 uint_t winsize, newoff;
1488 int rval;
1489
1490 DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin\n"));
1491
1492 mp = (ddi_dma_impl_t *)handle;
1493 owin = (ddi_dma_win_t *)offp;
1494 nwin = (ddi_dma_win_t *)objp;
1495 if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
1496 if (*owin == NULL) {
1497 DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG,
1498 ("nextwin: win == NULL\n"));
1499 mp->dmai_offset = 0;
1500 *nwin = (ddi_dma_win_t)mp;
1501 return (DDI_SUCCESS);
1502 }
1503
1504 offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
1505 winsize = iommu_ptob(mp->dmai_ndvmapages -
1506 iommu_btopr(offset));
1507
1508 newoff = (uint_t)(mp->dmai_offset + winsize);
1509 if (newoff > mp->dmai_object.dmao_size -
1510 mp->dmai_minxfer)
1511 return (DDI_DMA_DONE);
1512
1513 if ((rval = iommu_map_window(mp, newoff, winsize))
1514 != DDI_SUCCESS)
1515 return (rval);
1516 } else {
1517 DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin: no "
1518 "partial mapping\n"));
1519 if (*owin != NULL)
1520 return (DDI_DMA_DONE);
1521 mp->dmai_offset = 0;
1522 *nwin = (ddi_dma_win_t)mp;
1523 }
1524 break;
1525 }
1526
1527 case DDI_DMA_NEXTSEG:
1528 {
1529 ddi_dma_seg_t *oseg, *nseg;
1530
1531 DPRINTF(IOMMU_DMAMCTL_NEXTSEG_DEBUG, ("nextseg:\n"));
1532
1533 oseg = (ddi_dma_seg_t *)lenp;
1534 if (*oseg != NULL)
1535 return (DDI_DMA_DONE);
1536 nseg = (ddi_dma_seg_t *)objp;
1537 *nseg = *((ddi_dma_seg_t *)offp);
1538 break;
1539 }
1540
1541 case DDI_DMA_SEGTOC:
1542 {
1543 ddi_dma_seg_impl_t *seg;
1544
1545 seg = (ddi_dma_seg_impl_t *)handle;
1546 cp = (ddi_dma_cookie_t *)objp;
1547 cp->dmac_notused = 0;
1548 cp->dmac_address = (ioaddr_t)seg->dmai_mapping;
1549 cp->dmac_size = *lenp = seg->dmai_size;
1550 cp->dmac_type = 0;
1551 *offp = seg->dmai_offset;
1552 break;
1553 }
1554
1555 case DDI_DMA_MOVWIN:
1556 {
1557 uint_t winsize;
1558 uint_t newoff;
1559 int rval;
1560
1561 offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
1562 winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
1563
1564 DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("movwin off %lx len %lx "
1565 "winsize %x\n", *offp, *lenp, winsize));
1566
1567 if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0)
1568 return (DDI_FAILURE);
1569
1570 if (*lenp != (uint_t)-1 && *lenp != winsize) {
1571 DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad length\n"));
1572 return (DDI_FAILURE);
1573 }
1574 newoff = (uint_t)*offp;
1575 if (newoff & (winsize - 1)) {
1576 DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad off\n"));
1577 return (DDI_FAILURE);
1578 }
1579
1580 if (newoff == mp->dmai_offset) {
1581 /*
1582 * Nothing to do...
1583 */
1584 break;
1585 }
1586
1587 /*
1588 * Check out new address...
1589 */
1590 if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer) {
1591 DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("newoff out of "
1592 "range\n"));
1593 return (DDI_FAILURE);
1594 }
1595
1596 rval = iommu_map_window(mp, newoff, winsize);
1597 if (rval != DDI_SUCCESS)
1598 return (rval);
1599
1600 if ((cp = (ddi_dma_cookie_t *)objp) != 0) {
1601 cp->dmac_notused = 0;
1602 cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1603 cp->dmac_size = mp->dmai_size;
1604 cp->dmac_type = 0;
1605 }
1606 *offp = (off_t)newoff;
1607 *lenp = (uint_t)winsize;
1608 break;
1609 }
1610
1611 case DDI_DMA_REPWIN:
1612 if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
1613 DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin fail\n"));
1614 return (DDI_FAILURE);
1615 }
1616
1617 *offp = (off_t)mp->dmai_offset;
1618
1619 addr = mp->dmai_ndvmapages -
1620 iommu_btopr(mp->dmai_mapping & IOMMU_PAGEOFFSET);
1621
1622 *lenp = (uint_t)iommu_ptob(addr);
1623
1624 DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin off %lx len %x\n",
1625 mp->dmai_offset, mp->dmai_size));
1626
1627 break;
1628
1629 case DDI_DMA_GETERR:
1630 DPRINTF(IOMMU_DMAMCTL_GETERR_DEBUG,
1631 ("iommu_dma_mctl: geterr\n"));
1632
1633 break;
1634
1635 case DDI_DMA_COFF:
1636 cp = (ddi_dma_cookie_t *)offp;
1637 addr = cp->dmac_address;
1638
1639 if (addr < mp->dmai_mapping ||
1640 addr >= mp->dmai_mapping + mp->dmai_size)
1641 return (DDI_FAILURE);
1642
1643 *objp = (caddr_t)(addr - mp->dmai_mapping);
1644
1645 DPRINTF(IOMMU_DMAMCTL_COFF_DEBUG, ("coff off %lx mapping %lx "
1646 "size %x\n", (ulong_t)*objp, mp->dmai_mapping,
1647 mp->dmai_size));
1648
1649 break;
1650
1651 case DDI_DMA_RESERVE:
1652 {
1653 struct ddi_dma_req *dmareq = (struct ddi_dma_req *)offp;
1654 ddi_dma_lim_t *dma_lim;
1655 ddi_dma_handle_t *handlep;
1656 uint_t np;
1657 ioaddr_t ioaddr;
1658 int i;
1659 struct fast_dvma *iommu_fast_dvma;
1660 struct sbus_soft_state *softsp =
1661 (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
1662 ddi_get_instance(dip));
1663
1664 /* Some simple sanity checks */
1665 dma_lim = dmareq->dmar_limits;
1666 if (dma_lim->dlim_burstsizes == 0) {
1667 DPRINTF(IOMMU_FASTDMA_RESERVE,
1668 ("Reserve: bad burstsizes\n"));
1669 return (DDI_DMA_BADLIMITS);
1670 }
1671 if ((AHI <= ALO) || (AHI < softsp->iommu_dvma_base)) {
1672 DPRINTF(IOMMU_FASTDMA_RESERVE,
1673 ("Reserve: bad limits\n"));
1674 return (DDI_DMA_BADLIMITS);
1675 }
1676
1677 np = dmareq->dmar_object.dmao_size;
1678 mutex_enter(&softsp->dma_pool_lock);
1679 if (np > softsp->dma_reserve) {
1680 mutex_exit(&softsp->dma_pool_lock);
1681 DPRINTF(IOMMU_FASTDMA_RESERVE,
1682 ("Reserve: dma_reserve is exhausted\n"));
1683 return (DDI_DMA_NORESOURCES);
1684 }
1685
1686 softsp->dma_reserve -= np;
1687 mutex_exit(&softsp->dma_pool_lock);
1688 mp = kmem_zalloc(sizeof (*mp), KM_SLEEP);
1689 mp->dmai_rflags = DMP_BYPASSNEXUS;
1690 mp->dmai_rdip = rdip;
1691 mp->dmai_minxfer = dma_lim->dlim_minxfer;
1692 mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
1693
1694 ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
1695 iommu_ptob(np), IOMMU_PAGESIZE, 0,
1696 dma_lim->dlim_cntr_max + 1,
1697 (void *)(uintptr_t)ALO, (void *)(uintptr_t)(AHI + 1),
1698 dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
1699
1700 if (ioaddr == 0) {
1701 mutex_enter(&softsp->dma_pool_lock);
1702 softsp->dma_reserve += np;
1703 mutex_exit(&softsp->dma_pool_lock);
1704 kmem_free(mp, sizeof (*mp));
1705 DPRINTF(IOMMU_FASTDMA_RESERVE,
1706 ("Reserve: No dvma resources available\n"));
1707 return (DDI_DMA_NOMAPPING);
1708 }
1709
1710 /* create a per request structure */
1711 iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma),
1712 KM_SLEEP);
1713
1714 /*
1715 * We need to remember the size of the transfer so that
1716 * we can figure the virtual pages to sync when the transfer
1717 * is complete.
1718 */
1719 iommu_fast_dvma->pagecnt = kmem_zalloc(np *
1720 sizeof (uint_t), KM_SLEEP);
1721
1722 /* Allocate a streaming cache sync flag for each index */
1723 iommu_fast_dvma->sync_flag = kmem_zalloc(np *
1724 sizeof (int), KM_SLEEP);
1725
1726 /* Allocate a physical sync flag for each index */
1727 iommu_fast_dvma->phys_sync_flag =
1728 kmem_zalloc(np * sizeof (uint64_t), KM_SLEEP);
1729
1730 for (i = 0; i < np; i++)
1731 iommu_fast_dvma->phys_sync_flag[i] = va_to_pa((caddr_t)
1732 &iommu_fast_dvma->sync_flag[i]);
1733
1734 mp->dmai_mapping = ioaddr;
1735 mp->dmai_ndvmapages = np;
1736 iommu_fast_dvma->ops = &iommu_dvma_ops;
1737 iommu_fast_dvma->softsp = (caddr_t)softsp;
1738 mp->dmai_nexus_private = (caddr_t)iommu_fast_dvma;
1739 handlep = (ddi_dma_handle_t *)objp;
1740 *handlep = (ddi_dma_handle_t)mp;
1741
1742 DPRINTF(IOMMU_FASTDMA_RESERVE,
1743 ("Reserve: mapping object %p base addr %lx size %x\n",
1744 (void *)mp, mp->dmai_mapping, mp->dmai_ndvmapages));
1745
1746 break;
1747 }
1748
1749 case DDI_DMA_RELEASE:
1750 {
1751 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1752 uint_t np = npages = mp->dmai_ndvmapages;
1753 ioaddr_t ioaddr = mp->dmai_mapping;
1754 volatile uint64_t *iotte_ptr;
1755 struct fast_dvma *iommu_fast_dvma = (struct fast_dvma *)
1756 mp->dmai_nexus_private;
1757 struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1758 iommu_fast_dvma->softsp;
1759
1760 ASSERT(softsp != NULL);
1761
1762  /* Unload stale mappings and flush stale TLB entries */
1763 iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
1764
1765 while (npages > (uint_t)0) {
1766 *iotte_ptr = (uint64_t)0; /* unload tte */
1767 iommu_tlb_flush(softsp, ioaddr, 1);
1768
1769 npages--;
1770 iotte_ptr++;
1771 ioaddr += IOMMU_PAGESIZE;
1772 }
1773
1774 ioaddr = (ioaddr_t)mp->dmai_mapping;
1775 mutex_enter(&softsp->dma_pool_lock);
1776 softsp->dma_reserve += np;
1777 mutex_exit(&softsp->dma_pool_lock);
1778
1779 if (mp->dmai_rflags & DMP_NOLIMIT)
1780 vmem_free(softsp->dvma_arena,
1781 (void *)(uintptr_t)ioaddr, iommu_ptob(np));
1782 else
1783 vmem_xfree(softsp->dvma_arena,
1784 (void *)(uintptr_t)ioaddr, iommu_ptob(np));
1785
1786 kmem_free(mp, sizeof (*mp));
1787 kmem_free(iommu_fast_dvma->pagecnt, np * sizeof (uint_t));
1788 kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int));
1789 kmem_free(iommu_fast_dvma->phys_sync_flag, np *
1790 sizeof (uint64_t));
1791 kmem_free(iommu_fast_dvma, sizeof (struct fast_dvma));
1792
1793
1794 DPRINTF(IOMMU_FASTDMA_RESERVE,
1795 ("Release: Base addr %x size %x\n", ioaddr, np));
1796 /*
1797 * Now that we've freed some resource,
1798 * if there is anybody waiting for it
1799 * try and get them going.
1800 */
1801 if (softsp->dvma_call_list_id != 0)
1802 ddi_run_callback(&softsp->dvma_call_list_id);
1803
1804 break;
1805 }
1806
1807 default:
1808 DPRINTF(IOMMU_DMAMCTL_DEBUG, ("iommu_dma_mctl: unknown option "
1809  "0x%x\n", request));
1810
1811 return (DDI_FAILURE);
1812 }
1813 return (DDI_SUCCESS);
1814 }
1815
1816 /*ARGSUSED*/
1817 void
1818 iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
1819 ddi_dma_cookie_t *cp)
1820 {
1821 uintptr_t addr;
1822 ioaddr_t ioaddr;
1823 uint_t offset;
1824 pfn_t pfn;
1825 int npages;
1826 volatile uint64_t *iotte_ptr;
1827 uint64_t iotte_flag = 0;
1828 struct as *as = NULL;
1829 extern struct as kas;
1830 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1831 struct fast_dvma *iommu_fast_dvma =
1832 (struct fast_dvma *)mp->dmai_nexus_private;
1833 struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1834 iommu_fast_dvma->softsp;
1835 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1836 struct io_mem_list *iomemp;
1837 pfn_t *pfnp;
1838 #endif /* DEBUG && IO_MEMUSAGE */
1839
1840 ASSERT(softsp != NULL);
1841
1842 addr = (uintptr_t)a;
1843 ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1844 offset = (uint_t)(addr & IOMMU_PAGEOFFSET);
1845 iommu_fast_dvma->pagecnt[index] = iommu_btopr(len + offset);
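	/* Fast DVMA only loads kernel virtual addresses, so always use the kernel hat. */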
1846 as = &kas;
1847 addr &= ~IOMMU_PAGEOFFSET;
1848 npages = iommu_btopr(len + offset);
1849
1850 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1851 iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
1852 iomemp->rdip = mp->dmai_rdip;
1853 iomemp->ioaddr = ioaddr;
1854 iomemp->addr = addr;
1855 iomemp->npages = npages;
1856 pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
1857 KM_SLEEP);
1858 #endif /* DEBUG && IO_MEMUSAGE */
1859
1860 cp->dmac_address = ioaddr | offset;
1861 cp->dmac_size = len;
1862
1863 iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
1864 /* read/write and streaming io on */
1865 iotte_flag = IOTTE_VALID | IOTTE_WRITE | IOTTE_CACHE;
1866
1867 if (mp->dmai_rflags & DDI_DMA_CONSISTENT)
1868 mp->dmai_rflags |= DMP_NOSYNC;
1869 else if (!softsp->stream_buf_off)
1870 iotte_flag |= IOTTE_STREAM;
1871
1872 DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: ioaddr %x "
1873 "size %x offset %x index %x kaddr %lx\n",
1874 ioaddr, len, offset, index, addr));
1875 ASSERT(npages > 0);
1876 do {
1877 pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
1878 if (pfn == PFN_INVALID) {
1879 DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: invalid pfn "
1880 "from hat_getpfnum()\n"));
1881 }
1882
1883 iommu_tlb_flush(softsp, ioaddr, 1);
1884
1885 /* load tte */
1886 *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
1887
1888 npages--;
1889 iotte_ptr++;
1890
1891 addr += IOMMU_PAGESIZE;
1892 ioaddr += IOMMU_PAGESIZE;
1893
1894 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1895 *pfnp = pfn;
1896 pfnp++;
1897 #endif /* DEBUG && IO_MEMUSAGE */
1898
1899 } while (npages > 0);
1900
1901 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1902 mutex_enter(&softsp->iomemlock);
1903 iomemp->next = softsp->iomem;
1904 softsp->iomem = iomemp;
1905 mutex_exit(&softsp->iomemlock);
1906 #endif /* DEBUG && IO_MEMUSAGE */
1907 }
1908
1909 /*ARGSUSED*/
1910 void
1911 iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
1912 {
1913 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1914 ioaddr_t ioaddr;
1915 pgcnt_t npages;
1916 struct fast_dvma *iommu_fast_dvma =
1917 (struct fast_dvma *)mp->dmai_nexus_private;
1918 struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1919 iommu_fast_dvma->softsp;
1920 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1921 struct io_mem_list **prevp, *walk;
1922 #endif /* DEBUG && IO_MEMUSAGE */
1923
1924 ASSERT(softsp != NULL);
1925
1926 ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1927 npages = iommu_fast_dvma->pagecnt[index];
1928
1929 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1930 mutex_enter(&softsp->iomemlock);
1931 prevp = &softsp->iomem;
1932 walk = softsp->iomem;
1933
1934 while (walk != NULL) {
1935 if (walk->ioaddr == ioaddr) {
1936 *prevp = walk->next;
1937 break;
1938 }
1939 prevp = &walk->next;
1940 walk = walk->next;
1941 }
1942 mutex_exit(&softsp->iomemlock);
1943
1944 kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
1945 kmem_free(walk, sizeof (struct io_mem_list));
1946 #endif /* DEBUG && IO_MEMUSAGE */
1947
1948 DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_unload: handle %p sync flag "
1949 "addr %p sync flag pfn %llx index %x page count %lx\n", (void *)mp,
1950 (void *)&iommu_fast_dvma->sync_flag[index],
1951 iommu_fast_dvma->phys_sync_flag[index],
1952 index, npages));
1953
1954 if ((mp->dmai_rflags & DMP_NOSYNC) != DMP_NOSYNC) {
1955 sync_stream_buf(softsp, ioaddr, npages,
1956 (int *)&iommu_fast_dvma->sync_flag[index],
1957 iommu_fast_dvma->phys_sync_flag[index]);
1958 }
1959 }
1960
1961 /*ARGSUSED*/
1962 void
1963 iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
1964 {
1965 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1966 ioaddr_t ioaddr;
1967 uint_t npages;
1968 struct fast_dvma *iommu_fast_dvma =
1969 (struct fast_dvma *)mp->dmai_nexus_private;
1970 struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1971 iommu_fast_dvma->softsp;
1972
1973 if ((mp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
1974 return;
1975
1976 ASSERT(softsp != NULL);
1977 ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1978 npages = iommu_fast_dvma->pagecnt[index];
1979
1980 DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_sync: handle %p, "
1981 "sync flag addr %p, sync flag pfn %llx\n", (void *)mp,
1982 (void *)&iommu_fast_dvma->sync_flag[index],
1983 iommu_fast_dvma->phys_sync_flag[index]));
1984
1985 sync_stream_buf(softsp, ioaddr, npages,
1986 (int *)&iommu_fast_dvma->sync_flag[index],
1987 iommu_fast_dvma->phys_sync_flag[index]);
1988 }
1989