1*0Sstevel@tonic-gate /* 2*0Sstevel@tonic-gate * CDDL HEADER START 3*0Sstevel@tonic-gate * 4*0Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*0Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 6*0Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 7*0Sstevel@tonic-gate * with the License. 8*0Sstevel@tonic-gate * 9*0Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10*0Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 11*0Sstevel@tonic-gate * See the License for the specific language governing permissions 12*0Sstevel@tonic-gate * and limitations under the License. 13*0Sstevel@tonic-gate * 14*0Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 15*0Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16*0Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 17*0Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 18*0Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 19*0Sstevel@tonic-gate * 20*0Sstevel@tonic-gate * CDDL HEADER END 21*0Sstevel@tonic-gate */ 22*0Sstevel@tonic-gate /* 23*0Sstevel@tonic-gate * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24*0Sstevel@tonic-gate * Use is subject to license terms. 
25*0Sstevel@tonic-gate */ 26*0Sstevel@tonic-gate 27*0Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 28*0Sstevel@tonic-gate 29*0Sstevel@tonic-gate #include <sys/types.h> 30*0Sstevel@tonic-gate #include <sys/param.h> 31*0Sstevel@tonic-gate #include <sys/conf.h> 32*0Sstevel@tonic-gate #include <sys/ddi.h> 33*0Sstevel@tonic-gate #include <sys/sunddi.h> 34*0Sstevel@tonic-gate #include <sys/ddi_impldefs.h> 35*0Sstevel@tonic-gate #include <sys/cmn_err.h> 36*0Sstevel@tonic-gate #include <sys/kmem.h> 37*0Sstevel@tonic-gate #include <sys/vmem.h> 38*0Sstevel@tonic-gate #include <sys/sysmacros.h> 39*0Sstevel@tonic-gate 40*0Sstevel@tonic-gate #include <sys/ddidmareq.h> 41*0Sstevel@tonic-gate #include <sys/sysiosbus.h> 42*0Sstevel@tonic-gate #include <sys/iommu.h> 43*0Sstevel@tonic-gate #include <sys/iocache.h> 44*0Sstevel@tonic-gate #include <sys/dvma.h> 45*0Sstevel@tonic-gate 46*0Sstevel@tonic-gate #include <vm/as.h> 47*0Sstevel@tonic-gate #include <vm/hat.h> 48*0Sstevel@tonic-gate #include <vm/page.h> 49*0Sstevel@tonic-gate #include <vm/hat_sfmmu.h> 50*0Sstevel@tonic-gate #include <sys/machparam.h> 51*0Sstevel@tonic-gate #include <sys/machsystm.h> 52*0Sstevel@tonic-gate #include <sys/vmsystm.h> 53*0Sstevel@tonic-gate #include <sys/iommutsb.h> 54*0Sstevel@tonic-gate 55*0Sstevel@tonic-gate /* Useful debugging Stuff */ 56*0Sstevel@tonic-gate #include <sys/nexusdebug.h> 57*0Sstevel@tonic-gate #include <sys/debug.h> 58*0Sstevel@tonic-gate /* Bitfield debugging definitions for this file */ 59*0Sstevel@tonic-gate #define IOMMU_GETDVMAPAGES_DEBUG 0x1 60*0Sstevel@tonic-gate #define IOMMU_DMAMAP_DEBUG 0x2 61*0Sstevel@tonic-gate #define IOMMU_DMAMCTL_DEBUG 0x4 62*0Sstevel@tonic-gate #define IOMMU_DMAMCTL_SYNC_DEBUG 0x8 63*0Sstevel@tonic-gate #define IOMMU_DMAMCTL_HTOC_DEBUG 0x10 64*0Sstevel@tonic-gate #define IOMMU_DMAMCTL_KVADDR_DEBUG 0x20 65*0Sstevel@tonic-gate #define IOMMU_DMAMCTL_NEXTWIN_DEBUG 0x40 66*0Sstevel@tonic-gate #define IOMMU_DMAMCTL_NEXTSEG_DEBUG 0x80 
#define	IOMMU_DMAMCTL_MOVWIN_DEBUG	0x100
#define	IOMMU_DMAMCTL_REPWIN_DEBUG	0x200
#define	IOMMU_DMAMCTL_GETERR_DEBUG	0x400
#define	IOMMU_DMAMCTL_COFF_DEBUG	0x800
#define	IOMMU_DMAMCTL_DMA_FREE_DEBUG	0x1000
#define	IOMMU_REGISTERS_DEBUG		0x2000
#define	IOMMU_DMA_SETUP_DEBUG		0x4000
#define	IOMMU_DMA_UNBINDHDL_DEBUG	0x8000
#define	IOMMU_DMA_BINDHDL_DEBUG		0x10000
#define	IOMMU_DMA_WIN_DEBUG		0x20000
#define	IOMMU_DMA_ALLOCHDL_DEBUG	0x40000
#define	IOMMU_DMA_LIM_SETUP_DEBUG	0x80000
#define	IOMMU_FASTDMA_RESERVE		0x100000
#define	IOMMU_FASTDMA_LOAD		0x200000
#define	IOMMU_INTER_INTRA_XFER		0x400000
#define	IOMMU_TTE			0x800000
#define	IOMMU_TLB			0x1000000
#define	IOMMU_FASTDMA_SYNC		0x2000000

/* Turn on if you need to keep track of outstanding IOMMU usage */
/* #define	IO_MEMUSAGE */
/* Turn on to debug IOMMU unmapping code */
/* #define	IO_MEMDEBUG */

/*
 * Fast-DVMA operations vector handed to leaf drivers.  The entries
 * (kaddr_load / unload / sync) must stay in the order defined by the
 * DVMAO_REV revision in <sys/dvma.h>.
 */
static struct dvma_ops iommu_dvma_ops = {
	DVMAO_REV,
	iommu_dvma_kaddr_load,
	iommu_dvma_unload,
	iommu_dvma_sync
};

extern void *sbusp;		/* sbus soft state hook */

/* Quantum-cache limit (bytes) passed to vmem_create() for the DVMA arena */
#define	DVMA_MAX_CACHE	65536

/*
 * This is the number of pages that a mapping request needs before we force
 * the TLB flush code to use diagnostic registers.  This value was determined
 * through a series of test runs measuring dma mapping setup performance.
 */
int tlb_flush_using_diag = 16;

/*
 * Candidate hardware TSB size encodings, ordered smallest to largest.
 * iommu_resume_init() walks this table backwards to pick the largest
 * encoding that fits the allocated TSB.
 */
int sysio_iommu_tsb_sizes[] = {
	IOMMU_TSB_SIZE_8M,
	IOMMU_TSB_SIZE_16M,
	IOMMU_TSB_SIZE_32M,
	IOMMU_TSB_SIZE_64M,
	IOMMU_TSB_SIZE_128M,
	IOMMU_TSB_SIZE_256M,
	IOMMU_TSB_SIZE_512M,
	IOMMU_TSB_SIZE_1G
};

static int iommu_map_window(ddi_dma_impl_t *, off_t, size_t);

/*
 * iommu_init() -- one-time initialization of the SYSIO IOMMU for one
 * SBus instance.
 *
 *   softsp  - per-instance soft state; register pointers, locks, the
 *             DVMA arena and TSB bookkeeping are filled in here.
 *   address - already-mapped virtual base of the SYSIO register block;
 *             individual register pointers are computed by offset.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE when no TSB can be allocated or
 * when the hardware read-back of the TSB base does not match what was
 * programmed.  On the read-back failure path the TSB is released again.
 */
int
iommu_init(struct sbus_soft_state *softsp, caddr_t address)
{
	int i;
	char name[40];	/* vmem arena name, "<driver><inst>_dvma" */

#ifdef DEBUG
	debug_info = 1;
#endif

	/*
	 * Simply add each registers offset to the base address
	 * to calculate the already mapped virtual address of
	 * the device register...
	 *
	 * define a macro for the pointer arithmetic; all registers
	 * are 64 bits wide and are defined as uint64_t's.
	 */

#define	REG_ADDR(b, o)	(uint64_t *)((caddr_t)(b) + (o))

	softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
	softsp->tsb_base_addr = REG_ADDR(address, OFF_TSB_BASE_ADDR);
	softsp->iommu_flush_reg = REG_ADDR(address, OFF_IOMMU_FLUSH_REG);
	softsp->iommu_tlb_tag = REG_ADDR(address, OFF_IOMMU_TLB_TAG);
	softsp->iommu_tlb_data = REG_ADDR(address, OFF_IOMMU_TLB_DATA);

#undef REG_ADDR

	mutex_init(&softsp->dma_pool_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&softsp->intr_poll_list_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Set up the DVMA resource sizes */
	if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
	    IOMMU_TSB_COOKIE_NONE) {
		cmn_err(CE_WARN, "%s%d: Unable to retrieve IOMMU array.",
		    ddi_driver_name(softsp->dip),
		    ddi_get_instance(softsp->dip));
		return (DDI_FAILURE);
	}
	softsp->soft_tsb_base_addr =
	    iommu_tsb_cookie_to_va(softsp->iommu_tsb_cookie);
	softsp->iommu_dvma_size =
	    iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie) <<
	    IOMMU_TSB_TO_RNG;
	/*
	 * The DVMA window sits at the top of the 32-bit IO address space:
	 * base = 2^32 - size (computed via unsigned wraparound of 0 - size).
	 */
	softsp->iommu_dvma_base = (ioaddr_t)
	    (0 - (ioaddr_t)softsp->iommu_dvma_size);

	(void) snprintf(name, sizeof (name), "%s%d_dvma",
	    ddi_driver_name(softsp->dip), ddi_get_instance(softsp->dip));

	/*
	 * Initialize the DVMA vmem arena.
	 */
	softsp->dvma_arena = vmem_create(name, (void *)softsp->iommu_dvma_base,
	    softsp->iommu_dvma_size, PAGESIZE, NULL, NULL, NULL,
	    DVMA_MAX_CACHE, VM_SLEEP);

	/* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
	softsp->dma_reserve = iommu_btop(softsp->iommu_dvma_size >> 1);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	/* Debug-only tracking list of outstanding IOMMU mappings */
	mutex_init(&softsp->iomemlock, NULL, MUTEX_DEFAULT, NULL);
	softsp->iomem = (struct io_mem_list *)0;
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Get the base address of the TSB table and store it in the hardware
	 */

	/*
	 * We plan on the PROM flushing all TLB entries.  If this is not the
	 * case, this is where we should flush the hardware TLB.
	 */

	/* Set the IOMMU registers */
	(void) iommu_resume_init(softsp);

	/* check the convenient copy of TSB base, and flush write buffers */
	if (*softsp->tsb_base_addr !=
	    va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
		iommu_tsb_free(softsp->iommu_tsb_cookie);
		return (DDI_FAILURE);
	}

	/*
	 * Compute the inclusive pfn range covered by this SBus's "ranges"
	 * property, used later to detect intra-SBus (peer-to-peer) DVMA.
	 */
	softsp->sbus_io_lo_pfn = UINT32_MAX;
	softsp->sbus_io_hi_pfn = 0;
	for (i = 0; i < sysio_pd_getnrng(softsp->dip); i++) {
		struct rangespec *rangep;
		uint64_t addr;
		pfn_t hipfn, lopfn;

		rangep = sysio_pd_getrng(softsp->dip, i);
		addr = (uint64_t)((uint64_t)rangep->rng_bustype << 32);
		addr |= (uint64_t)rangep->rng_offset;
		lopfn = (pfn_t)(addr >> MMU_PAGESHIFT);
		addr += (uint64_t)(rangep->rng_size - 1);
		hipfn = (pfn_t)(addr >> MMU_PAGESHIFT);

		softsp->sbus_io_lo_pfn = (lopfn < softsp->sbus_io_lo_pfn) ?
		    lopfn : softsp->sbus_io_lo_pfn;

		softsp->sbus_io_hi_pfn = (hipfn > softsp->sbus_io_hi_pfn) ?
		    hipfn : softsp->sbus_io_hi_pfn;
	}

	DPRINTF(IOMMU_REGISTERS_DEBUG, ("IOMMU Control reg: %p IOMMU TSB "
	    "base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
	    softsp->iommu_ctrl_reg, softsp->tsb_base_addr,
	    softsp->iommu_flush_reg, softsp->soft_tsb_base_addr));

	return (DDI_SUCCESS);
}

/*
 * function to uninitialize the iommu and release the tsb back to
 * the spare pool.  See startup.c for tsb spare management.
 */

int
iommu_uninit(struct sbus_soft_state *softsp)
{
	vmem_destroy(softsp->dvma_arena);

	/* flip off the IOMMU enable switch */
	*softsp->iommu_ctrl_reg &=
	    (TSB_SIZE << TSB_SIZE_SHIFT | IOMMU_DISABLE);

	iommu_tsb_free(softsp->iommu_tsb_cookie);

	return (DDI_SUCCESS);
}

/*
 * Initialize iommu hardware registers when the system is being resumed.
 * (Subset of iommu_init())
 */
int
iommu_resume_init(struct sbus_soft_state *softsp)
{
	int i;
	uint_t tsb_size;	/* hardware TSB size encoding (table index) */
	uint_t tsb_bytes;	/* actual size of the allocated TSB */

	/*
	 * Reset the base address of the TSB table in the hardware
	 */
	*softsp->tsb_base_addr = va_to_pa((caddr_t)softsp->soft_tsb_base_addr);

	/*
	 * Figure out the correct size of the IOMMU TSB entries.  If we
	 * end up with a size smaller than that needed for 8M of IOMMU
	 * space, default the size to 8M.  XXX We could probably panic here
	 */
	i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
	    - 1;

	tsb_bytes = iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie);

	/* Walk the size table down to the largest encoding that fits */
	while (i > 0) {
		if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
			break;
		i--;
	}

	tsb_size = i;

	/* OK, lets flip the "on" switch of the IOMMU */
	*softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
	    | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);

	return (DDI_SUCCESS);
}

/*
 * iommu_tlb_flush() -- invalidate the IOMMU TLB entries covering the
 * DVMA range [addr, addr + npages * IOMMU_PAGESIZE].
 *
 * Single-page flushes write the address to the flush register directly.
 * Multi-page flushes walk the TLB via the diagnostic tag/data registers
 * and flush only the entries whose tag falls inside the range and whose
 * valid bit is set.  In both cases a read of the SBus control register
 * is used to drain the hardware store buffers; the result of that
 * volatile read is intentionally unused.
 */
void
iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
{
	volatile uint64_t tmpreg;
	volatile uint64_t *vaddr_reg, *valid_bit_reg;
	ioaddr_t hiaddr, ioaddr;
	int i, do_flush = 0;

	if (npages == 1) {
		*softsp->iommu_flush_reg = (uint64_t)addr;
		tmpreg = *softsp->sbus_ctrl_reg;	/* drain store buffers */
		return;
	}

	/*
	 * NOTE(review): hiaddr is one byte past the last page and the
	 * comparison below is <=, so one extra TLB entry past the range
	 * may be flushed -- harmless (an over-flush), but worth confirming
	 * against the SYSIO spec.
	 */
	hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);
	for (i = 0, vaddr_reg = softsp->iommu_tlb_tag,
	    valid_bit_reg = softsp->iommu_tlb_data;
	    i < IOMMU_TLB_ENTRIES; i++, vaddr_reg++, valid_bit_reg++) {
		tmpreg = *vaddr_reg;
		/* Reconstruct the IO address from the TLB tag contents */
		ioaddr = (ioaddr_t)((tmpreg & IOMMU_TLBTAG_VA_MASK) <<
		    IOMMU_TLBTAG_VA_SHIFT);

		DPRINTF(IOMMU_TLB, ("Vaddr reg 0x%x, "
		    "TLB vaddr reg %llx, IO addr 0x%x "
		    "Base addr 0x%x, Hi addr 0x%x\n",
		    vaddr_reg, tmpreg, ioaddr, addr, hiaddr));

		if (ioaddr >= addr && ioaddr <= hiaddr) {
			tmpreg = *valid_bit_reg;

			DPRINTF(IOMMU_TLB, ("Valid reg addr 0x%x, "
			    "TLB valid reg %llx\n",
			    valid_bit_reg, tmpreg));

			if (tmpreg & IOMMU_TLB_VALID) {
				*softsp->iommu_flush_reg = (uint64_t)ioaddr;
				do_flush = 1;
			}
		}
	}

	if (do_flush)
		tmpreg = *softsp->sbus_ctrl_reg;	/* drain store buffers */
}


/*
342*0Sstevel@tonic-gate * Shorthand defines 343*0Sstevel@tonic-gate */ 344*0Sstevel@tonic-gate 345*0Sstevel@tonic-gate #define ALO dma_lim->dlim_addr_lo 346*0Sstevel@tonic-gate #define AHI dma_lim->dlim_addr_hi 347*0Sstevel@tonic-gate #define OBJSIZE dmareq->dmar_object.dmao_size 348*0Sstevel@tonic-gate #define IOTTE_NDX(vaddr, base) (base + \ 349*0Sstevel@tonic-gate (int)(iommu_btop((vaddr & ~IOMMU_PAGEMASK) - \ 350*0Sstevel@tonic-gate softsp->iommu_dvma_base))) 351*0Sstevel@tonic-gate /* 352*0Sstevel@tonic-gate * If DDI_DMA_PARTIAL flag is set and the request is for 353*0Sstevel@tonic-gate * less than MIN_DVMA_WIN_SIZE, it's not worth the hassle so 354*0Sstevel@tonic-gate * we turn off the DDI_DMA_PARTIAL flag 355*0Sstevel@tonic-gate */ 356*0Sstevel@tonic-gate #define MIN_DVMA_WIN_SIZE (128) 357*0Sstevel@tonic-gate 358*0Sstevel@tonic-gate /* ARGSUSED */ 359*0Sstevel@tonic-gate void 360*0Sstevel@tonic-gate iommu_remove_mappings(ddi_dma_impl_t *mp) 361*0Sstevel@tonic-gate { 362*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG) 363*0Sstevel@tonic-gate pgcnt_t npages; 364*0Sstevel@tonic-gate ioaddr_t ioaddr; 365*0Sstevel@tonic-gate volatile uint64_t *iotte_ptr; 366*0Sstevel@tonic-gate ioaddr_t ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET; 367*0Sstevel@tonic-gate pgcnt_t npages = mp->dmai_ndvmapages; 368*0Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 369*0Sstevel@tonic-gate struct sbus_soft_state *softsp = mppriv->softsp; 370*0Sstevel@tonic-gate 371*0Sstevel@tonic-gate #if defined(IO_MEMUSAGE) 372*0Sstevel@tonic-gate struct io_mem_list **prevp, *walk; 373*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 374*0Sstevel@tonic-gate 375*0Sstevel@tonic-gate ASSERT(softsp != NULL); 376*0Sstevel@tonic-gate /* 377*0Sstevel@tonic-gate * Run thru the mapped entries and free 'em 378*0Sstevel@tonic-gate */ 379*0Sstevel@tonic-gate 380*0Sstevel@tonic-gate ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET; 381*0Sstevel@tonic-gate npages = 
mp->dmai_ndvmapages; 382*0Sstevel@tonic-gate 383*0Sstevel@tonic-gate #if defined(IO_MEMUSAGE) 384*0Sstevel@tonic-gate mutex_enter(&softsp->iomemlock); 385*0Sstevel@tonic-gate prevp = &softsp->iomem; 386*0Sstevel@tonic-gate walk = softsp->iomem; 387*0Sstevel@tonic-gate 388*0Sstevel@tonic-gate while (walk) { 389*0Sstevel@tonic-gate if (walk->ioaddr == ioaddr) { 390*0Sstevel@tonic-gate *prevp = walk->next; 391*0Sstevel@tonic-gate break; 392*0Sstevel@tonic-gate } 393*0Sstevel@tonic-gate 394*0Sstevel@tonic-gate prevp = &walk->next; 395*0Sstevel@tonic-gate walk = walk->next; 396*0Sstevel@tonic-gate } 397*0Sstevel@tonic-gate mutex_exit(&softsp->iomemlock); 398*0Sstevel@tonic-gate 399*0Sstevel@tonic-gate kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1)); 400*0Sstevel@tonic-gate kmem_free(walk, sizeof (struct io_mem_list)); 401*0Sstevel@tonic-gate #endif /* IO_MEMUSAGE */ 402*0Sstevel@tonic-gate 403*0Sstevel@tonic-gate iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr); 404*0Sstevel@tonic-gate 405*0Sstevel@tonic-gate while (npages) { 406*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_DEBUG, 407*0Sstevel@tonic-gate ("dma_mctl: freeing ioaddr %x iotte %p\n", 408*0Sstevel@tonic-gate ioaddr, iotte_ptr)); 409*0Sstevel@tonic-gate *iotte_ptr = (uint64_t)0; /* unload tte */ 410*0Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, 1); 411*0Sstevel@tonic-gate npages--; 412*0Sstevel@tonic-gate ioaddr += IOMMU_PAGESIZE; 413*0Sstevel@tonic-gate iotte_ptr++; 414*0Sstevel@tonic-gate } 415*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */ 416*0Sstevel@tonic-gate } 417*0Sstevel@tonic-gate 418*0Sstevel@tonic-gate 419*0Sstevel@tonic-gate int 420*0Sstevel@tonic-gate iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr) 421*0Sstevel@tonic-gate { 422*0Sstevel@tonic-gate pfn_t pfn; 423*0Sstevel@tonic-gate struct as *as = NULL; 424*0Sstevel@tonic-gate pgcnt_t npages; 425*0Sstevel@tonic-gate ioaddr_t ioaddr; 426*0Sstevel@tonic-gate uint_t offset; 427*0Sstevel@tonic-gate volatile 
uint64_t *iotte_ptr; 428*0Sstevel@tonic-gate uint64_t tmp_iotte_flag; 429*0Sstevel@tonic-gate int rval = DDI_DMA_MAPPED; 430*0Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 431*0Sstevel@tonic-gate struct sbus_soft_state *softsp = mppriv->softsp; 432*0Sstevel@tonic-gate int diag_tlb_flush; 433*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 434*0Sstevel@tonic-gate struct io_mem_list *iomemp; 435*0Sstevel@tonic-gate pfn_t *pfnp; 436*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 437*0Sstevel@tonic-gate 438*0Sstevel@tonic-gate ASSERT(softsp != NULL); 439*0Sstevel@tonic-gate 440*0Sstevel@tonic-gate /* Set Valid and Cache for mem xfer */ 441*0Sstevel@tonic-gate tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM; 442*0Sstevel@tonic-gate 443*0Sstevel@tonic-gate offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET); 444*0Sstevel@tonic-gate npages = iommu_btopr(mp->dmai_size + offset); 445*0Sstevel@tonic-gate ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET); 446*0Sstevel@tonic-gate iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr); 447*0Sstevel@tonic-gate diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0; 448*0Sstevel@tonic-gate 449*0Sstevel@tonic-gate as = mp->dmai_object.dmao_obj.virt_obj.v_as; 450*0Sstevel@tonic-gate if (as == NULL) 451*0Sstevel@tonic-gate as = &kas; 452*0Sstevel@tonic-gate 453*0Sstevel@tonic-gate /* 454*0Sstevel@tonic-gate * Set the per object bits of the TTE here. We optimize this for 455*0Sstevel@tonic-gate * the memory case so that the while loop overhead is minimal. 
456*0Sstevel@tonic-gate */ 457*0Sstevel@tonic-gate /* Turn on NOSYNC if we need consistent mem */ 458*0Sstevel@tonic-gate if (mp->dmai_rflags & DDI_DMA_CONSISTENT) { 459*0Sstevel@tonic-gate mp->dmai_rflags |= DMP_NOSYNC; 460*0Sstevel@tonic-gate tmp_iotte_flag ^= IOTTE_STREAM; 461*0Sstevel@tonic-gate /* Set streaming mode if not consistent mem */ 462*0Sstevel@tonic-gate } else if (softsp->stream_buf_off) { 463*0Sstevel@tonic-gate tmp_iotte_flag ^= IOTTE_STREAM; 464*0Sstevel@tonic-gate } 465*0Sstevel@tonic-gate 466*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 467*0Sstevel@tonic-gate iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP); 468*0Sstevel@tonic-gate iomemp->rdip = mp->dmai_rdip; 469*0Sstevel@tonic-gate iomemp->ioaddr = ioaddr; 470*0Sstevel@tonic-gate iomemp->addr = addr; 471*0Sstevel@tonic-gate iomemp->npages = npages; 472*0Sstevel@tonic-gate pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1), 473*0Sstevel@tonic-gate KM_SLEEP); 474*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 475*0Sstevel@tonic-gate /* 476*0Sstevel@tonic-gate * Grab the mappings from the dmmu and stick 'em into the 477*0Sstevel@tonic-gate * iommu. 478*0Sstevel@tonic-gate */ 479*0Sstevel@tonic-gate ASSERT(npages != 0); 480*0Sstevel@tonic-gate 481*0Sstevel@tonic-gate /* If we're going to flush the TLB using diag mode, do it now. 
*/ 482*0Sstevel@tonic-gate if (diag_tlb_flush) 483*0Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, npages); 484*0Sstevel@tonic-gate 485*0Sstevel@tonic-gate do { 486*0Sstevel@tonic-gate uint64_t iotte_flag = tmp_iotte_flag; 487*0Sstevel@tonic-gate 488*0Sstevel@tonic-gate /* 489*0Sstevel@tonic-gate * Fetch the pfn for the DMA object 490*0Sstevel@tonic-gate */ 491*0Sstevel@tonic-gate 492*0Sstevel@tonic-gate ASSERT(as); 493*0Sstevel@tonic-gate pfn = hat_getpfnum(as->a_hat, (caddr_t)addr); 494*0Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID); 495*0Sstevel@tonic-gate 496*0Sstevel@tonic-gate if (!pf_is_memory(pfn)) { 497*0Sstevel@tonic-gate /* DVMA'ing to IO space */ 498*0Sstevel@tonic-gate 499*0Sstevel@tonic-gate /* Turn off cache bit if set */ 500*0Sstevel@tonic-gate if (iotte_flag & IOTTE_CACHE) 501*0Sstevel@tonic-gate iotte_flag ^= IOTTE_CACHE; 502*0Sstevel@tonic-gate 503*0Sstevel@tonic-gate /* Turn off stream bit if set */ 504*0Sstevel@tonic-gate if (iotte_flag & IOTTE_STREAM) 505*0Sstevel@tonic-gate iotte_flag ^= IOTTE_STREAM; 506*0Sstevel@tonic-gate 507*0Sstevel@tonic-gate if (IS_INTRA_SBUS(softsp, pfn)) { 508*0Sstevel@tonic-gate /* Intra sbus transfer */ 509*0Sstevel@tonic-gate 510*0Sstevel@tonic-gate /* Turn on intra flag */ 511*0Sstevel@tonic-gate iotte_flag |= IOTTE_INTRA; 512*0Sstevel@tonic-gate 513*0Sstevel@tonic-gate DPRINTF(IOMMU_INTER_INTRA_XFER, ( 514*0Sstevel@tonic-gate "Intra xfer pfnum %x TTE %llx\n", 515*0Sstevel@tonic-gate pfn, iotte_flag)); 516*0Sstevel@tonic-gate } else { 517*0Sstevel@tonic-gate if (pf_is_dmacapable(pfn) == 1) { 518*0Sstevel@tonic-gate /*EMPTY*/ 519*0Sstevel@tonic-gate DPRINTF(IOMMU_INTER_INTRA_XFER, 520*0Sstevel@tonic-gate ("Inter xfer pfnum %lx " 521*0Sstevel@tonic-gate "tte hi %llx\n", 522*0Sstevel@tonic-gate pfn, iotte_flag)); 523*0Sstevel@tonic-gate } else { 524*0Sstevel@tonic-gate rval = DDI_DMA_NOMAPPING; 525*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG) 526*0Sstevel@tonic-gate goto bad; 
527*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */ 528*0Sstevel@tonic-gate } 529*0Sstevel@tonic-gate } 530*0Sstevel@tonic-gate } 531*0Sstevel@tonic-gate addr += IOMMU_PAGESIZE; 532*0Sstevel@tonic-gate 533*0Sstevel@tonic-gate DPRINTF(IOMMU_TTE, ("vaddr mapping: tte index %x pfn %lx " 534*0Sstevel@tonic-gate "tte flag %llx addr %p ioaddr %x\n", 535*0Sstevel@tonic-gate iotte_ptr, pfn, iotte_flag, addr, ioaddr)); 536*0Sstevel@tonic-gate 537*0Sstevel@tonic-gate /* Flush the IOMMU TLB before loading a new mapping */ 538*0Sstevel@tonic-gate if (!diag_tlb_flush) 539*0Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, 1); 540*0Sstevel@tonic-gate 541*0Sstevel@tonic-gate /* Set the hardware IO TTE */ 542*0Sstevel@tonic-gate *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag; 543*0Sstevel@tonic-gate 544*0Sstevel@tonic-gate ioaddr += IOMMU_PAGESIZE; 545*0Sstevel@tonic-gate npages--; 546*0Sstevel@tonic-gate iotte_ptr++; 547*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 548*0Sstevel@tonic-gate *pfnp = pfn; 549*0Sstevel@tonic-gate pfnp++; 550*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 551*0Sstevel@tonic-gate } while (npages != 0); 552*0Sstevel@tonic-gate 553*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 554*0Sstevel@tonic-gate mutex_enter(&softsp->iomemlock); 555*0Sstevel@tonic-gate iomemp->next = softsp->iomem; 556*0Sstevel@tonic-gate softsp->iomem = iomemp; 557*0Sstevel@tonic-gate mutex_exit(&softsp->iomemlock); 558*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 559*0Sstevel@tonic-gate 560*0Sstevel@tonic-gate return (rval); 561*0Sstevel@tonic-gate 562*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG) 563*0Sstevel@tonic-gate bad: 564*0Sstevel@tonic-gate /* If we fail a mapping, free up any mapping resources used */ 565*0Sstevel@tonic-gate iommu_remove_mappings(mp); 566*0Sstevel@tonic-gate return (rval); 567*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */ 568*0Sstevel@tonic-gate } 


/*
 * iommu_create_pp_mappings() -- load IO TTEs for a DMA object described
 * by a page list rather than a virtual address.
 *
 *   mp     - DMA handle; dmai_mapping/dmai_size describe the DVMA range
 *            already allocated for this request.
 *   pp     - head of a p_next-linked page list, or NULL.
 *   pplist - array of page_t pointers, used only when pp is NULL.
 *
 * Pages are already identified (no hat lookup is needed), so unlike
 * iommu_create_vaddr_mappings() there is no IO-space/DMA-capability
 * check and rval is always DDI_DMA_MAPPED.
 */
int
iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
{
	pfn_t pfn;
	pgcnt_t npages;
	ioaddr_t ioaddr;
	uint_t offset;
	volatile uint64_t *iotte_ptr;
	uint64_t tmp_iotte_flag;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;
	int diag_tlb_flush;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
	struct io_mem_list *iomemp;
	pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */
	int rval = DDI_DMA_MAPPED;

	/* Set Valid and Cache for mem xfer */
	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;

	ASSERT(softsp != NULL);

	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
	npages = iommu_btopr(mp->dmai_size + offset);
	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
	/* Large requests flush the TLB via diag registers in one pass */
	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;

	/*
	 * Set the per object bits of the TTE here. We optimize this for
	 * the memory case so that the while loop overhead is minimal.
	 */
	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
		/* Turn on NOSYNC if we need consistent mem */
		mp->dmai_rflags |= DMP_NOSYNC;
		tmp_iotte_flag ^= IOTTE_STREAM;
	} else if (softsp->stream_buf_off) {
		/* Set streaming mode if not consistent mem */
		tmp_iotte_flag ^= IOTTE_STREAM;
	}

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	/*
	 * Record this mapping on the usage-tracking list (debug only).
	 * NOTE(review): iomemp->addr is not set here, unlike the vaddr
	 * variant -- presumably because there is no vaddr for a page
	 * list; confirm readers of the iomem list tolerate that.
	 */
	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
	iomemp->rdip = mp->dmai_rdip;
	iomemp->ioaddr = ioaddr;
	iomemp->npages = npages;
	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
	    KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Grab the mappings from the dmmu and stick 'em into the
	 * iommu.
	 */
	ASSERT(npages != 0);

	/* If we're going to flush the TLB using diag mode, do it now. */
	if (diag_tlb_flush)
		iommu_tlb_flush(softsp, ioaddr, npages);

	do {
		uint64_t iotte_flag;

		iotte_flag = tmp_iotte_flag;

		/* Take the next pfn from the page list or pointer array */
		if (pp != NULL) {
			pfn = pp->p_pagenum;
			pp = pp->p_next;
		} else {
			pfn = (*pplist)->p_pagenum;
			pplist++;
		}

		DPRINTF(IOMMU_TTE, ("pp mapping TTE index %x pfn %lx "
		    "tte flag %llx ioaddr %x\n", iotte_ptr,
		    pfn, iotte_flag, ioaddr));

		/* Flush the IOMMU TLB before loading a new mapping */
		if (!diag_tlb_flush)
			iommu_tlb_flush(softsp, ioaddr, 1);

		/* Set the hardware IO TTE */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

		ioaddr += IOMMU_PAGESIZE;
		npages--;
		iotte_ptr++;

#if defined(DEBUG) && defined(IO_MEMUSAGE)
		*pfnp = pfn;
		pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */

	} while (npages != 0);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	iomemp->next = softsp->iomem;
	softsp->iomem = iomemp;
	mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

673*0Sstevel@tonic-gate return (rval); 674*0Sstevel@tonic-gate } 675*0Sstevel@tonic-gate 676*0Sstevel@tonic-gate 677*0Sstevel@tonic-gate int 678*0Sstevel@tonic-gate iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip, 679*0Sstevel@tonic-gate struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64, 680*0Sstevel@tonic-gate uint_t *minxferp, uint_t dma_flags) 681*0Sstevel@tonic-gate { 682*0Sstevel@tonic-gate struct regspec *rp; 683*0Sstevel@tonic-gate 684*0Sstevel@tonic-gate /* Take care of 64 bit limits. */ 685*0Sstevel@tonic-gate if (!(dma_flags & DDI_DMA_SBUS_64BIT)) { 686*0Sstevel@tonic-gate /* 687*0Sstevel@tonic-gate * return burst size for 32-bit mode 688*0Sstevel@tonic-gate */ 689*0Sstevel@tonic-gate *burstsizep &= softsp->sbus_burst_sizes; 690*0Sstevel@tonic-gate return (DDI_FAILURE); 691*0Sstevel@tonic-gate } 692*0Sstevel@tonic-gate 693*0Sstevel@tonic-gate /* 694*0Sstevel@tonic-gate * check if SBus supports 64 bit and if caller 695*0Sstevel@tonic-gate * is child of SBus. No support through bridges 696*0Sstevel@tonic-gate */ 697*0Sstevel@tonic-gate if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) { 698*0Sstevel@tonic-gate /* 699*0Sstevel@tonic-gate * SBus doesn't support it or bridge. 
Do 32-bit 700*0Sstevel@tonic-gate * xfers 701*0Sstevel@tonic-gate */ 702*0Sstevel@tonic-gate *burstsizep &= softsp->sbus_burst_sizes; 703*0Sstevel@tonic-gate return (DDI_FAILURE); 704*0Sstevel@tonic-gate } 705*0Sstevel@tonic-gate 706*0Sstevel@tonic-gate rp = ddi_rnumber_to_regspec(rdip, 0); 707*0Sstevel@tonic-gate if (rp == NULL) { 708*0Sstevel@tonic-gate *burstsizep &= softsp->sbus_burst_sizes; 709*0Sstevel@tonic-gate return (DDI_FAILURE); 710*0Sstevel@tonic-gate } 711*0Sstevel@tonic-gate 712*0Sstevel@tonic-gate /* Check for old-style 64 bit burstsizes */ 713*0Sstevel@tonic-gate if (burstsize64 & SYSIO64_BURST_MASK) { 714*0Sstevel@tonic-gate /* Scale back burstsizes if Necessary */ 715*0Sstevel@tonic-gate *burstsizep &= (softsp->sbus64_burst_sizes | 716*0Sstevel@tonic-gate softsp->sbus_burst_sizes); 717*0Sstevel@tonic-gate } else { 718*0Sstevel@tonic-gate /* Get the 64 bit burstsizes. */ 719*0Sstevel@tonic-gate *burstsizep = burstsize64; 720*0Sstevel@tonic-gate 721*0Sstevel@tonic-gate /* Scale back burstsizes if Necessary */ 722*0Sstevel@tonic-gate *burstsizep &= (softsp->sbus64_burst_sizes >> 723*0Sstevel@tonic-gate SYSIO64_BURST_SHIFT); 724*0Sstevel@tonic-gate } 725*0Sstevel@tonic-gate 726*0Sstevel@tonic-gate /* 727*0Sstevel@tonic-gate * Set the largest value of the smallest burstsize that the 728*0Sstevel@tonic-gate * device or the bus can manage. 
729*0Sstevel@tonic-gate */ 730*0Sstevel@tonic-gate *minxferp = MAX(*minxferp, 731*0Sstevel@tonic-gate (1 << (ddi_ffs(softsp->sbus64_burst_sizes) - 1))); 732*0Sstevel@tonic-gate 733*0Sstevel@tonic-gate return (DDI_SUCCESS); 734*0Sstevel@tonic-gate } 735*0Sstevel@tonic-gate 736*0Sstevel@tonic-gate 737*0Sstevel@tonic-gate int 738*0Sstevel@tonic-gate iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, 739*0Sstevel@tonic-gate ddi_dma_attr_t *dma_attr, int (*waitfp)(caddr_t), caddr_t arg, 740*0Sstevel@tonic-gate ddi_dma_handle_t *handlep) 741*0Sstevel@tonic-gate { 742*0Sstevel@tonic-gate ioaddr_t addrlow, addrhigh, segalign; 743*0Sstevel@tonic-gate ddi_dma_impl_t *mp; 744*0Sstevel@tonic-gate struct dma_impl_priv *mppriv; 745*0Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 746*0Sstevel@tonic-gate ddi_get_soft_state(sbusp, ddi_get_instance(dip)); 747*0Sstevel@tonic-gate 748*0Sstevel@tonic-gate /* 749*0Sstevel@tonic-gate * Setup dma burstsizes and min-xfer counts. 
750*0Sstevel@tonic-gate */ 751*0Sstevel@tonic-gate (void) iommu_dma_lim_setup(dip, rdip, softsp, 752*0Sstevel@tonic-gate &dma_attr->dma_attr_burstsizes, 753*0Sstevel@tonic-gate dma_attr->dma_attr_burstsizes, &dma_attr->dma_attr_minxfer, 754*0Sstevel@tonic-gate dma_attr->dma_attr_flags); 755*0Sstevel@tonic-gate 756*0Sstevel@tonic-gate if (dma_attr->dma_attr_burstsizes == 0) 757*0Sstevel@tonic-gate return (DDI_DMA_BADATTR); 758*0Sstevel@tonic-gate 759*0Sstevel@tonic-gate addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo; 760*0Sstevel@tonic-gate addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi; 761*0Sstevel@tonic-gate segalign = (ioaddr_t)dma_attr->dma_attr_seg; 762*0Sstevel@tonic-gate 763*0Sstevel@tonic-gate /* 764*0Sstevel@tonic-gate * Check sanity for hi and lo address limits 765*0Sstevel@tonic-gate */ 766*0Sstevel@tonic-gate if ((addrhigh <= addrlow) || 767*0Sstevel@tonic-gate (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) { 768*0Sstevel@tonic-gate return (DDI_DMA_BADATTR); 769*0Sstevel@tonic-gate } 770*0Sstevel@tonic-gate if (dma_attr->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL) 771*0Sstevel@tonic-gate return (DDI_DMA_BADATTR); 772*0Sstevel@tonic-gate 773*0Sstevel@tonic-gate mppriv = kmem_zalloc(sizeof (*mppriv), 774*0Sstevel@tonic-gate (waitfp == DDI_DMA_SLEEP) ? 
KM_SLEEP : KM_NOSLEEP); 775*0Sstevel@tonic-gate 776*0Sstevel@tonic-gate if (mppriv == NULL) { 777*0Sstevel@tonic-gate if (waitfp != DDI_DMA_DONTWAIT) { 778*0Sstevel@tonic-gate ddi_set_callback(waitfp, arg, &softsp->dvma_call_list_id); 779*0Sstevel@tonic-gate } 780*0Sstevel@tonic-gate return (DDI_DMA_NORESOURCES); 781*0Sstevel@tonic-gate } 782*0Sstevel@tonic-gate mp = (ddi_dma_impl_t *)mppriv; 783*0Sstevel@tonic-gate 784*0Sstevel@tonic-gate DPRINTF(IOMMU_DMA_ALLOCHDL_DEBUG, ("dma_allochdl: (%s) handle %x " 785*0Sstevel@tonic-gate "hi %x lo %x min %x burst %x\n", 786*0Sstevel@tonic-gate ddi_get_name(dip), mp, addrhigh, addrlow, 787*0Sstevel@tonic-gate dma_attr->dma_attr_minxfer, dma_attr->dma_attr_burstsizes)); 788*0Sstevel@tonic-gate 789*0Sstevel@tonic-gate mp->dmai_rdip = rdip; 790*0Sstevel@tonic-gate mp->dmai_minxfer = (uint_t)dma_attr->dma_attr_minxfer; 791*0Sstevel@tonic-gate mp->dmai_burstsizes = (uint_t)dma_attr->dma_attr_burstsizes; 792*0Sstevel@tonic-gate mp->dmai_attr = *dma_attr; 793*0Sstevel@tonic-gate /* See if the DMA engine has any limit restrictions. 
*/ 794*0Sstevel@tonic-gate if (segalign == (ioaddr_t)UINT32_MAX && 795*0Sstevel@tonic-gate addrhigh == (ioaddr_t)UINT32_MAX && 796*0Sstevel@tonic-gate (dma_attr->dma_attr_align <= IOMMU_PAGESIZE) && addrlow == 0) { 797*0Sstevel@tonic-gate mp->dmai_rflags |= DMP_NOLIMIT; 798*0Sstevel@tonic-gate } 799*0Sstevel@tonic-gate mppriv->softsp = softsp; 800*0Sstevel@tonic-gate mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag); 801*0Sstevel@tonic-gate 802*0Sstevel@tonic-gate *handlep = (ddi_dma_handle_t)mp; 803*0Sstevel@tonic-gate return (DDI_SUCCESS); 804*0Sstevel@tonic-gate } 805*0Sstevel@tonic-gate 806*0Sstevel@tonic-gate /*ARGSUSED*/ 807*0Sstevel@tonic-gate int 808*0Sstevel@tonic-gate iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 809*0Sstevel@tonic-gate { 810*0Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)handle; 811*0Sstevel@tonic-gate struct sbus_soft_state *softsp = mppriv->softsp; 812*0Sstevel@tonic-gate ASSERT(softsp != NULL); 813*0Sstevel@tonic-gate 814*0Sstevel@tonic-gate kmem_free(mppriv, sizeof (*mppriv)); 815*0Sstevel@tonic-gate 816*0Sstevel@tonic-gate if (softsp->dvma_call_list_id != 0) { 817*0Sstevel@tonic-gate ddi_run_callback(&softsp->dvma_call_list_id); 818*0Sstevel@tonic-gate } 819*0Sstevel@tonic-gate return (DDI_SUCCESS); 820*0Sstevel@tonic-gate } 821*0Sstevel@tonic-gate 822*0Sstevel@tonic-gate static int 823*0Sstevel@tonic-gate check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr, 824*0Sstevel@tonic-gate uint32_t *size) 825*0Sstevel@tonic-gate { 826*0Sstevel@tonic-gate ioaddr_t addrlow; 827*0Sstevel@tonic-gate ioaddr_t addrhigh; 828*0Sstevel@tonic-gate uint32_t segalign; 829*0Sstevel@tonic-gate uint32_t smask; 830*0Sstevel@tonic-gate 831*0Sstevel@tonic-gate smask = *size - 1; 832*0Sstevel@tonic-gate segalign = dma_attr->dma_attr_seg; 833*0Sstevel@tonic-gate if (smask > segalign) { 834*0Sstevel@tonic-gate if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0) 
			return (DDI_DMA_TOOBIG);
		/* Shrink the request to one segment's worth. */
		*size = segalign + 1;
	}
	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
	/* Second test catches wraparound of addrlow + smask. */
	if (addrlow + smask > addrhigh || addrlow + smask < addrlow) {
		if (!((addrlow + dmareq->dmar_object.dmao_size == 0) &&
		    (addrhigh == (ioaddr_t)-1))) {
			if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
				return (DDI_DMA_TOOBIG);
			/* Partial mapping: clamp to the reachable range. */
			*size = MIN(addrhigh - addrlow + 1, *size);
		}
	}
	return (DDI_DMA_MAPOK);
}

/*
 * DDI bindhdl entry point: allocate DVMA space for the object described
 * by dmareq, load the IOMMU TTEs for it, and fill in the first cookie.
 * Returns DDI_DMA_MAPPED, DDI_DMA_PARTIAL_MAP (windowed mapping),
 * or a DDI_DMA_* error; on DDI_DMA_NORESOURCES a callback is queued
 * unless dmar_fp is DDI_DMA_DONTWAIT.
 */
int
iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	page_t *pp;
	uint32_t size;
	ioaddr_t ioaddr;
	uint_t offset;
	uintptr_t addr = 0;
	pgcnt_t npages;
	int rval;
	ddi_dma_attr_t *dma_attr;
	struct sbus_soft_state *softsp;
	struct page **pplist = NULL;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

#ifdef lint
	dip = dip;
	rdip = rdip;
#endif

	/* A handle may carry only one active binding at a time. */
	if (mp->dmai_inuse)
		return (DDI_DMA_INUSE);

	dma_attr = &mp->dmai_attr;
	size = (uint32_t)dmareq->dmar_object.dmao_size;
	if (!(mp->dmai_rflags & DMP_NOLIMIT)) {
		/* Restricted engine: clamp size against the attributes. */
		rval = check_dma_attr(dmareq, dma_attr, &size);
		if (rval != DDI_DMA_MAPOK)
			return (rval);
	}
	mp->dmai_inuse = 1;
	mp->dmai_offset = 0;
	mp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
	    (mp->dmai_rflags & DMP_NOLIMIT);

	/*
	 * Decode the object: either a virtual address range (optionally
	 * with a shadow page list in v_priv) or a page_t chain.
	 */
	switch (dmareq->dmar_object.dmao_type) {
	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		addr = (uintptr_t)dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
		offset = addr & IOMMU_PAGEOFFSET;
		pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
		npages = iommu_btopr(OBJSIZE + offset);

		DPRINTF(IOMMU_DMAMAP_DEBUG, ("dma_map vaddr: %x pages "
		    "req addr %lx off %x OBJSIZE %x\n",
		    npages, addr, offset, OBJSIZE));

		/* We don't need the addr anymore if we have a shadow list */
		if (pplist != NULL)
			addr = NULL;
		pp = NULL;
		break;

	case DMA_OTYP_PAGES:
		pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
		offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
		npages = iommu_btopr(OBJSIZE + offset);
		break;

	case DMA_OTYP_PADDR:
	default:
		/*
		 * Not a supported type for this implementation
		 */
		rval = DDI_DMA_NOMAPPING;
		goto bad;
	}

	/* Get our soft state once we know we're mapping an object. */
	softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
		if (size != OBJSIZE) {
			/*
			 * If the request is for partial mapping arrangement,
			 * the device has to be able to address at least the
			 * size of the window we are establishing.
			 */
			if (size < iommu_ptob(MIN_DVMA_WIN_SIZE)) {
				rval = DDI_DMA_NOMAPPING;
				goto bad;
			}
			npages = iommu_btopr(size + offset);
		}
		/*
		 * If the size requested is less than a moderate amt,
		 * skip the partial mapping stuff- it's not worth the
		 * effort.
		 */
		if (npages > MIN_DVMA_WIN_SIZE) {
			/* Window the mapping at MIN_DVMA_WIN_SIZE pages. */
			npages = MIN_DVMA_WIN_SIZE + iommu_btopr(offset);
			size = iommu_ptob(MIN_DVMA_WIN_SIZE);
			DPRINTF(IOMMU_DMA_SETUP_DEBUG, ("dma_setup: SZ %x pg "
			    "%x sz %lx\n", OBJSIZE, npages, size));
			if (pplist != NULL) {
				/* Remember the shadow list for map_window. */
				mp->dmai_minfo = (void *)pplist;
				mp->dmai_rflags |= DMP_SHADOW;
			}
		} else {
			/* Object is small: drop the PARTIAL flag. */
			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
		}
	} else {
		/* Refuse requests that would exhaust the DVMA arena. */
		if (npages >= iommu_btop(softsp->iommu_dvma_size) -
		    MIN_DVMA_WIN_SIZE) {
			rval = DDI_DMA_TOOBIG;
			goto bad;
		}
	}

	/*
	 * save dmareq-object, size and npages into mp
	 */
	mp->dmai_object = dmareq->dmar_object;
	mp->dmai_size = size;
	mp->dmai_ndvmapages = npages;

	if (mp->dmai_rflags & DMP_NOLIMIT) {
		/* Unrestricted engine: cheap vmem_alloc suffices. */
		ioaddr = (ioaddr_t)vmem_alloc(softsp->dvma_arena,
		    iommu_ptob(npages),
		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
		if (ioaddr == 0) {
			rval = DDI_DMA_NORESOURCES;
			goto bad;
		}

		/*
		 * If we have a 1 page request and we're working with a page
		 * list, we're going to speed load an IOMMU entry.
		 */
		if (npages == 1 && !addr) {
			uint64_t iotte_flag = IOTTE_VALID | IOTTE_CACHE |
			    IOTTE_WRITE | IOTTE_STREAM;
			volatile uint64_t *iotte_ptr;
			pfn_t pfn;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
			struct io_mem_list *iomemp;
			pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */

			iotte_ptr = IOTTE_NDX(ioaddr,
			    softsp->soft_tsb_base_addr);

			if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
				/* Consistent memory: no streaming buffer. */
				mp->dmai_rflags |= DMP_NOSYNC;
				iotte_flag ^= IOTTE_STREAM;
			} else if (softsp->stream_buf_off)
				iotte_flag ^= IOTTE_STREAM;

			/* Single page: partial mapping is meaningless. */
			mp->dmai_rflags ^= DDI_DMA_PARTIAL;

			if (pp != NULL)
				pfn = pp->p_pagenum;
			else
				pfn = (*pplist)->p_pagenum;

			/* Flush the old TLB entry before loading the TTE. */
			iommu_tlb_flush(softsp, ioaddr, 1);

			*iotte_ptr =
			    ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

			mp->dmai_mapping = (ioaddr_t)(ioaddr + offset);
			mp->dmai_nwin = 0;
			if (cp != NULL) {
				cp->dmac_notused = 0;
				cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
				cp->dmac_size = mp->dmai_size;
				cp->dmac_type = 0;
				*ccountp = 1;
			}

			DPRINTF(IOMMU_TTE, ("speed loading: TTE index %x "
			    "pfn %lx tte flag %llx addr %lx ioaddr %x\n",
			    iotte_ptr, pfn, iotte_flag, addr, ioaddr));

#if defined(DEBUG) && defined(IO_MEMUSAGE)
			/* Record the mapping on the debug accounting list. */
			iomemp = kmem_alloc(sizeof (struct io_mem_list),
			    KM_SLEEP);
			iomemp->rdip = mp->dmai_rdip;
			iomemp->ioaddr = ioaddr;
			iomemp->addr = addr;
			iomemp->npages = npages;
			pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) *
			    (npages + 1), KM_SLEEP);
			*pfnp = pfn;
			mutex_enter(&softsp->iomemlock);
			iomemp->next = softsp->iomem;
			softsp->iomem = iomemp;
			mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

			return (DDI_DMA_MAPPED);
		}
	} else {
		/* Constrained allocation honoring align/seg/addr limits. */
		ioaddr = (ioaddr_t)vmem_xalloc(softsp->dvma_arena,
		    iommu_ptob(npages),
		    MAX((uint_t)dma_attr->dma_attr_align, IOMMU_PAGESIZE), 0,
		    (uint_t)dma_attr->dma_attr_seg + 1,
		    (void *)(ioaddr_t)dma_attr->dma_attr_addr_lo,
		    (void *)((ioaddr_t)dma_attr->dma_attr_addr_hi + 1),
		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
	}

	if (ioaddr == 0) {
		/*
		 * A sleeping vmem allocation only fails when the request
		 * can never be satisfied, hence NOMAPPING; a non-sleeping
		 * failure is a transient resource shortage.
		 */
		if (dmareq->dmar_fp == DDI_DMA_SLEEP)
			rval = DDI_DMA_NOMAPPING;
		else
			rval = DDI_DMA_NORESOURCES;
		goto bad;
	}

	mp->dmai_mapping = ioaddr + offset;
	ASSERT(mp->dmai_mapping >= softsp->iommu_dvma_base);

	/*
	 * At this point we have a range of virtual address allocated
	 * with which we now have to map to the requested object.
	 */
	if (addr) {
		rval = iommu_create_vaddr_mappings(mp,
		    addr & ~IOMMU_PAGEOFFSET);
		if (rval == DDI_DMA_NOMAPPING)
			goto bad_nomap;
	} else {
		rval = iommu_create_pp_mappings(mp, pp, pplist);
		if (rval == DDI_DMA_NOMAPPING)
			goto bad_nomap;
	}

	if (cp) {
		cp->dmac_notused = 0;
		cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
		cp->dmac_size = mp->dmai_size;
		cp->dmac_type = 0;
		*ccountp = 1;
	}
	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
		/* Windowed mapping: compute the number of windows. */
		size = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
		mp->dmai_nwin =
		    (dmareq->dmar_object.dmao_size + (size - 1)) / size;
		return (DDI_DMA_PARTIAL_MAP);
	} else {
		mp->dmai_nwin = 0;
		return (DDI_DMA_MAPPED);
	}

bad_nomap:
	/*
	 * Could not create mmu mappings.
	 */
	if (mp->dmai_rflags & DMP_NOLIMIT) {
		vmem_free(softsp->dvma_arena, (void *)ioaddr,
		    iommu_ptob(npages));
	} else {
		vmem_xfree(softsp->dvma_arena, (void *)ioaddr,
		    iommu_ptob(npages));
	}

bad:
	if (rval == DDI_DMA_NORESOURCES &&
	    dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
		/* Queue the caller to be retried when resources free up. */
		ddi_set_callback(dmareq->dmar_fp,
		    dmareq->dmar_arg, &softsp->dvma_call_list_id);
	}
	mp->dmai_inuse = 0;
	return (rval);
}

/*
 * DDI unbindhdl entry point: tear down the binding established by
 * iommu_dma_bindhdl -- sync streaming buffers, release the DVMA range,
 * and mark the handle reusable.
 */
/* ARGSUSED */
int
iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ioaddr_t addr;
	uint_t npages;
	size_t size;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
	npages = mp->dmai_ndvmapages;
	size = iommu_ptob(npages);
	DPRINTF(IOMMU_DMA_UNBINDHDL_DEBUG, ("iommu_dma_unbindhdl: "
	    "unbinding addr %x for %x pages\n", addr, mp->dmai_ndvmapages));

	/* sync the entire object */
	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
		/* flush stream write buffers */
		sync_stream_buf(softsp, addr, npages, (int *)&mppriv->sync_flag,
		    mppriv->phys_sync_flag);
	}

#if defined(DEBUG) && defined(IO_MEMDEBUG)
	/*
	 * 'Free' the dma mappings.
	 */
	iommu_remove_mappings(mp);
#endif /* DEBUG && IO_MEMDEBUG */

	ASSERT(npages > (uint_t)0);
	/* Return the DVMA range to whichever arena path allocated it. */
	if (mp->dmai_rflags & DMP_NOLIMIT)
		vmem_free(softsp->dvma_arena, (void *)addr, size);
	else
		vmem_xfree(softsp->dvma_arena, (void *)addr, size);

	mp->dmai_ndvmapages = 0;
	mp->dmai_inuse = 0;
	mp->dmai_minfo = NULL;

	/* Freed DVMA space may satisfy a queued waiter. */
	if (softsp->dvma_call_list_id != 0)
		ddi_run_callback(&softsp->dvma_call_list_id);

	return (DDI_SUCCESS);
}

/*
 * DDI flush/sync entry point: for non-consistent mappings, flush the
 * streaming write buffers covering the handle's pages.  The off/len/
 * cache_flags arguments are ignored; the whole object is synced.
 */
/*ARGSUSED*/
int
iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
		    mppriv->phys_sync_flag);
	}
	return (DDI_SUCCESS);
}

/*
 * DDI getwin entry point: move a partially-mapped handle to window
 * 'win', returning the window's offset, length and cookie.
 */
/*ARGSUSED*/
int
iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	off_t offset;
	uint_t winsize;
	uint_t newoff;
	int rval;

	offset = mp->dmai_mapping & IOMMU_PAGEOFFSET;
	winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));

	DPRINTF(IOMMU_DMA_WIN_DEBUG, ("getwin win %d winsize %x\n", win,
	    winsize));

	/*
	 * win is in the range [0 .. dmai_nwin-1]
	 */
	if (win >= mp->dmai_nwin)
		return (DDI_FAILURE);

	newoff = win * winsize;
	if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer)
		return (DDI_FAILURE);

	ASSERT(cookiep);
	cookiep->dmac_notused = 0;
	cookiep->dmac_type = 0;
	cookiep->dmac_address = (ioaddr_t)mp->dmai_mapping;
	cookiep->dmac_size = mp->dmai_size;
	*ccountp = 1;
	*offp = (off_t)newoff;
	*lenp = (uint_t)winsize;

	if (newoff == mp->dmai_offset) {
		/*
		 * Nothing to do...
		 */
		return (DDI_SUCCESS);
	}

	if ((rval = iommu_map_window(mp, newoff, winsize)) != DDI_SUCCESS)
		return (rval);

	/*
	 * Set this again in case iommu_map_window() has changed it
	 */
	cookiep->dmac_size = mp->dmai_size;

	return (DDI_SUCCESS);
}

/*
 * Remap the handle's DVMA range onto the portion of the object starting
 * at newoff (at most winsize bytes): tear down the current window's
 * mappings, advance into the object, and load new mappings.
 */
static int
iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize)
{
	uintptr_t addr = 0;
	page_t *pp;
	uint_t flags;
	struct page **pplist = NULL;

#if defined(DEBUG) && defined(IO_MEMDEBUG)
	/* Free mappings for current window */
	iommu_remove_mappings(mp);
#endif /* DEBUG && IO_MEMDEBUG */

	mp->dmai_offset = newoff;
	/* Last window may be shorter than winsize. */
	mp->dmai_size = mp->dmai_object.dmao_size - newoff;
	mp->dmai_size = MIN(mp->dmai_size, winsize);

	if (mp->dmai_object.dmao_type == DMA_OTYP_VADDR ||
	    mp->dmai_object.dmao_type == DMA_OTYP_BUFVADDR) {
		if (mp->dmai_rflags & DMP_SHADOW) {
			/* Advance into the shadow page list. */
			pplist = (struct page **)mp->dmai_minfo;
			ASSERT(pplist != NULL);
			pplist = pplist + (newoff >> MMU_PAGESHIFT);
		} else {
			addr = (uintptr_t)
			    mp->dmai_object.dmao_obj.virt_obj.v_addr;
			addr = (addr + newoff) & ~IOMMU_PAGEOFFSET;
		}
		pp = NULL;
	} else {
		/* Page-chain object: walk forward newoff bytes. */
		pp = mp->dmai_object.dmao_obj.pp_obj.pp_pp;
		flags = 0;
		while (flags < newoff) {
			pp = pp->p_next;
			flags += MMU_PAGESIZE;
		}
	}

	/* Set up mappings for next window */
	if (addr) {
		if (iommu_create_vaddr_mappings(mp, addr) < 0)
			return (DDI_FAILURE);
	} else {
		if (iommu_create_pp_mappings(mp, pp, pplist) < 0)
			return (DDI_FAILURE);
	}

	/*
	 * also invalidate read stream buffer
	 */
	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
		    mppriv->phys_sync_flag);
	}

	return (DDI_SUCCESS);

}

/*
 * Old-style DDI map entry point (ddi_dma_lim_t based).  Validates the
 * limits, synthesizes a ddi_dma_attr_t on the handle, and delegates the
 * actual work to iommu_dma_bindhdl.  With handlep == NULL this is an
 * advisory call and only the limit checks are performed.
 */
int
iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
{
	ddi_dma_lim_t *dma_lim = dmareq->dmar_limits;
	ddi_dma_impl_t *mp;
	ddi_dma_attr_t *dma_attr;
	struct dma_impl_priv *mppriv;
	ioaddr_t addrlow, addrhigh;
	ioaddr_t segalign;
	int rval;
	struct sbus_soft_state *softsp =
	    (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
	    ddi_get_instance(dip));

	/* Sanity-check the address window against the DVMA base. */
	addrlow = dma_lim->dlim_addr_lo;
	addrhigh = dma_lim->dlim_addr_hi;
	if ((addrhigh <= addrlow) ||
	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * Setup DMA burstsizes and min-xfer counts.
	 */
	(void) iommu_dma_lim_setup(dip, rdip, softsp, &dma_lim->dlim_burstsizes,
	    (uint_t)dma_lim->dlim_burstsizes, &dma_lim->dlim_minxfer,
	    dmareq->dmar_flags);

	if (dma_lim->dlim_burstsizes == 0)
		return (DDI_DMA_NOMAPPING);
	/*
	 * If not an advisory call, get a DMA handle
	 */
	if (!handlep) {
		return (DDI_DMA_MAPOK);
	}

	mppriv = kmem_zalloc(sizeof (*mppriv),
	    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
	if (mppriv == NULL) {
		if (dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(dmareq->dmar_fp,
			    dmareq->dmar_arg, &softsp->dvma_call_list_id);
		}
		return (DDI_DMA_NORESOURCES);
	}
	mp = (ddi_dma_impl_t *)mppriv;
	mp->dmai_rdip = rdip;
	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
	mp->dmai_minxfer = dma_lim->dlim_minxfer;
	mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
	mp->dmai_offset = 0;
	mp->dmai_ndvmapages = 0;
	mp->dmai_minfo = 0;
	mp->dmai_inuse = 0;
	segalign = dma_lim->dlim_cntr_max;
	/* See if the DMA engine has any limit restrictions. */
	if (segalign == UINT32_MAX && addrhigh == UINT32_MAX &&
	    addrlow == 0) {
		mp->dmai_rflags |= DMP_NOLIMIT;
	}
	mppriv->softsp = softsp;
	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
	/* Translate the old-style limits into attributes for bindhdl. */
	dma_attr = &mp->dmai_attr;
	dma_attr->dma_attr_align = 1;
	dma_attr->dma_attr_addr_lo = addrlow;
	dma_attr->dma_attr_addr_hi = addrhigh;
	dma_attr->dma_attr_seg = segalign;
	dma_attr->dma_attr_burstsizes = dma_lim->dlim_burstsizes;
	rval = iommu_dma_bindhdl(dip, rdip, (ddi_dma_handle_t)mp,
	    dmareq, NULL, NULL);
	if (rval && (rval != DDI_DMA_PARTIAL_MAP)) {
		kmem_free(mppriv, sizeof (*mppriv));
	} else {
		*handlep = (ddi_dma_handle_t)mp;
	}
	return (rval);
}

/*
 * DDI mctl entry point: miscellaneous DMA control operations.
 * (Body continues beyond this chunk.)
 */
/*ARGSUSED*/
int
iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags)
{
	ioaddr_t addr;
	uint_t offset;
	pgcnt_t npages;
	size_t size;
	ddi_dma_cookie_t *cp;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

	DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: handle %p ", mp));
	switch (request) {
	case
DDI_DMA_FREE: 1402*0Sstevel@tonic-gate { 1403*0Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 1404*0Sstevel@tonic-gate struct sbus_soft_state *softsp = mppriv->softsp; 1405*0Sstevel@tonic-gate ASSERT(softsp != NULL); 1406*0Sstevel@tonic-gate 1407*0Sstevel@tonic-gate /* 1408*0Sstevel@tonic-gate * 'Free' the dma mappings. 1409*0Sstevel@tonic-gate */ 1410*0Sstevel@tonic-gate addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET); 1411*0Sstevel@tonic-gate npages = mp->dmai_ndvmapages; 1412*0Sstevel@tonic-gate size = iommu_ptob(npages); 1413*0Sstevel@tonic-gate 1414*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_DMA_FREE_DEBUG, ("iommu_dma_mctl dmafree:" 1415*0Sstevel@tonic-gate "freeing vaddr %x for %x pages.\n", addr, 1416*0Sstevel@tonic-gate mp->dmai_ndvmapages)); 1417*0Sstevel@tonic-gate /* sync the entire object */ 1418*0Sstevel@tonic-gate if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) { 1419*0Sstevel@tonic-gate /* flush stream write buffers */ 1420*0Sstevel@tonic-gate sync_stream_buf(softsp, addr, npages, 1421*0Sstevel@tonic-gate (int *)&mppriv->sync_flag, mppriv->phys_sync_flag); 1422*0Sstevel@tonic-gate } 1423*0Sstevel@tonic-gate 1424*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG) 1425*0Sstevel@tonic-gate iommu_remove_mappings(mp); 1426*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */ 1427*0Sstevel@tonic-gate 1428*0Sstevel@tonic-gate ASSERT(npages > (uint_t)0); 1429*0Sstevel@tonic-gate if (mp->dmai_rflags & DMP_NOLIMIT) 1430*0Sstevel@tonic-gate vmem_free(softsp->dvma_arena, (void *)addr, size); 1431*0Sstevel@tonic-gate else 1432*0Sstevel@tonic-gate vmem_xfree(softsp->dvma_arena, (void *)addr, size); 1433*0Sstevel@tonic-gate 1434*0Sstevel@tonic-gate kmem_free(mppriv, sizeof (*mppriv)); 1435*0Sstevel@tonic-gate 1436*0Sstevel@tonic-gate if (softsp->dvma_call_list_id != 0) 1437*0Sstevel@tonic-gate ddi_run_callback(&softsp->dvma_call_list_id); 1438*0Sstevel@tonic-gate 1439*0Sstevel@tonic-gate break; 
/*
 * End of DDI_DMA_FREE: streaming buffers synced for non-consistent
 * mappings, DVMA space returned to dvma_arena (vmem_free for
 * DMP_NOLIMIT handles, vmem_xfree otherwise), the handle freed, and
 * any allocation waiters kicked via ddi_run_callback().
 */
1440*0Sstevel@tonic-gate } 1441*0Sstevel@tonic-gate 1442*0Sstevel@tonic-gate case DDI_DMA_SET_SBUS64: 1443*0Sstevel@tonic-gate { 1444*0Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 1445*0Sstevel@tonic-gate 1446*0Sstevel@tonic-gate return (iommu_dma_lim_setup(dip, rdip, mppriv->softsp, 1447*0Sstevel@tonic-gate &mp->dmai_burstsizes, (uint_t)*lenp, &mp->dmai_minxfer, 1448*0Sstevel@tonic-gate DDI_DMA_SBUS_64BIT)); 1449*0Sstevel@tonic-gate } 1450*0Sstevel@tonic-gate 1451*0Sstevel@tonic-gate case DDI_DMA_HTOC: 1452*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_HTOC_DEBUG, ("htoc off %lx mapping %lx " 1453*0Sstevel@tonic-gate "size %lx\n", *offp, mp->dmai_mapping, 1454*0Sstevel@tonic-gate mp->dmai_size)); 1455*0Sstevel@tonic-gate 1456*0Sstevel@tonic-gate if ((uint_t)(*offp) >= mp->dmai_size) 1457*0Sstevel@tonic-gate return (DDI_FAILURE); 1458*0Sstevel@tonic-gate 1459*0Sstevel@tonic-gate cp = (ddi_dma_cookie_t *)objp; 1460*0Sstevel@tonic-gate cp->dmac_notused = 0; 1461*0Sstevel@tonic-gate cp->dmac_address = (mp->dmai_mapping + (uint_t)(*offp)); 1462*0Sstevel@tonic-gate cp->dmac_size = 1463*0Sstevel@tonic-gate mp->dmai_mapping + mp->dmai_size - cp->dmac_address; 1464*0Sstevel@tonic-gate cp->dmac_type = 0; 1465*0Sstevel@tonic-gate 1466*0Sstevel@tonic-gate break; 1467*0Sstevel@tonic-gate 1468*0Sstevel@tonic-gate case DDI_DMA_KVADDR: 1469*0Sstevel@tonic-gate /* 1470*0Sstevel@tonic-gate * If a physical address mapping has percolated this high, 1471*0Sstevel@tonic-gate * that is an error (maybe?). 
1472*0Sstevel@tonic-gate */ 1473*0Sstevel@tonic-gate if (mp->dmai_rflags & DMP_PHYSADDR) { 1474*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_KVADDR_DEBUG, ("kvaddr of phys " 1475*0Sstevel@tonic-gate "mapping\n")); 1476*0Sstevel@tonic-gate return (DDI_FAILURE); 1477*0Sstevel@tonic-gate } 1478*0Sstevel@tonic-gate 1479*0Sstevel@tonic-gate return (DDI_FAILURE); 1480*0Sstevel@tonic-gate 1481*0Sstevel@tonic-gate case DDI_DMA_NEXTWIN: 1482*0Sstevel@tonic-gate { 1483*0Sstevel@tonic-gate ddi_dma_win_t *owin, *nwin; 1484*0Sstevel@tonic-gate uint_t winsize, newoff; 1485*0Sstevel@tonic-gate int rval; 1486*0Sstevel@tonic-gate 1487*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin\n")); 1488*0Sstevel@tonic-gate 1489*0Sstevel@tonic-gate mp = (ddi_dma_impl_t *)handle; 1490*0Sstevel@tonic-gate owin = (ddi_dma_win_t *)offp; 1491*0Sstevel@tonic-gate nwin = (ddi_dma_win_t *)objp; 1492*0Sstevel@tonic-gate if (mp->dmai_rflags & DDI_DMA_PARTIAL) { 1493*0Sstevel@tonic-gate if (*owin == NULL) { 1494*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, 1495*0Sstevel@tonic-gate ("nextwin: win == NULL\n")); 1496*0Sstevel@tonic-gate mp->dmai_offset = 0; 1497*0Sstevel@tonic-gate *nwin = (ddi_dma_win_t)mp; 1498*0Sstevel@tonic-gate return (DDI_SUCCESS); 1499*0Sstevel@tonic-gate } 1500*0Sstevel@tonic-gate 1501*0Sstevel@tonic-gate offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET); 1502*0Sstevel@tonic-gate winsize = iommu_ptob(mp->dmai_ndvmapages - 1503*0Sstevel@tonic-gate iommu_btopr(offset)); 1504*0Sstevel@tonic-gate 1505*0Sstevel@tonic-gate newoff = (uint_t)(mp->dmai_offset + winsize); 1506*0Sstevel@tonic-gate if (newoff > mp->dmai_object.dmao_size - 1507*0Sstevel@tonic-gate mp->dmai_minxfer) 1508*0Sstevel@tonic-gate return (DDI_DMA_DONE); 1509*0Sstevel@tonic-gate 1510*0Sstevel@tonic-gate if ((rval = iommu_map_window(mp, newoff, winsize)) 1511*0Sstevel@tonic-gate != DDI_SUCCESS) 1512*0Sstevel@tonic-gate return (rval); 1513*0Sstevel@tonic-gate } else { 
/* Handle is not DDI_DMA_PARTIAL: the single window covers the whole object. */
1514*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin: no " 1515*0Sstevel@tonic-gate "partial mapping\n")); 1516*0Sstevel@tonic-gate if (*owin != NULL) 1517*0Sstevel@tonic-gate return (DDI_DMA_DONE); 1518*0Sstevel@tonic-gate mp->dmai_offset = 0; 1519*0Sstevel@tonic-gate *nwin = (ddi_dma_win_t)mp; 1520*0Sstevel@tonic-gate } 1521*0Sstevel@tonic-gate break; 1522*0Sstevel@tonic-gate } 1523*0Sstevel@tonic-gate 1524*0Sstevel@tonic-gate case DDI_DMA_NEXTSEG: 1525*0Sstevel@tonic-gate { 1526*0Sstevel@tonic-gate ddi_dma_seg_t *oseg, *nseg; 1527*0Sstevel@tonic-gate 1528*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_NEXTSEG_DEBUG, ("nextseg:\n")); 1529*0Sstevel@tonic-gate 1530*0Sstevel@tonic-gate oseg = (ddi_dma_seg_t *)lenp; 1531*0Sstevel@tonic-gate if (*oseg != NULL) 1532*0Sstevel@tonic-gate return (DDI_DMA_DONE); 1533*0Sstevel@tonic-gate nseg = (ddi_dma_seg_t *)objp; 1534*0Sstevel@tonic-gate *nseg = *((ddi_dma_seg_t *)offp); 1535*0Sstevel@tonic-gate break; 1536*0Sstevel@tonic-gate } 1537*0Sstevel@tonic-gate 1538*0Sstevel@tonic-gate case DDI_DMA_SEGTOC: 1539*0Sstevel@tonic-gate { 1540*0Sstevel@tonic-gate ddi_dma_seg_impl_t *seg; 1541*0Sstevel@tonic-gate 1542*0Sstevel@tonic-gate seg = (ddi_dma_seg_impl_t *)handle; 1543*0Sstevel@tonic-gate cp = (ddi_dma_cookie_t *)objp; 1544*0Sstevel@tonic-gate cp->dmac_notused = 0; 1545*0Sstevel@tonic-gate cp->dmac_address = (ioaddr_t)seg->dmai_mapping; 1546*0Sstevel@tonic-gate cp->dmac_size = *lenp = seg->dmai_size; 1547*0Sstevel@tonic-gate cp->dmac_type = 0; 1548*0Sstevel@tonic-gate *offp = seg->dmai_offset; 1549*0Sstevel@tonic-gate break; 1550*0Sstevel@tonic-gate } 1551*0Sstevel@tonic-gate 1552*0Sstevel@tonic-gate case DDI_DMA_MOVWIN: 1553*0Sstevel@tonic-gate { 1554*0Sstevel@tonic-gate uint_t winsize; 1555*0Sstevel@tonic-gate uint_t newoff; 1556*0Sstevel@tonic-gate int rval; 1557*0Sstevel@tonic-gate 1558*0Sstevel@tonic-gate offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET); 1559*0Sstevel@tonic-gate winsize = 
iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset)); 1560*0Sstevel@tonic-gate 1561*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("movwin off %lx len %x " 1562*0Sstevel@tonic-gate "winsize %x\n", *offp, *lenp, winsize)); 1563*0Sstevel@tonic-gate 1564*0Sstevel@tonic-gate if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0) 1565*0Sstevel@tonic-gate return (DDI_FAILURE); 1566*0Sstevel@tonic-gate 1567*0Sstevel@tonic-gate if (*lenp != (uint_t)-1 && *lenp != winsize) { 1568*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad length\n")); 1569*0Sstevel@tonic-gate return (DDI_FAILURE); 1570*0Sstevel@tonic-gate } 1571*0Sstevel@tonic-gate newoff = (uint_t)*offp; 1572*0Sstevel@tonic-gate if (newoff & (winsize - 1)) { 1573*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad off\n")); 1574*0Sstevel@tonic-gate return (DDI_FAILURE); 1575*0Sstevel@tonic-gate } 1576*0Sstevel@tonic-gate 1577*0Sstevel@tonic-gate if (newoff == mp->dmai_offset) { 1578*0Sstevel@tonic-gate /* 1579*0Sstevel@tonic-gate * Nothing to do... 1580*0Sstevel@tonic-gate */ 1581*0Sstevel@tonic-gate break; 1582*0Sstevel@tonic-gate } 1583*0Sstevel@tonic-gate 1584*0Sstevel@tonic-gate /* 1585*0Sstevel@tonic-gate * Check out new address... 
1586*0Sstevel@tonic-gate */ 1587*0Sstevel@tonic-gate if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer) { 1588*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("newoff out of " 1589*0Sstevel@tonic-gate "range\n")); 1590*0Sstevel@tonic-gate return (DDI_FAILURE); 1591*0Sstevel@tonic-gate } 1592*0Sstevel@tonic-gate 1593*0Sstevel@tonic-gate rval = iommu_map_window(mp, newoff, winsize); 1594*0Sstevel@tonic-gate if (rval != DDI_SUCCESS) 1595*0Sstevel@tonic-gate return (rval); 1596*0Sstevel@tonic-gate 1597*0Sstevel@tonic-gate if ((cp = (ddi_dma_cookie_t *)objp) != 0) { 1598*0Sstevel@tonic-gate cp->dmac_notused = 0; 1599*0Sstevel@tonic-gate cp->dmac_address = (ioaddr_t)mp->dmai_mapping; 1600*0Sstevel@tonic-gate cp->dmac_size = mp->dmai_size; 1601*0Sstevel@tonic-gate cp->dmac_type = 0; 1602*0Sstevel@tonic-gate } 1603*0Sstevel@tonic-gate *offp = (off_t)newoff; 1604*0Sstevel@tonic-gate *lenp = (uint_t)winsize; 1605*0Sstevel@tonic-gate break; 1606*0Sstevel@tonic-gate } 1607*0Sstevel@tonic-gate 1608*0Sstevel@tonic-gate case DDI_DMA_REPWIN: 1609*0Sstevel@tonic-gate if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0) { 1610*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin fail\n")); 1611*0Sstevel@tonic-gate return (DDI_FAILURE); 1612*0Sstevel@tonic-gate } 1613*0Sstevel@tonic-gate 1614*0Sstevel@tonic-gate *offp = (off_t)mp->dmai_offset; 1615*0Sstevel@tonic-gate 1616*0Sstevel@tonic-gate addr = mp->dmai_ndvmapages - 1617*0Sstevel@tonic-gate iommu_btopr(mp->dmai_mapping & IOMMU_PAGEOFFSET); 1618*0Sstevel@tonic-gate 1619*0Sstevel@tonic-gate *lenp = (uint_t)iommu_ptob(addr); 1620*0Sstevel@tonic-gate 1621*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin off %x len %x\n", 1622*0Sstevel@tonic-gate mp->dmai_offset, mp->dmai_size)); 1623*0Sstevel@tonic-gate 1624*0Sstevel@tonic-gate break; 1625*0Sstevel@tonic-gate 1626*0Sstevel@tonic-gate case DDI_DMA_GETERR: 1627*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_GETERR_DEBUG, 1628*0Sstevel@tonic-gate 
("iommu_dma_mctl: geterr\n")); 1629*0Sstevel@tonic-gate 1630*0Sstevel@tonic-gate break; 1631*0Sstevel@tonic-gate 1632*0Sstevel@tonic-gate case DDI_DMA_COFF: 1633*0Sstevel@tonic-gate cp = (ddi_dma_cookie_t *)offp; 1634*0Sstevel@tonic-gate addr = cp->dmac_address; 1635*0Sstevel@tonic-gate 1636*0Sstevel@tonic-gate if (addr < mp->dmai_mapping || 1637*0Sstevel@tonic-gate addr >= mp->dmai_mapping + mp->dmai_size) 1638*0Sstevel@tonic-gate return (DDI_FAILURE); 1639*0Sstevel@tonic-gate 1640*0Sstevel@tonic-gate *objp = (caddr_t)(addr - mp->dmai_mapping); 1641*0Sstevel@tonic-gate 1642*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_COFF_DEBUG, ("coff off %lx mapping %x " 1643*0Sstevel@tonic-gate "size %x\n", (ulong_t)*objp, mp->dmai_mapping, 1644*0Sstevel@tonic-gate mp->dmai_size)); 1645*0Sstevel@tonic-gate 1646*0Sstevel@tonic-gate break; 1647*0Sstevel@tonic-gate 1648*0Sstevel@tonic-gate case DDI_DMA_RESERVE: 1649*0Sstevel@tonic-gate { 1650*0Sstevel@tonic-gate struct ddi_dma_req *dmareq = (struct ddi_dma_req *)offp; 1651*0Sstevel@tonic-gate ddi_dma_lim_t *dma_lim; 1652*0Sstevel@tonic-gate ddi_dma_handle_t *handlep; 1653*0Sstevel@tonic-gate uint_t np; 1654*0Sstevel@tonic-gate ioaddr_t ioaddr; 1655*0Sstevel@tonic-gate int i; 1656*0Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma; 1657*0Sstevel@tonic-gate struct sbus_soft_state *softsp = 1658*0Sstevel@tonic-gate (struct sbus_soft_state *)ddi_get_soft_state(sbusp, 1659*0Sstevel@tonic-gate ddi_get_instance(dip)); 1660*0Sstevel@tonic-gate 1661*0Sstevel@tonic-gate /* Some simple sanity checks */ 1662*0Sstevel@tonic-gate dma_lim = dmareq->dmar_limits; 1663*0Sstevel@tonic-gate if (dma_lim->dlim_burstsizes == 0) { 1664*0Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 1665*0Sstevel@tonic-gate ("Reserve: bad burstsizes\n")); 1666*0Sstevel@tonic-gate return (DDI_DMA_BADLIMITS); 1667*0Sstevel@tonic-gate } 1668*0Sstevel@tonic-gate if ((AHI <= ALO) || (AHI < softsp->iommu_dvma_base)) { 1669*0Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 
1670*0Sstevel@tonic-gate ("Reserve: bad limits\n")); 1671*0Sstevel@tonic-gate return (DDI_DMA_BADLIMITS); 1672*0Sstevel@tonic-gate } 1673*0Sstevel@tonic-gate 1674*0Sstevel@tonic-gate np = dmareq->dmar_object.dmao_size; 1675*0Sstevel@tonic-gate mutex_enter(&softsp->dma_pool_lock); 1676*0Sstevel@tonic-gate if (np > softsp->dma_reserve) { 1677*0Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 1678*0Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 1679*0Sstevel@tonic-gate ("Reserve: dma_reserve is exhausted\n")); 1680*0Sstevel@tonic-gate return (DDI_DMA_NORESOURCES); 1681*0Sstevel@tonic-gate } 1682*0Sstevel@tonic-gate 1683*0Sstevel@tonic-gate softsp->dma_reserve -= np; 1684*0Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 1685*0Sstevel@tonic-gate mp = kmem_zalloc(sizeof (*mp), KM_SLEEP); 1686*0Sstevel@tonic-gate mp->dmai_rflags = DMP_BYPASSNEXUS; 1687*0Sstevel@tonic-gate mp->dmai_rdip = rdip; 1688*0Sstevel@tonic-gate mp->dmai_minxfer = dma_lim->dlim_minxfer; 1689*0Sstevel@tonic-gate mp->dmai_burstsizes = dma_lim->dlim_burstsizes; 1690*0Sstevel@tonic-gate 1691*0Sstevel@tonic-gate ioaddr = (ioaddr_t)vmem_xalloc(softsp->dvma_arena, 1692*0Sstevel@tonic-gate iommu_ptob(np), IOMMU_PAGESIZE, 0, 1693*0Sstevel@tonic-gate dma_lim->dlim_cntr_max + 1, (void *)ALO, (void *)(AHI + 1), 1694*0Sstevel@tonic-gate dmareq->dmar_fp == DDI_DMA_SLEEP ? 
VM_SLEEP : VM_NOSLEEP); 1695*0Sstevel@tonic-gate 1696*0Sstevel@tonic-gate if (ioaddr == 0) { 1697*0Sstevel@tonic-gate mutex_enter(&softsp->dma_pool_lock); 1698*0Sstevel@tonic-gate softsp->dma_reserve += np; 1699*0Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 1700*0Sstevel@tonic-gate kmem_free(mp, sizeof (*mp)); 1701*0Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 1702*0Sstevel@tonic-gate ("Reserve: No dvma resources available\n")); 1703*0Sstevel@tonic-gate return (DDI_DMA_NOMAPPING); 1704*0Sstevel@tonic-gate } 1705*0Sstevel@tonic-gate 1706*0Sstevel@tonic-gate /* create a per request structure */ 1707*0Sstevel@tonic-gate iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma), 1708*0Sstevel@tonic-gate KM_SLEEP); 1709*0Sstevel@tonic-gate 1710*0Sstevel@tonic-gate /* 1711*0Sstevel@tonic-gate * We need to remember the size of the transfer so that 1712*0Sstevel@tonic-gate * we can figure the virtual pages to sync when the transfer 1713*0Sstevel@tonic-gate * is complete. 1714*0Sstevel@tonic-gate */ 1715*0Sstevel@tonic-gate iommu_fast_dvma->pagecnt = kmem_zalloc(np * 1716*0Sstevel@tonic-gate sizeof (uint_t), KM_SLEEP); 1717*0Sstevel@tonic-gate 1718*0Sstevel@tonic-gate /* Allocate a streaming cache sync flag for each index */ 1719*0Sstevel@tonic-gate iommu_fast_dvma->sync_flag = kmem_zalloc(np * 1720*0Sstevel@tonic-gate sizeof (int), KM_SLEEP); 1721*0Sstevel@tonic-gate 1722*0Sstevel@tonic-gate /* Allocate a physical sync flag for each index */ 1723*0Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag = 1724*0Sstevel@tonic-gate kmem_zalloc(np * sizeof (uint64_t), KM_SLEEP); 1725*0Sstevel@tonic-gate 1726*0Sstevel@tonic-gate for (i = 0; i < np; i++) 1727*0Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[i] = va_to_pa((caddr_t) 1728*0Sstevel@tonic-gate &iommu_fast_dvma->sync_flag[i]); 1729*0Sstevel@tonic-gate 1730*0Sstevel@tonic-gate mp->dmai_mapping = ioaddr; 1731*0Sstevel@tonic-gate mp->dmai_ndvmapages = np; 1732*0Sstevel@tonic-gate iommu_fast_dvma->ops = 
&iommu_dvma_ops; 1733*0Sstevel@tonic-gate iommu_fast_dvma->softsp = (caddr_t)softsp; 1734*0Sstevel@tonic-gate mp->dmai_nexus_private = (caddr_t)iommu_fast_dvma; 1735*0Sstevel@tonic-gate handlep = (ddi_dma_handle_t *)objp; 1736*0Sstevel@tonic-gate *handlep = (ddi_dma_handle_t)mp; 1737*0Sstevel@tonic-gate 1738*0Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 1739*0Sstevel@tonic-gate ("Reserve: mapping object %p base addr %lx size %x\n", 1740*0Sstevel@tonic-gate mp, mp->dmai_mapping, mp->dmai_ndvmapages)); 1741*0Sstevel@tonic-gate 1742*0Sstevel@tonic-gate break; 1743*0Sstevel@tonic-gate } 1744*0Sstevel@tonic-gate 1745*0Sstevel@tonic-gate case DDI_DMA_RELEASE: 1746*0Sstevel@tonic-gate { 1747*0Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1748*0Sstevel@tonic-gate uint_t np = npages = mp->dmai_ndvmapages; 1749*0Sstevel@tonic-gate ioaddr_t ioaddr = mp->dmai_mapping; 1750*0Sstevel@tonic-gate volatile uint64_t *iotte_ptr; 1751*0Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = (struct fast_dvma *) 1752*0Sstevel@tonic-gate mp->dmai_nexus_private; 1753*0Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 1754*0Sstevel@tonic-gate iommu_fast_dvma->softsp; 1755*0Sstevel@tonic-gate 1756*0Sstevel@tonic-gate ASSERT(softsp != NULL); 1757*0Sstevel@tonic-gate 1758*0Sstevel@tonic-gate /* Unload stale mappings and flush stale tlb's */ 1759*0Sstevel@tonic-gate iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr); 1760*0Sstevel@tonic-gate 1761*0Sstevel@tonic-gate while (npages > (uint_t)0) { 1762*0Sstevel@tonic-gate *iotte_ptr = (uint64_t)0; /* unload tte */ 1763*0Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, 1); 1764*0Sstevel@tonic-gate 1765*0Sstevel@tonic-gate npages--; 1766*0Sstevel@tonic-gate iotte_ptr++; 1767*0Sstevel@tonic-gate ioaddr += IOMMU_PAGESIZE; 1768*0Sstevel@tonic-gate } 1769*0Sstevel@tonic-gate 1770*0Sstevel@tonic-gate ioaddr = (ioaddr_t)mp->dmai_mapping; 1771*0Sstevel@tonic-gate 
/*
 * DDI_DMA_RELEASE (cont.): the IOTTEs were cleared and TLB-flushed
 * above; now give the page reservation back to the pool and free the
 * DVMA space plus all per-handle fast-DVMA bookkeeping arrays.
 */
mutex_enter(&softsp->dma_pool_lock); 1772*0Sstevel@tonic-gate softsp->dma_reserve += np; 1773*0Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 1774*0Sstevel@tonic-gate 1775*0Sstevel@tonic-gate if (mp->dmai_rflags & DMP_NOLIMIT) 1776*0Sstevel@tonic-gate vmem_free(softsp->dvma_arena, (void *)ioaddr, 1777*0Sstevel@tonic-gate iommu_ptob(np)); 1778*0Sstevel@tonic-gate else 1779*0Sstevel@tonic-gate vmem_xfree(softsp->dvma_arena, (void *)ioaddr, 1780*0Sstevel@tonic-gate iommu_ptob(np)); 1781*0Sstevel@tonic-gate 1782*0Sstevel@tonic-gate kmem_free(mp, sizeof (*mp)); 1783*0Sstevel@tonic-gate kmem_free(iommu_fast_dvma->pagecnt, np * sizeof (uint_t)); 1784*0Sstevel@tonic-gate kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int)); 1785*0Sstevel@tonic-gate kmem_free(iommu_fast_dvma->phys_sync_flag, np * 1786*0Sstevel@tonic-gate sizeof (uint64_t)); 1787*0Sstevel@tonic-gate kmem_free(iommu_fast_dvma, sizeof (struct fast_dvma)); 1788*0Sstevel@tonic-gate 1789*0Sstevel@tonic-gate 1790*0Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 1791*0Sstevel@tonic-gate ("Release: Base addr %x size %x\n", ioaddr, np)); 1792*0Sstevel@tonic-gate /* 1793*0Sstevel@tonic-gate * Now that we've freed some resource, 1794*0Sstevel@tonic-gate * if there is anybody waiting for it 1795*0Sstevel@tonic-gate * try and get them going. 
1796*0Sstevel@tonic-gate */ 1797*0Sstevel@tonic-gate if (softsp->dvma_call_list_id != 0) 1798*0Sstevel@tonic-gate ddi_run_callback(&softsp->dvma_call_list_id); 1799*0Sstevel@tonic-gate 1800*0Sstevel@tonic-gate break; 1801*0Sstevel@tonic-gate } 1802*0Sstevel@tonic-gate 1803*0Sstevel@tonic-gate default: 1804*0Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_DEBUG, ("iommu_dma_mctl: unknown option " 1805*0Sstevel@tonic-gate "0%x\n", request)); 1806*0Sstevel@tonic-gate 1807*0Sstevel@tonic-gate return (DDI_FAILURE); 1808*0Sstevel@tonic-gate } 1809*0Sstevel@tonic-gate return (DDI_SUCCESS); 1810*0Sstevel@tonic-gate } 1811*0Sstevel@tonic-gate 1812*0Sstevel@tonic-gate /*ARGSUSED*/ 1813*0Sstevel@tonic-gate void 1814*0Sstevel@tonic-gate iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index, 1815*0Sstevel@tonic-gate ddi_dma_cookie_t *cp) 1816*0Sstevel@tonic-gate { 1817*0Sstevel@tonic-gate uintptr_t addr; 1818*0Sstevel@tonic-gate ioaddr_t ioaddr; 1819*0Sstevel@tonic-gate uint_t offset; 1820*0Sstevel@tonic-gate pfn_t pfn; 1821*0Sstevel@tonic-gate int npages; 1822*0Sstevel@tonic-gate volatile uint64_t *iotte_ptr; 1823*0Sstevel@tonic-gate uint64_t iotte_flag = 0; 1824*0Sstevel@tonic-gate struct as *as = NULL; 1825*0Sstevel@tonic-gate extern struct as kas; 1826*0Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h; 1827*0Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = 1828*0Sstevel@tonic-gate (struct fast_dvma *)mp->dmai_nexus_private; 1829*0Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 1830*0Sstevel@tonic-gate iommu_fast_dvma->softsp; 1831*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 1832*0Sstevel@tonic-gate struct io_mem_list *iomemp; 1833*0Sstevel@tonic-gate pfn_t *pfnp; 1834*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 1835*0Sstevel@tonic-gate 1836*0Sstevel@tonic-gate ASSERT(softsp != NULL); 1837*0Sstevel@tonic-gate 1838*0Sstevel@tonic-gate addr = (uintptr_t)a; 1839*0Sstevel@tonic-gate 
/*
 * iommu_dvma_kaddr_load() (cont.): compute the DVMA address of the
 * slot reserved at 'index', remember the page count for later
 * sync/unload, and load one IOTTE per page of [a, a + len) using
 * kernel-as pfns from hat_getpfnum().
 */
ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index)); 1840*0Sstevel@tonic-gate offset = (uint_t)(addr & IOMMU_PAGEOFFSET); 1841*0Sstevel@tonic-gate iommu_fast_dvma->pagecnt[index] = iommu_btopr(len + offset); 1842*0Sstevel@tonic-gate as = &kas; 1843*0Sstevel@tonic-gate addr &= ~IOMMU_PAGEOFFSET; 1844*0Sstevel@tonic-gate npages = iommu_btopr(len + offset); 1845*0Sstevel@tonic-gate 1846*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 1847*0Sstevel@tonic-gate iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP); 1848*0Sstevel@tonic-gate iomemp->rdip = mp->dmai_rdip; 1849*0Sstevel@tonic-gate iomemp->ioaddr = ioaddr; 1850*0Sstevel@tonic-gate iomemp->addr = addr; 1851*0Sstevel@tonic-gate iomemp->npages = npages; 1852*0Sstevel@tonic-gate pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1), 1853*0Sstevel@tonic-gate KM_SLEEP); 1854*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 1855*0Sstevel@tonic-gate 1856*0Sstevel@tonic-gate cp->dmac_address = ioaddr | offset; 1857*0Sstevel@tonic-gate cp->dmac_size = len; 1858*0Sstevel@tonic-gate 1859*0Sstevel@tonic-gate iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr); 1860*0Sstevel@tonic-gate /* read/write and streaming io on */ 1861*0Sstevel@tonic-gate iotte_flag = IOTTE_VALID | IOTTE_WRITE | IOTTE_CACHE; 1862*0Sstevel@tonic-gate 1863*0Sstevel@tonic-gate if (mp->dmai_rflags & DDI_DMA_CONSISTENT) 1864*0Sstevel@tonic-gate mp->dmai_rflags |= DMP_NOSYNC; 1865*0Sstevel@tonic-gate else if (!softsp->stream_buf_off) 1866*0Sstevel@tonic-gate iotte_flag |= IOTTE_STREAM; 1867*0Sstevel@tonic-gate 1868*0Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: ioaddr %x " 1869*0Sstevel@tonic-gate "size %x offset %x index %x kaddr %p\n", 1870*0Sstevel@tonic-gate ioaddr, len, offset, index, addr)); 1871*0Sstevel@tonic-gate ASSERT(npages > 0); 1872*0Sstevel@tonic-gate do { 1873*0Sstevel@tonic-gate pfn = hat_getpfnum(as->a_hat, (caddr_t)addr); 1874*0Sstevel@tonic-gate if (pfn == PFN_INVALID) { 
/*
 * NOTE(review): an invalid pfn is only logged here; the loop below
 * still writes it into the IOTTE -- confirm this is intended.
 */
1875*0Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: invalid pfn " 1876*0Sstevel@tonic-gate "from hat_getpfnum()\n")); 1877*0Sstevel@tonic-gate } 1878*0Sstevel@tonic-gate 1879*0Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, 1); 1880*0Sstevel@tonic-gate 1881*0Sstevel@tonic-gate /* load tte */ 1882*0Sstevel@tonic-gate *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag; 1883*0Sstevel@tonic-gate 1884*0Sstevel@tonic-gate npages--; 1885*0Sstevel@tonic-gate iotte_ptr++; 1886*0Sstevel@tonic-gate 1887*0Sstevel@tonic-gate addr += IOMMU_PAGESIZE; 1888*0Sstevel@tonic-gate ioaddr += IOMMU_PAGESIZE; 1889*0Sstevel@tonic-gate 1890*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 1891*0Sstevel@tonic-gate *pfnp = pfn; 1892*0Sstevel@tonic-gate pfnp++; 1893*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 1894*0Sstevel@tonic-gate 1895*0Sstevel@tonic-gate } while (npages > 0); 1896*0Sstevel@tonic-gate 1897*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 1898*0Sstevel@tonic-gate mutex_enter(&softsp->iomemlock); 1899*0Sstevel@tonic-gate iomemp->next = softsp->iomem; 1900*0Sstevel@tonic-gate softsp->iomem = iomemp; 1901*0Sstevel@tonic-gate mutex_exit(&softsp->iomemlock); 1902*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 1903*0Sstevel@tonic-gate } 1904*0Sstevel@tonic-gate 1905*0Sstevel@tonic-gate /*ARGSUSED*/ 1906*0Sstevel@tonic-gate void 1907*0Sstevel@tonic-gate iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view) 1908*0Sstevel@tonic-gate { 1909*0Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h; 1910*0Sstevel@tonic-gate ioaddr_t ioaddr; 1911*0Sstevel@tonic-gate pgcnt_t npages; 1912*0Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = 1913*0Sstevel@tonic-gate (struct fast_dvma *)mp->dmai_nexus_private; 1914*0Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 1915*0Sstevel@tonic-gate iommu_fast_dvma->softsp; 1916*0Sstevel@tonic-gate #if defined(DEBUG) && 
defined(IO_MEMUSAGE) 1917*0Sstevel@tonic-gate struct io_mem_list **prevp, *walk; 1918*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 1919*0Sstevel@tonic-gate 1920*0Sstevel@tonic-gate ASSERT(softsp != NULL); 1921*0Sstevel@tonic-gate 1922*0Sstevel@tonic-gate ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index)); 1923*0Sstevel@tonic-gate npages = iommu_fast_dvma->pagecnt[index]; 1924*0Sstevel@tonic-gate 1925*0Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 1926*0Sstevel@tonic-gate mutex_enter(&softsp->iomemlock); 1927*0Sstevel@tonic-gate prevp = &softsp->iomem; 1928*0Sstevel@tonic-gate walk = softsp->iomem; 1929*0Sstevel@tonic-gate 1930*0Sstevel@tonic-gate while (walk != NULL) { 1931*0Sstevel@tonic-gate if (walk->ioaddr == ioaddr) { 1932*0Sstevel@tonic-gate *prevp = walk->next; 1933*0Sstevel@tonic-gate break; 1934*0Sstevel@tonic-gate } 1935*0Sstevel@tonic-gate prevp = &walk->next; 1936*0Sstevel@tonic-gate walk = walk->next; 1937*0Sstevel@tonic-gate } 1938*0Sstevel@tonic-gate mutex_exit(&softsp->iomemlock); 1939*0Sstevel@tonic-gate 1940*0Sstevel@tonic-gate kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1)); 1941*0Sstevel@tonic-gate kmem_free(walk, sizeof (struct io_mem_list)); 1942*0Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 1943*0Sstevel@tonic-gate 1944*0Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_unload: handle %p sync flag " 1945*0Sstevel@tonic-gate "addr %p sync flag pfn %x index %x page count %x\n", mp, 1946*0Sstevel@tonic-gate &iommu_fast_dvma->sync_flag[index], 1947*0Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index], 1948*0Sstevel@tonic-gate index, npages)); 1949*0Sstevel@tonic-gate 1950*0Sstevel@tonic-gate if ((mp->dmai_rflags & DMP_NOSYNC) != DMP_NOSYNC) { 1951*0Sstevel@tonic-gate sync_stream_buf(softsp, ioaddr, npages, 1952*0Sstevel@tonic-gate (int *)&iommu_fast_dvma->sync_flag[index], 1953*0Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index]); 1954*0Sstevel@tonic-gate } 1955*0Sstevel@tonic-gate 
1956*0Sstevel@tonic-gate 1957*0Sstevel@tonic-gate /*ARGSUSED*/ 1958*0Sstevel@tonic-gate void 1959*0Sstevel@tonic-gate iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view) 1960*0Sstevel@tonic-gate { 1961*0Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h; 1962*0Sstevel@tonic-gate ioaddr_t ioaddr; 1963*0Sstevel@tonic-gate uint_t npages; 1964*0Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = 1965*0Sstevel@tonic-gate (struct fast_dvma *)mp->dmai_nexus_private; 1966*0Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 1967*0Sstevel@tonic-gate iommu_fast_dvma->softsp; 1968*0Sstevel@tonic-gate 1969*0Sstevel@tonic-gate if ((mp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC) 1970*0Sstevel@tonic-gate return; 1971*0Sstevel@tonic-gate 1972*0Sstevel@tonic-gate ASSERT(softsp != NULL); 1973*0Sstevel@tonic-gate ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index)); 1974*0Sstevel@tonic-gate npages = iommu_fast_dvma->pagecnt[index]; 1975*0Sstevel@tonic-gate 1976*0Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_sync: handle %p, " 1977*0Sstevel@tonic-gate "sync flag addr %p, sync flag pfn %x\n", mp, 1978*0Sstevel@tonic-gate &iommu_fast_dvma->sync_flag[index], 1979*0Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index])); 1980*0Sstevel@tonic-gate 1981*0Sstevel@tonic-gate sync_stream_buf(softsp, ioaddr, npages, 1982*0Sstevel@tonic-gate (int *)&iommu_fast_dvma->sync_flag[index], 1983*0Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index]); 1984*0Sstevel@tonic-gate } 1985