10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 50Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 60Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 70Sstevel@tonic-gate * with the License. 80Sstevel@tonic-gate * 90Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 100Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 110Sstevel@tonic-gate * See the License for the specific language governing permissions 120Sstevel@tonic-gate * and limitations under the License. 130Sstevel@tonic-gate * 140Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 150Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 160Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 170Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 180Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 190Sstevel@tonic-gate * 200Sstevel@tonic-gate * CDDL HEADER END 210Sstevel@tonic-gate */ 220Sstevel@tonic-gate /* 230Sstevel@tonic-gate * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 240Sstevel@tonic-gate * Use is subject to license terms. 
250Sstevel@tonic-gate */ 260Sstevel@tonic-gate 270Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 280Sstevel@tonic-gate 290Sstevel@tonic-gate #include <sys/types.h> 300Sstevel@tonic-gate #include <sys/param.h> 310Sstevel@tonic-gate #include <sys/conf.h> 320Sstevel@tonic-gate #include <sys/ddi.h> 330Sstevel@tonic-gate #include <sys/sunddi.h> 340Sstevel@tonic-gate #include <sys/ddi_impldefs.h> 350Sstevel@tonic-gate #include <sys/cmn_err.h> 360Sstevel@tonic-gate #include <sys/kmem.h> 370Sstevel@tonic-gate #include <sys/vmem.h> 380Sstevel@tonic-gate #include <sys/sysmacros.h> 390Sstevel@tonic-gate 400Sstevel@tonic-gate #include <sys/ddidmareq.h> 410Sstevel@tonic-gate #include <sys/sysiosbus.h> 420Sstevel@tonic-gate #include <sys/iommu.h> 430Sstevel@tonic-gate #include <sys/iocache.h> 440Sstevel@tonic-gate #include <sys/dvma.h> 450Sstevel@tonic-gate 460Sstevel@tonic-gate #include <vm/as.h> 470Sstevel@tonic-gate #include <vm/hat.h> 480Sstevel@tonic-gate #include <vm/page.h> 490Sstevel@tonic-gate #include <vm/hat_sfmmu.h> 500Sstevel@tonic-gate #include <sys/machparam.h> 510Sstevel@tonic-gate #include <sys/machsystm.h> 520Sstevel@tonic-gate #include <sys/vmsystm.h> 530Sstevel@tonic-gate #include <sys/iommutsb.h> 540Sstevel@tonic-gate 550Sstevel@tonic-gate /* Useful debugging Stuff */ 560Sstevel@tonic-gate #include <sys/nexusdebug.h> 570Sstevel@tonic-gate #include <sys/debug.h> 580Sstevel@tonic-gate /* Bitfield debugging definitions for this file */ 590Sstevel@tonic-gate #define IOMMU_GETDVMAPAGES_DEBUG 0x1 600Sstevel@tonic-gate #define IOMMU_DMAMAP_DEBUG 0x2 610Sstevel@tonic-gate #define IOMMU_DMAMCTL_DEBUG 0x4 620Sstevel@tonic-gate #define IOMMU_DMAMCTL_SYNC_DEBUG 0x8 630Sstevel@tonic-gate #define IOMMU_DMAMCTL_HTOC_DEBUG 0x10 640Sstevel@tonic-gate #define IOMMU_DMAMCTL_KVADDR_DEBUG 0x20 650Sstevel@tonic-gate #define IOMMU_DMAMCTL_NEXTWIN_DEBUG 0x40 660Sstevel@tonic-gate #define IOMMU_DMAMCTL_NEXTSEG_DEBUG 0x80 670Sstevel@tonic-gate #define 
IOMMU_DMAMCTL_MOVWIN_DEBUG 0x100 680Sstevel@tonic-gate #define IOMMU_DMAMCTL_REPWIN_DEBUG 0x200 690Sstevel@tonic-gate #define IOMMU_DMAMCTL_GETERR_DEBUG 0x400 700Sstevel@tonic-gate #define IOMMU_DMAMCTL_COFF_DEBUG 0x800 710Sstevel@tonic-gate #define IOMMU_DMAMCTL_DMA_FREE_DEBUG 0x1000 720Sstevel@tonic-gate #define IOMMU_REGISTERS_DEBUG 0x2000 730Sstevel@tonic-gate #define IOMMU_DMA_SETUP_DEBUG 0x4000 740Sstevel@tonic-gate #define IOMMU_DMA_UNBINDHDL_DEBUG 0x8000 750Sstevel@tonic-gate #define IOMMU_DMA_BINDHDL_DEBUG 0x10000 760Sstevel@tonic-gate #define IOMMU_DMA_WIN_DEBUG 0x20000 770Sstevel@tonic-gate #define IOMMU_DMA_ALLOCHDL_DEBUG 0x40000 780Sstevel@tonic-gate #define IOMMU_DMA_LIM_SETUP_DEBUG 0x80000 790Sstevel@tonic-gate #define IOMMU_FASTDMA_RESERVE 0x100000 800Sstevel@tonic-gate #define IOMMU_FASTDMA_LOAD 0x200000 810Sstevel@tonic-gate #define IOMMU_INTER_INTRA_XFER 0x400000 820Sstevel@tonic-gate #define IOMMU_TTE 0x800000 830Sstevel@tonic-gate #define IOMMU_TLB 0x1000000 840Sstevel@tonic-gate #define IOMMU_FASTDMA_SYNC 0x2000000 850Sstevel@tonic-gate 860Sstevel@tonic-gate /* Turn on if you need to keep track of outstanding IOMMU usage */ 870Sstevel@tonic-gate /* #define IO_MEMUSAGE */ 880Sstevel@tonic-gate /* Turn on to debug IOMMU unmapping code */ 890Sstevel@tonic-gate /* #define IO_MEMDEBUG */ 900Sstevel@tonic-gate 910Sstevel@tonic-gate static struct dvma_ops iommu_dvma_ops = { 920Sstevel@tonic-gate DVMAO_REV, 930Sstevel@tonic-gate iommu_dvma_kaddr_load, 940Sstevel@tonic-gate iommu_dvma_unload, 950Sstevel@tonic-gate iommu_dvma_sync 960Sstevel@tonic-gate }; 970Sstevel@tonic-gate 980Sstevel@tonic-gate extern void *sbusp; /* sbus soft state hook */ 990Sstevel@tonic-gate 1000Sstevel@tonic-gate #define DVMA_MAX_CACHE 65536 1010Sstevel@tonic-gate 1020Sstevel@tonic-gate /* 1030Sstevel@tonic-gate * This is the number of pages that a mapping request needs before we force 1040Sstevel@tonic-gate * the TLB flush code to use diagnostic registers. 
This value was determined
 * through a series of test runs measuring DMA mapping setup performance.
 */
int tlb_flush_using_diag = 16;

/*
 * Candidate SYSIO IOMMU TSB sizes, ordered smallest to largest.
 * iommu_resume_init() scans this table from the top down to find the
 * largest entry that fits the TSB actually allocated for this IOMMU.
 */
int sysio_iommu_tsb_sizes[] = {
	IOMMU_TSB_SIZE_8M,
	IOMMU_TSB_SIZE_16M,
	IOMMU_TSB_SIZE_32M,
	IOMMU_TSB_SIZE_64M,
	IOMMU_TSB_SIZE_128M,
	IOMMU_TSB_SIZE_256M,
	IOMMU_TSB_SIZE_512M,
	IOMMU_TSB_SIZE_1G
};

static int iommu_map_window(ddi_dma_impl_t *, off_t, size_t);

/*
 * iommu_init() -- one-time IOMMU setup for an SBus instance.
 *
 *   softsp:  soft state for this SBus instance
 *   address: already-mapped virtual base of the SYSIO register block
 *
 * Computes the register pointers, allocates a TSB from the spare pool
 * (see startup.c), carves the DVMA vmem arena out of the top of the
 * 32-bit IO address space, programs the hardware via iommu_resume_init(),
 * and records the pfn range spanned by this SBus's "ranges" property
 * (used later to recognize intra-SBus transfers).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if no TSB is available or the
 * hardware does not read back the TSB base we just programmed.
 */
int
iommu_init(struct sbus_soft_state *softsp, caddr_t address)
{
	int i;
	char name[40];

#ifdef DEBUG
	debug_info = 1;
#endif

	/*
	 * Simply add each registers offset to the base address
	 * to calculate the already mapped virtual address of
	 * the device register...
	 *
	 * define a macro for the pointer arithmetic; all registers
	 * are 64 bits wide and are defined as uint64_t's.
	 */

#define	REG_ADDR(b, o)	(uint64_t *)((caddr_t)(b) + (o))

	softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
	softsp->tsb_base_addr = REG_ADDR(address, OFF_TSB_BASE_ADDR);
	softsp->iommu_flush_reg = REG_ADDR(address, OFF_IOMMU_FLUSH_REG);
	softsp->iommu_tlb_tag = REG_ADDR(address, OFF_IOMMU_TLB_TAG);
	softsp->iommu_tlb_data = REG_ADDR(address, OFF_IOMMU_TLB_DATA);

#undef REG_ADDR

	mutex_init(&softsp->dma_pool_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&softsp->intr_poll_list_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Set up the DVMA resource sizes */
	if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
	    IOMMU_TSB_COOKIE_NONE) {
		cmn_err(CE_WARN, "%s%d: Unable to retrieve IOMMU array.",
		    ddi_driver_name(softsp->dip),
		    ddi_get_instance(softsp->dip));
		return (DDI_FAILURE);
	}
	softsp->soft_tsb_base_addr =
	    iommu_tsb_cookie_to_va(softsp->iommu_tsb_cookie);
	softsp->iommu_dvma_size =
	    iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie) <<
	    IOMMU_TSB_TO_RNG;
	/*
	 * DVMA space occupies the highest iommu_dvma_size bytes of the
	 * 32-bit IO address space (base = 2^32 - size, via unsigned wrap).
	 */
	softsp->iommu_dvma_base = (ioaddr_t)
	    (0 - (ioaddr_t)softsp->iommu_dvma_size);

	(void) snprintf(name, sizeof (name), "%s%d_dvma",
	    ddi_driver_name(softsp->dip), ddi_get_instance(softsp->dip));

	/*
	 * Initialize the DVMA vmem arena.
	 */
	softsp->dvma_arena = vmem_create(name,
	    (void *)(uintptr_t)softsp->iommu_dvma_base,
	    softsp->iommu_dvma_size, PAGESIZE, NULL, NULL, NULL,
	    DVMA_MAX_CACHE, VM_SLEEP);

	/* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
	softsp->dma_reserve = iommu_btop(softsp->iommu_dvma_size >> 1);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	/* Tracking list of outstanding IOMMU mappings (debug only). */
	mutex_init(&softsp->iomemlock, NULL, MUTEX_DEFAULT, NULL);
	softsp->iomem = (struct io_mem_list *)0;
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Get the base address of the TSB table and store it in the hardware
	 */

	/*
	 * We plan on the PROM flushing all TLB entries. If this is not the
	 * case, this is where we should flush the hardware TLB.
	 */

	/* Set the IOMMU registers */
	(void) iommu_resume_init(softsp);

	/* check the convenient copy of TSB base, and flush write buffers */
	if (*softsp->tsb_base_addr !=
	    va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
		iommu_tsb_free(softsp->iommu_tsb_cookie);
		return (DDI_FAILURE);
	}

	/*
	 * Walk the "ranges" property and remember the lowest and highest
	 * pfns it covers; IS_INTRA_SBUS() uses these to classify transfers.
	 */
	softsp->sbus_io_lo_pfn = UINT32_MAX;
	softsp->sbus_io_hi_pfn = 0;
	for (i = 0; i < sysio_pd_getnrng(softsp->dip); i++) {
		struct rangespec *rangep;
		uint64_t addr;
		pfn_t hipfn, lopfn;

		rangep = sysio_pd_getrng(softsp->dip, i);
		addr = (uint64_t)((uint64_t)rangep->rng_bustype << 32);
		addr |= (uint64_t)rangep->rng_offset;
		lopfn = (pfn_t)(addr >> MMU_PAGESHIFT);
		addr += (uint64_t)(rangep->rng_size - 1);
		hipfn = (pfn_t)(addr >> MMU_PAGESHIFT);

		softsp->sbus_io_lo_pfn = (lopfn < softsp->sbus_io_lo_pfn) ?
		    lopfn : softsp->sbus_io_lo_pfn;

		softsp->sbus_io_hi_pfn = (hipfn > softsp->sbus_io_hi_pfn) ?
		    hipfn : softsp->sbus_io_hi_pfn;
	}

	DPRINTF(IOMMU_REGISTERS_DEBUG, ("IOMMU Control reg: %p IOMMU TSB "
	    "base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
	    softsp->iommu_ctrl_reg, softsp->tsb_base_addr,
	    softsp->iommu_flush_reg, softsp->soft_tsb_base_addr));

	return (DDI_SUCCESS);
}

/*
 * function to uninitialize the iommu and release the tsb back to
 * the spare pool. See startup.c for tsb spare management.
 */

int
iommu_uninit(struct sbus_soft_state *softsp)
{
	vmem_destroy(softsp->dvma_arena);

	/*
	 * flip off the IOMMU enable switch
	 * (the mask keeps only the TSB-size field and the disable bit)
	 */
	*softsp->iommu_ctrl_reg &=
	    (TSB_SIZE << TSB_SIZE_SHIFT | IOMMU_DISABLE);

	iommu_tsb_free(softsp->iommu_tsb_cookie);

	return (DDI_SUCCESS);
}

/*
 * Initialize iommu hardware registers when the system is being resumed.
 * (Subset of iommu_init())
 */
int
iommu_resume_init(struct sbus_soft_state *softsp)
{
	int i;
	uint_t tsb_size;
	uint_t tsb_bytes;

	/*
	 * Reset the base address of the TSB table in the hardware
	 */
	*softsp->tsb_base_addr = va_to_pa((caddr_t)softsp->soft_tsb_base_addr);

	/*
	 * Figure out the correct size of the IOMMU TSB entries. If we
	 * end up with a size smaller than that needed for 8M of IOMMU
	 * space, default the size to 8M. XXX We could probably panic here
	 */
	i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
	    - 1;

	tsb_bytes = iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie);

	/* Scan from the largest size down to the first one that fits. */
	while (i > 0) {
		if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
			break;
		i--;
	}

	/* The table index doubles as the hardware TSB-size encoding. */
	tsb_size = i;

	/* OK, lets flip the "on" switch of the IOMMU */
	*softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
	    | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);

	return (DDI_SUCCESS);
}

/*
 * Flush [addr, addr + npages pages) out of the IOMMU TLB.
 *
 * Single-page requests use the hardware flush register directly.  For
 * larger requests we walk the TLB diagnostic tag registers and flush
 * only entries whose IO address falls inside the range -- callers gate
 * this path on tlb_flush_using_diag (see iommu_create_*_mappings()).
 *
 * The assignments of *sbus_ctrl_reg to tmpreg are deliberate dummy
 * reads, not dead stores -- presumably they force the preceding posted
 * writes to complete (iommu_init() reads a register "to flush write
 * buffers" the same way); do not remove them.
 */
void
iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
{
	volatile uint64_t tmpreg;
	volatile uint64_t *vaddr_reg, *valid_bit_reg;
	ioaddr_t hiaddr, ioaddr;
	int i, do_flush = 0;

	/* Fast path: one page needs no diag-register walk. */
	if (npages == 1) {
		*softsp->iommu_flush_reg = (uint64_t)addr;
		tmpreg = *softsp->sbus_ctrl_reg;	/* dummy read */
		return;
	}

	hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);
	for (i = 0, vaddr_reg = softsp->iommu_tlb_tag,
	    valid_bit_reg = softsp->iommu_tlb_data;
	    i < IOMMU_TLB_ENTRIES; i++, vaddr_reg++, valid_bit_reg++) {
		/* Recover the IO address cached in this TLB tag entry. */
		tmpreg = *vaddr_reg;
		ioaddr = (ioaddr_t)((tmpreg & IOMMU_TLBTAG_VA_MASK) <<
		    IOMMU_TLBTAG_VA_SHIFT);

		DPRINTF(IOMMU_TLB, ("Vaddr reg 0x%p, "
		    "TLB vaddr reg %lx, IO addr 0x%x "
		    "Base addr 0x%x, Hi addr 0x%x\n",
		    vaddr_reg, tmpreg, ioaddr, addr, hiaddr));

		if (ioaddr >= addr && ioaddr <= hiaddr) {
			tmpreg = *valid_bit_reg;

			DPRINTF(IOMMU_TLB, ("Valid reg addr 0x%p, "
			    "TLB valid reg %lx\n",
			    valid_bit_reg, tmpreg));

			/* Only valid entries in range need flushing. */
			if (tmpreg & IOMMU_TLB_VALID) {
				*softsp->iommu_flush_reg = (uint64_t)ioaddr;
				do_flush = 1;
			}
		}
	}

	if (do_flush)
		tmpreg = *softsp->sbus_ctrl_reg;	/* dummy read */
}


/*
 * Shorthand defines
 */

#define ALO dma_lim->dlim_addr_lo 3470Sstevel@tonic-gate #define AHI dma_lim->dlim_addr_hi 3480Sstevel@tonic-gate #define OBJSIZE dmareq->dmar_object.dmao_size 3490Sstevel@tonic-gate #define IOTTE_NDX(vaddr, base) (base + \ 3500Sstevel@tonic-gate (int)(iommu_btop((vaddr & ~IOMMU_PAGEMASK) - \ 3510Sstevel@tonic-gate softsp->iommu_dvma_base))) 3520Sstevel@tonic-gate /* 3530Sstevel@tonic-gate * If DDI_DMA_PARTIAL flag is set and the request is for 3540Sstevel@tonic-gate * less than MIN_DVMA_WIN_SIZE, it's not worth the hassle so 3550Sstevel@tonic-gate * we turn off the DDI_DMA_PARTIAL flag 3560Sstevel@tonic-gate */ 3570Sstevel@tonic-gate #define MIN_DVMA_WIN_SIZE (128) 3580Sstevel@tonic-gate 3590Sstevel@tonic-gate /* ARGSUSED */ 3600Sstevel@tonic-gate void 3610Sstevel@tonic-gate iommu_remove_mappings(ddi_dma_impl_t *mp) 3620Sstevel@tonic-gate { 3630Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG) 3640Sstevel@tonic-gate pgcnt_t npages; 3650Sstevel@tonic-gate ioaddr_t ioaddr; 3660Sstevel@tonic-gate volatile uint64_t *iotte_ptr; 3670Sstevel@tonic-gate ioaddr_t ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET; 3680Sstevel@tonic-gate pgcnt_t npages = mp->dmai_ndvmapages; 3690Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 3700Sstevel@tonic-gate struct sbus_soft_state *softsp = mppriv->softsp; 3710Sstevel@tonic-gate 3720Sstevel@tonic-gate #if defined(IO_MEMUSAGE) 3730Sstevel@tonic-gate struct io_mem_list **prevp, *walk; 3740Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 3750Sstevel@tonic-gate 3760Sstevel@tonic-gate ASSERT(softsp != NULL); 3770Sstevel@tonic-gate /* 3780Sstevel@tonic-gate * Run thru the mapped entries and free 'em 3790Sstevel@tonic-gate */ 3800Sstevel@tonic-gate 3810Sstevel@tonic-gate ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET; 3820Sstevel@tonic-gate npages = mp->dmai_ndvmapages; 3830Sstevel@tonic-gate 3840Sstevel@tonic-gate #if defined(IO_MEMUSAGE) 3850Sstevel@tonic-gate mutex_enter(&softsp->iomemlock); 
3860Sstevel@tonic-gate prevp = &softsp->iomem; 3870Sstevel@tonic-gate walk = softsp->iomem; 3880Sstevel@tonic-gate 3890Sstevel@tonic-gate while (walk) { 3900Sstevel@tonic-gate if (walk->ioaddr == ioaddr) { 3910Sstevel@tonic-gate *prevp = walk->next; 3920Sstevel@tonic-gate break; 3930Sstevel@tonic-gate } 3940Sstevel@tonic-gate 3950Sstevel@tonic-gate prevp = &walk->next; 3960Sstevel@tonic-gate walk = walk->next; 3970Sstevel@tonic-gate } 3980Sstevel@tonic-gate mutex_exit(&softsp->iomemlock); 3990Sstevel@tonic-gate 4000Sstevel@tonic-gate kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1)); 4010Sstevel@tonic-gate kmem_free(walk, sizeof (struct io_mem_list)); 4020Sstevel@tonic-gate #endif /* IO_MEMUSAGE */ 4030Sstevel@tonic-gate 4040Sstevel@tonic-gate iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr); 4050Sstevel@tonic-gate 4060Sstevel@tonic-gate while (npages) { 4070Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_DEBUG, 4080Sstevel@tonic-gate ("dma_mctl: freeing ioaddr %x iotte %p\n", 4090Sstevel@tonic-gate ioaddr, iotte_ptr)); 4100Sstevel@tonic-gate *iotte_ptr = (uint64_t)0; /* unload tte */ 4110Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, 1); 4120Sstevel@tonic-gate npages--; 4130Sstevel@tonic-gate ioaddr += IOMMU_PAGESIZE; 4140Sstevel@tonic-gate iotte_ptr++; 4150Sstevel@tonic-gate } 4160Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */ 4170Sstevel@tonic-gate } 4180Sstevel@tonic-gate 4190Sstevel@tonic-gate 4200Sstevel@tonic-gate int 4210Sstevel@tonic-gate iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr) 4220Sstevel@tonic-gate { 4230Sstevel@tonic-gate pfn_t pfn; 4240Sstevel@tonic-gate struct as *as = NULL; 4250Sstevel@tonic-gate pgcnt_t npages; 4260Sstevel@tonic-gate ioaddr_t ioaddr; 4270Sstevel@tonic-gate uint_t offset; 4280Sstevel@tonic-gate volatile uint64_t *iotte_ptr; 4290Sstevel@tonic-gate uint64_t tmp_iotte_flag; 4300Sstevel@tonic-gate int rval = DDI_DMA_MAPPED; 4310Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv 
*)mp; 4320Sstevel@tonic-gate struct sbus_soft_state *softsp = mppriv->softsp; 4330Sstevel@tonic-gate int diag_tlb_flush; 4340Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 4350Sstevel@tonic-gate struct io_mem_list *iomemp; 4360Sstevel@tonic-gate pfn_t *pfnp; 4370Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 4380Sstevel@tonic-gate 4390Sstevel@tonic-gate ASSERT(softsp != NULL); 4400Sstevel@tonic-gate 4410Sstevel@tonic-gate /* Set Valid and Cache for mem xfer */ 4420Sstevel@tonic-gate tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM; 4430Sstevel@tonic-gate 4440Sstevel@tonic-gate offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET); 4450Sstevel@tonic-gate npages = iommu_btopr(mp->dmai_size + offset); 4460Sstevel@tonic-gate ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET); 4470Sstevel@tonic-gate iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr); 4480Sstevel@tonic-gate diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0; 4490Sstevel@tonic-gate 4500Sstevel@tonic-gate as = mp->dmai_object.dmao_obj.virt_obj.v_as; 4510Sstevel@tonic-gate if (as == NULL) 4520Sstevel@tonic-gate as = &kas; 4530Sstevel@tonic-gate 4540Sstevel@tonic-gate /* 4550Sstevel@tonic-gate * Set the per object bits of the TTE here. We optimize this for 4560Sstevel@tonic-gate * the memory case so that the while loop overhead is minimal. 
4570Sstevel@tonic-gate */ 4580Sstevel@tonic-gate /* Turn on NOSYNC if we need consistent mem */ 4590Sstevel@tonic-gate if (mp->dmai_rflags & DDI_DMA_CONSISTENT) { 4600Sstevel@tonic-gate mp->dmai_rflags |= DMP_NOSYNC; 4610Sstevel@tonic-gate tmp_iotte_flag ^= IOTTE_STREAM; 4620Sstevel@tonic-gate /* Set streaming mode if not consistent mem */ 4630Sstevel@tonic-gate } else if (softsp->stream_buf_off) { 4640Sstevel@tonic-gate tmp_iotte_flag ^= IOTTE_STREAM; 4650Sstevel@tonic-gate } 4660Sstevel@tonic-gate 4670Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 4680Sstevel@tonic-gate iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP); 4690Sstevel@tonic-gate iomemp->rdip = mp->dmai_rdip; 4700Sstevel@tonic-gate iomemp->ioaddr = ioaddr; 4710Sstevel@tonic-gate iomemp->addr = addr; 4720Sstevel@tonic-gate iomemp->npages = npages; 4730Sstevel@tonic-gate pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1), 4740Sstevel@tonic-gate KM_SLEEP); 4750Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 4760Sstevel@tonic-gate /* 4770Sstevel@tonic-gate * Grab the mappings from the dmmu and stick 'em into the 4780Sstevel@tonic-gate * iommu. 4790Sstevel@tonic-gate */ 4800Sstevel@tonic-gate ASSERT(npages != 0); 4810Sstevel@tonic-gate 4820Sstevel@tonic-gate /* If we're going to flush the TLB using diag mode, do it now. 
*/ 4830Sstevel@tonic-gate if (diag_tlb_flush) 4840Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, npages); 4850Sstevel@tonic-gate 4860Sstevel@tonic-gate do { 4870Sstevel@tonic-gate uint64_t iotte_flag = tmp_iotte_flag; 4880Sstevel@tonic-gate 4890Sstevel@tonic-gate /* 4900Sstevel@tonic-gate * Fetch the pfn for the DMA object 4910Sstevel@tonic-gate */ 4920Sstevel@tonic-gate 4930Sstevel@tonic-gate ASSERT(as); 4940Sstevel@tonic-gate pfn = hat_getpfnum(as->a_hat, (caddr_t)addr); 4950Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID); 4960Sstevel@tonic-gate 4970Sstevel@tonic-gate if (!pf_is_memory(pfn)) { 4980Sstevel@tonic-gate /* DVMA'ing to IO space */ 4990Sstevel@tonic-gate 5000Sstevel@tonic-gate /* Turn off cache bit if set */ 5010Sstevel@tonic-gate if (iotte_flag & IOTTE_CACHE) 5020Sstevel@tonic-gate iotte_flag ^= IOTTE_CACHE; 5030Sstevel@tonic-gate 5040Sstevel@tonic-gate /* Turn off stream bit if set */ 5050Sstevel@tonic-gate if (iotte_flag & IOTTE_STREAM) 5060Sstevel@tonic-gate iotte_flag ^= IOTTE_STREAM; 5070Sstevel@tonic-gate 5080Sstevel@tonic-gate if (IS_INTRA_SBUS(softsp, pfn)) { 5090Sstevel@tonic-gate /* Intra sbus transfer */ 5100Sstevel@tonic-gate 5110Sstevel@tonic-gate /* Turn on intra flag */ 5120Sstevel@tonic-gate iotte_flag |= IOTTE_INTRA; 5130Sstevel@tonic-gate 5140Sstevel@tonic-gate DPRINTF(IOMMU_INTER_INTRA_XFER, ( 515*1035Smike_s "Intra xfer pfnum %lx TTE %lx\n", 5160Sstevel@tonic-gate pfn, iotte_flag)); 5170Sstevel@tonic-gate } else { 5180Sstevel@tonic-gate if (pf_is_dmacapable(pfn) == 1) { 5190Sstevel@tonic-gate /*EMPTY*/ 5200Sstevel@tonic-gate DPRINTF(IOMMU_INTER_INTRA_XFER, 5210Sstevel@tonic-gate ("Inter xfer pfnum %lx " 522*1035Smike_s "tte hi %lx\n", 5230Sstevel@tonic-gate pfn, iotte_flag)); 5240Sstevel@tonic-gate } else { 5250Sstevel@tonic-gate rval = DDI_DMA_NOMAPPING; 5260Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG) 5270Sstevel@tonic-gate goto bad; 5280Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */ 
5290Sstevel@tonic-gate } 5300Sstevel@tonic-gate } 5310Sstevel@tonic-gate } 5320Sstevel@tonic-gate addr += IOMMU_PAGESIZE; 5330Sstevel@tonic-gate 534*1035Smike_s DPRINTF(IOMMU_TTE, ("vaddr mapping: tte index %p pfn %lx " 535*1035Smike_s "tte flag %lx addr %lx ioaddr %x\n", 5360Sstevel@tonic-gate iotte_ptr, pfn, iotte_flag, addr, ioaddr)); 5370Sstevel@tonic-gate 5380Sstevel@tonic-gate /* Flush the IOMMU TLB before loading a new mapping */ 5390Sstevel@tonic-gate if (!diag_tlb_flush) 5400Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, 1); 5410Sstevel@tonic-gate 5420Sstevel@tonic-gate /* Set the hardware IO TTE */ 5430Sstevel@tonic-gate *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag; 5440Sstevel@tonic-gate 5450Sstevel@tonic-gate ioaddr += IOMMU_PAGESIZE; 5460Sstevel@tonic-gate npages--; 5470Sstevel@tonic-gate iotte_ptr++; 5480Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 5490Sstevel@tonic-gate *pfnp = pfn; 5500Sstevel@tonic-gate pfnp++; 5510Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 5520Sstevel@tonic-gate } while (npages != 0); 5530Sstevel@tonic-gate 5540Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 5550Sstevel@tonic-gate mutex_enter(&softsp->iomemlock); 5560Sstevel@tonic-gate iomemp->next = softsp->iomem; 5570Sstevel@tonic-gate softsp->iomem = iomemp; 5580Sstevel@tonic-gate mutex_exit(&softsp->iomemlock); 5590Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 5600Sstevel@tonic-gate 5610Sstevel@tonic-gate return (rval); 5620Sstevel@tonic-gate 5630Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG) 5640Sstevel@tonic-gate bad: 5650Sstevel@tonic-gate /* If we fail a mapping, free up any mapping resources used */ 5660Sstevel@tonic-gate iommu_remove_mappings(mp); 5670Sstevel@tonic-gate return (rval); 5680Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */ 5690Sstevel@tonic-gate } 5700Sstevel@tonic-gate 5710Sstevel@tonic-gate 5720Sstevel@tonic-gate int 5730Sstevel@tonic-gate 
iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
{
	/*
	 * Load IO TTEs for a DMA object described by a page list: either
	 * a linked list of page_t's (pp) or a shadow page-list array
	 * (pplist).  Mirrors iommu_create_vaddr_mappings(), but takes
	 * pfns straight from the page structures instead of calling
	 * hat_getpfnum(), so no IO-space / intra-SBus / dma-capability
	 * checks are performed here.  Always returns DDI_DMA_MAPPED
	 * (rval is never changed below).
	 */
	pfn_t pfn;
	pgcnt_t npages;
	ioaddr_t ioaddr;
	uint_t offset;
	volatile uint64_t *iotte_ptr;
	uint64_t tmp_iotte_flag;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;
	int diag_tlb_flush;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
	struct io_mem_list *iomemp;
	pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */
	int rval = DDI_DMA_MAPPED;

	/* Set Valid and Cache for mem xfer */
	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;

	ASSERT(softsp != NULL);

	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
	npages = iommu_btopr(mp->dmai_size + offset);
	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
	/* Large requests are flushed once up front via the diag registers. */
	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;

	/*
	 * Set the per object bits of the TTE here. We optimize this for
	 * the memory case so that the while loop overhead is minimal.
	 */
	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
		/* Turn on NOSYNC if we need consistent mem */
		mp->dmai_rflags |= DMP_NOSYNC;
		tmp_iotte_flag ^= IOTTE_STREAM;
	} else if (softsp->stream_buf_off) {
		/* Set streaming mode if not consistent mem */
		tmp_iotte_flag ^= IOTTE_STREAM;
	}

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	/*
	 * Record this mapping on the usage-tracking list.
	 * NOTE(review): unlike the vaddr path, iomemp->addr is left unset
	 * here -- presumably because a page-list request has no single
	 * virtual address; confirm before relying on it in debug output.
	 */
	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
	iomemp->rdip = mp->dmai_rdip;
	iomemp->ioaddr = ioaddr;
	iomemp->npages = npages;
	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
	    KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Grab the mappings from the dmmu and stick 'em into the
	 * iommu.
	 */
	ASSERT(npages != 0);

	/* If we're going to flush the TLB using diag mode, do it now. */
	if (diag_tlb_flush)
		iommu_tlb_flush(softsp, ioaddr, npages);

	do {
		uint64_t iotte_flag;

		iotte_flag = tmp_iotte_flag;

		/* Next pfn: from the page list or the shadow array. */
		if (pp != NULL) {
			pfn = pp->p_pagenum;
			pp = pp->p_next;
		} else {
			pfn = (*pplist)->p_pagenum;
			pplist++;
		}

		DPRINTF(IOMMU_TTE, ("pp mapping TTE index %p pfn %lx "
		    "tte flag %lx ioaddr %x\n", iotte_ptr,
		    pfn, iotte_flag, ioaddr));

		/* Flush the IOMMU TLB before loading a new mapping */
		if (!diag_tlb_flush)
			iommu_tlb_flush(softsp, ioaddr, 1);

		/* Set the hardware IO TTE */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

		ioaddr += IOMMU_PAGESIZE;
		npages--;
		iotte_ptr++;

#if defined(DEBUG) && defined(IO_MEMUSAGE)
		*pfnp = pfn;
		pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */

	} while (npages != 0);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	iomemp->next = softsp->iomem;
	softsp->iomem = iomemp;
	mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

	return (rval);
}
/*
 * Set up DMA burst sizes and the minimum-transfer count for a request.
 *
 * For 32-bit requests (DDI_DMA_SBUS_64BIT not set) the caller's burst
 * sizes are intersected with the SBus 32-bit burst sizes and DDI_FAILURE
 * is returned to indicate 32-bit operation.  64-bit bursts are granted
 * only when the SBus itself supports them and the requester is an
 * immediate child of this SBus (no support through bridges); otherwise
 * fall back to 32-bit as above.  On success, *burstsizep and *minxferp
 * are updated for 64-bit operation and DDI_SUCCESS is returned.
 */
int
iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip,
    struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64,
    uint_t *minxferp, uint_t dma_flags)
{
	struct regspec *rp;

	/* Take care of 64 bit limits. */
	if (!(dma_flags & DDI_DMA_SBUS_64BIT)) {
		/*
		 * return burst size for 32-bit mode
		 */
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
	}

	/*
	 * check if SBus supports 64 bit and if caller
	 * is child of SBus. No support through bridges
	 */
	if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) {
		/*
		 * SBus doesn't support it or bridge. Do 32-bit
		 * xfers
		 */
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
	}

	/*
	 * The requester must have a register set; if it does not,
	 * fall back to 32-bit bursts.  (rp is used only for this check.)
	 */
	rp = ddi_rnumber_to_regspec(rdip, 0);
	if (rp == NULL) {
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
	}

	/* Check for old-style 64 bit burstsizes */
	if (burstsize64 & SYSIO64_BURST_MASK) {
		/* Scale back burstsizes if Necessary */
		*burstsizep &= (softsp->sbus64_burst_sizes |
		    softsp->sbus_burst_sizes);
	} else {
		/* Get the 64 bit burstsizes. */
		*burstsizep = burstsize64;

		/* Scale back burstsizes if Necessary */
		*burstsizep &= (softsp->sbus64_burst_sizes >>
		    SYSIO64_BURST_SHIFT);
	}

	/*
	 * Set the largest value of the smallest burstsize that the
	 * device or the bus can manage.
	 */
	*minxferp = MAX(*minxferp,
	    (1 << (ddi_ffs(softsp->sbus64_burst_sizes) - 1)));

	return (DDI_SUCCESS);
}


/*
 * ddi_dma_allochdl(9E) implementation for this nexus: validate the
 * caller's DMA attributes, allocate a dma_impl_priv handle (the
 * ddi_dma_impl_t is its first member), and record the attribute limits
 * in the handle.  Returns DDI_SUCCESS, DDI_DMA_BADATTR for unusable
 * attributes, or DDI_DMA_NORESOURCES when allocation fails (in which
 * case a callback is scheduled unless waitfp is DDI_DMA_DONTWAIT).
 */
int
iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *dma_attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep)
{
	ioaddr_t addrlow, addrhigh, segalign;
	ddi_dma_impl_t *mp;
	struct dma_impl_priv *mppriv;
	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
	    ddi_get_soft_state(sbusp, ddi_get_instance(dip));

	/*
	 * Setup dma burstsizes and min-xfer counts.
	 */
	(void) iommu_dma_lim_setup(dip, rdip, softsp,
	    &dma_attr->dma_attr_burstsizes,
	    dma_attr->dma_attr_burstsizes, &dma_attr->dma_attr_minxfer,
	    dma_attr->dma_attr_flags);

	if (dma_attr->dma_attr_burstsizes == 0)
		return (DDI_DMA_BADATTR);

	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
	segalign = (ioaddr_t)dma_attr->dma_attr_seg;

	/*
	 * Check sanity for hi and lo address limits
	 */
	if ((addrhigh <= addrlow) ||
	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
		return (DDI_DMA_BADATTR);
	}

	/* This nexus cannot honor a request for physical addressing. */
	if (dma_attr->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL)
		return (DDI_DMA_BADATTR);

	mppriv = kmem_zalloc(sizeof (*mppriv),
	    (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);

	if (mppriv == NULL) {
		if (waitfp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(waitfp, arg,
			    &softsp->dvma_call_list_id);
		}
		return (DDI_DMA_NORESOURCES);
	}
	mp = (ddi_dma_impl_t *)mppriv;

	DPRINTF(IOMMU_DMA_ALLOCHDL_DEBUG, ("dma_allochdl: (%s) handle %p "
	    "hi %x lo %x min %x burst %x\n",
	    ddi_get_name(dip), mp, addrhigh, addrlow,
	    dma_attr->dma_attr_minxfer, dma_attr->dma_attr_burstsizes));

	mp->dmai_rdip = rdip;
	mp->dmai_minxfer = (uint_t)dma_attr->dma_attr_minxfer;
	mp->dmai_burstsizes = (uint_t)dma_attr->dma_attr_burstsizes;
	mp->dmai_attr = *dma_attr;
	/*
	 * See if the DMA engine has any limit restrictions.  A device
	 * that can address the full 32-bit DVMA space with page (or
	 * smaller) alignment takes the cheaper DMP_NOLIMIT allocation
	 * path at bind time (vmem_alloc instead of vmem_xalloc).
	 */
	if (segalign == (ioaddr_t)UINT32_MAX &&
	    addrhigh == (ioaddr_t)UINT32_MAX &&
	    (dma_attr->dma_attr_align <= IOMMU_PAGESIZE) && addrlow == 0) {
		mp->dmai_rflags |= DMP_NOLIMIT;
	}
	mppriv->softsp = softsp;
	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);

	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}

/*
 * ddi_dma_freehdl(9E): release the handle allocated above and run any
 * pending resource callbacks now that memory has been freed.
 */
/*ARGSUSED*/
int
iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)handle;
	struct sbus_soft_state *softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	kmem_free(mppriv, sizeof (*mppriv));

	if (softsp->dvma_call_list_id != 0) {
		ddi_run_callback(&softsp->dvma_call_list_id);
	}
	return (DDI_SUCCESS);
}

/*
 * Validate a bind request's size against the handle's attribute limits
 * (segment boundary and address range).  May shrink *size when the
 * request allows DDI_DMA_PARTIAL; returns DDI_DMA_TOOBIG when it does
 * not, DDI_DMA_MAPOK otherwise.
 */
static int
check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr,
    uint32_t *size)
{
	ioaddr_t addrlow;
	ioaddr_t addrhigh;
	uint32_t segalign;
	uint32_t smask;

	smask = *size - 1;
	segalign = dma_attr->dma_attr_seg;
	/* Object crosses the segment boundary: partial-map or fail. */
	if (smask > segalign) {
		if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
			return (DDI_DMA_TOOBIG);
		/* Clamp the window to one segment. */
		*size = segalign + 1;
	}
	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
	/*
	 * Object does not fit in [addrlow, addrhigh] (the second test
	 * catches unsigned wraparound of addrlow + smask).
	 */
	if (addrlow + smask > addrhigh || addrlow + smask < addrlow) {
		if (!((addrlow + dmareq->dmar_object.dmao_size == 0) &&
		    (addrhigh == (ioaddr_t)-1))) {
			if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
				return (DDI_DMA_TOOBIG);
			/* Shrink to what the address range can hold. */
			*size = MIN(addrhigh - addrlow + 1, *size);
		}
	}
	return (DDI_DMA_MAPOK);
}

/*
 * ddi_dma_bindhdl(9E) implementation: bind a memory object (kernel
 * vaddr, buf vaddr, or page list) to DVMA space.  Allocates DVMA
 * addresses from the dvma_arena, loads IOMMU TTEs for the object's
 * pages, and fills in a single cookie.  Returns DDI_DMA_MAPPED,
 * DDI_DMA_PARTIAL_MAP (windowed bind), or an error code.
 *
 * NOTE(review): OBJSIZE is a macro defined earlier in this file (not
 * visible in this chunk); from its uses alongside dmao_size it appears
 * to expand to the request object's size — confirm against the full
 * file before relying on that.
 */
int
iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	page_t *pp;
	uint32_t size;
	ioaddr_t ioaddr;
	uint_t offset;
	uintptr_t addr = 0;
	pgcnt_t npages;
	int rval;
	ddi_dma_attr_t *dma_attr;
	struct sbus_soft_state *softsp;
	struct page **pplist = NULL;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

#ifdef lint
	dip = dip;
	rdip = rdip;
#endif

	/* A handle supports only one bind at a time. */
	if (mp->dmai_inuse)
		return (DDI_DMA_INUSE);

	dma_attr = &mp->dmai_attr;
	size = (uint32_t)dmareq->dmar_object.dmao_size;
	/* Unrestricted handles skip the attribute/size validation. */
	if (!(mp->dmai_rflags & DMP_NOLIMIT)) {
		rval = check_dma_attr(dmareq, dma_attr, &size);
		if (rval != DDI_DMA_MAPOK)
			return (rval);
	}
	mp->dmai_inuse = 1;
	mp->dmai_offset = 0;
	/* Keep DMP_NOLIMIT from allochdl; take the rest from the request. */
	mp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
	    (mp->dmai_rflags & DMP_NOLIMIT);

	/*
	 * Extract addr/offset/page-list/page-count from the object,
	 * by object type.
	 */
	switch (dmareq->dmar_object.dmao_type) {
	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		addr = (uintptr_t)dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
		offset = addr & IOMMU_PAGEOFFSET;
		pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
		npages = iommu_btopr(OBJSIZE + offset);

		DPRINTF(IOMMU_DMAMAP_DEBUG, ("dma_map vaddr: %lx pages "
		    "req addr %lx off %x OBJSIZE %x\n",
		    npages, addr, offset, OBJSIZE));

		/* We don't need the addr anymore if we have a shadow list */
		/*
		 * NOTE(review): addr is uintptr_t; assigning NULL here
		 * works but `addr = 0` would be type-correct.
		 */
		if (pplist != NULL)
			addr = NULL;
		pp = NULL;
		break;

	case DMA_OTYP_PAGES:
		pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
		offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
		npages = iommu_btopr(OBJSIZE + offset);
		break;

	case DMA_OTYP_PADDR:
	default:
		/*
		 * Not a supported type for this implementation
		 */
		rval = DDI_DMA_NOMAPPING;
		goto bad;
	}

	/* Get our soft state once we know we're mapping an object. */
	softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
		if (size != OBJSIZE) {
			/*
			 * If the request is for partial mapping arrangement,
			 * the device has to be able to address at least the
			 * size of the window we are establishing.
			 */
			if (size < iommu_ptob(MIN_DVMA_WIN_SIZE)) {
				rval = DDI_DMA_NOMAPPING;
				goto bad;
			}
			npages = iommu_btopr(size + offset);
		}
		/*
		 * If the size requested is less than a moderate amt,
		 * skip the partial mapping stuff- it's not worth the
		 * effort.
		 */
		if (npages > MIN_DVMA_WIN_SIZE) {
			/* Map only the first window for now. */
			npages = MIN_DVMA_WIN_SIZE + iommu_btopr(offset);
			size = iommu_ptob(MIN_DVMA_WIN_SIZE);
			DPRINTF(IOMMU_DMA_SETUP_DEBUG, ("dma_setup: SZ %x pg "
			    "%lx sz %x\n", OBJSIZE, npages, size));
			if (pplist != NULL) {
				/* Remember the shadow page list for windows. */
				mp->dmai_minfo = (void *)pplist;
				mp->dmai_rflags |= DMP_SHADOW;
			}
		} else {
			/* Small enough to map whole: drop PARTIAL. */
			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
		}
	} else {
		/* Non-partial binds must leave slack in the DVMA arena. */
		if (npages >= iommu_btop(softsp->iommu_dvma_size) -
		    MIN_DVMA_WIN_SIZE) {
			rval = DDI_DMA_TOOBIG;
			goto bad;
		}
	}

	/*
	 * save dmareq-object, size and npages into mp
	 */
	mp->dmai_object = dmareq->dmar_object;
	mp->dmai_size = size;
	mp->dmai_ndvmapages = npages;

	if (mp->dmai_rflags & DMP_NOLIMIT) {
		/* Unconstrained handle: cheap arena allocation. */
		ioaddr = (ioaddr_t)(uintptr_t)vmem_alloc(softsp->dvma_arena,
		    iommu_ptob(npages),
		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
		if (ioaddr == 0) {
			rval = DDI_DMA_NORESOURCES;
			goto bad;
		}

		/*
		 * If we have a 1 page request and we're working with a page
		 * list, we're going to speed load an IOMMU entry.
		 */
		if (npages == 1 && !addr) {
			uint64_t iotte_flag = IOTTE_VALID | IOTTE_CACHE |
			    IOTTE_WRITE | IOTTE_STREAM;
			volatile uint64_t *iotte_ptr;
			pfn_t pfn;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
			struct io_mem_list *iomemp;
			pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */

			iotte_ptr = IOTTE_NDX(ioaddr,
			    softsp->soft_tsb_base_addr);

			/*
			 * Consistent mappings bypass the streaming cache
			 * (and need no sync); the XOR clears IOTTE_STREAM
			 * which was set in the initial flag value above.
			 */
			if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
				mp->dmai_rflags |= DMP_NOSYNC;
				iotte_flag ^= IOTTE_STREAM;
			} else if (softsp->stream_buf_off)
				iotte_flag ^= IOTTE_STREAM;

			/* Single page: a windowed bind makes no sense. */
			mp->dmai_rflags ^= DDI_DMA_PARTIAL;

			if (pp != NULL)
				pfn = pp->p_pagenum;
			else
				pfn = (*pplist)->p_pagenum;

			/* Flush the old TLB entry before loading the TTE. */
			iommu_tlb_flush(softsp, ioaddr, 1);

			*iotte_ptr =
			    ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

			mp->dmai_mapping = (ioaddr_t)(ioaddr + offset);
			mp->dmai_nwin = 0;
			if (cp != NULL) {
				cp->dmac_notused = 0;
				cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
				cp->dmac_size = mp->dmai_size;
				cp->dmac_type = 0;
				*ccountp = 1;
			}

			DPRINTF(IOMMU_TTE, ("speed loading: TTE index %p "
			    "pfn %lx tte flag %lx addr %lx ioaddr %x\n",
			    iotte_ptr, pfn, iotte_flag, addr, ioaddr));

#if defined(DEBUG) && defined(IO_MEMUSAGE)
			/* Record this mapping on the debug I/O-memory list. */
			iomemp = kmem_alloc(sizeof (struct io_mem_list),
			    KM_SLEEP);
			iomemp->rdip = mp->dmai_rdip;
			iomemp->ioaddr = ioaddr;
			iomemp->addr = addr;
			iomemp->npages = npages;
			pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) *
			    (npages + 1), KM_SLEEP);
			*pfnp = pfn;
			mutex_enter(&softsp->iomemlock);
			iomemp->next = softsp->iomem;
			softsp->iomem = iomemp;
			mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

			return (DDI_DMA_MAPPED);
		}
	} else {
		/*
		 * Constrained handle: honor alignment, segment boundary
		 * and address range via vmem_xalloc.
		 */
		ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
		    iommu_ptob(npages),
		    MAX((uint_t)dma_attr->dma_attr_align, IOMMU_PAGESIZE), 0,
		    (uint_t)dma_attr->dma_attr_seg + 1,
		    (void *)(uintptr_t)(ioaddr_t)dma_attr->dma_attr_addr_lo,
		    (void *)(uintptr_t)
		    ((ioaddr_t)dma_attr->dma_attr_addr_hi + 1),
		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
	}

	if (ioaddr == 0) {
		if (dmareq->dmar_fp == DDI_DMA_SLEEP)
			rval = DDI_DMA_NOMAPPING;
		else
			rval = DDI_DMA_NORESOURCES;
		goto bad;
	}

	mp->dmai_mapping = ioaddr + offset;
	ASSERT(mp->dmai_mapping >= softsp->iommu_dvma_base);

	/*
	 * At this point we have a range of virtual address allocated
	 * with which we now have to map to the requested object.
	 */
	if (addr) {
		rval = iommu_create_vaddr_mappings(mp,
		    addr & ~IOMMU_PAGEOFFSET);
		if (rval == DDI_DMA_NOMAPPING)
			goto bad_nomap;
	} else {
		rval = iommu_create_pp_mappings(mp, pp, pplist);
		if (rval == DDI_DMA_NOMAPPING)
			goto bad_nomap;
	}

	if (cp) {
		cp->dmac_notused = 0;
		cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
		cp->dmac_size = mp->dmai_size;
		cp->dmac_type = 0;
		*ccountp = 1;
	}
	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
		/* Report how many windows cover the whole object. */
		size = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
		mp->dmai_nwin =
		    (dmareq->dmar_object.dmao_size + (size - 1)) / size;
		return (DDI_DMA_PARTIAL_MAP);
	} else {
		mp->dmai_nwin = 0;
		return (DDI_DMA_MAPPED);
	}

bad_nomap:
	/*
	 * Could not create mmu mappings.  Return the DVMA space to
	 * whichever arena interface allocated it.
	 */
	if (mp->dmai_rflags & DMP_NOLIMIT) {
		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
		    iommu_ptob(npages));
	} else {
		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
		    iommu_ptob(npages));
	}

bad:
	/* On resource exhaustion, arrange a retry callback if requested. */
	if (rval == DDI_DMA_NORESOURCES &&
	    dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
		ddi_set_callback(dmareq->dmar_fp,
		    dmareq->dmar_arg, &softsp->dvma_call_list_id);
	}
	mp->dmai_inuse = 0;
	return (rval);
}

/*
 * ddi_dma_unbindhdl(9E): tear down a bind — sync streaming buffers for
 * non-consistent mappings, release the DVMA space, and reset the handle
 * for reuse.
 */
/* ARGSUSED */
int
iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ioaddr_t addr;
	uint_t npages;
	size_t size;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
	npages = mp->dmai_ndvmapages;
	size = iommu_ptob(npages);

	DPRINTF(IOMMU_DMA_UNBINDHDL_DEBUG, ("iommu_dma_unbindhdl: "
	    "unbinding addr %x for %x pages\n", addr, mp->dmai_ndvmapages));

	/* sync the entire object */
	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
		/* flush stream write buffers */
		sync_stream_buf(softsp, addr, npages, (int *)&mppriv->sync_flag,
		    mppriv->phys_sync_flag);
	}

#if defined(DEBUG) && defined(IO_MEMDEBUG)
	/*
	 * 'Free' the dma mappings.
	 */
	iommu_remove_mappings(mp);
#endif /* DEBUG && IO_MEMDEBUG */

	ASSERT(npages > (uint_t)0);
	/* Return DVMA space via the interface that allocated it. */
	if (mp->dmai_rflags & DMP_NOLIMIT)
		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
	else
		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)addr, size);

	/* Reset the handle so it can be bound again. */
	mp->dmai_ndvmapages = 0;
	mp->dmai_inuse = 0;
	mp->dmai_minfo = NULL;

	/* Freed resources may satisfy a waiting allocation. */
	if (softsp->dvma_call_list_id != 0)
		ddi_run_callback(&softsp->dvma_call_list_id);

	return (DDI_SUCCESS);
}

/*
 * ddi_dma_flush (sync) entry point: for non-consistent mappings, flush
 * the streaming buffers for the handle's entire mapping.  The off, len
 * and cache_flags arguments are ignored — the whole object is synced.
 */
/*ARGSUSED*/
int
iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
		    mppriv->phys_sync_flag);
	}
	return (DDI_SUCCESS);
}

/*
 * ddi_dma_win(9E): shift a partially-bound handle to window `win`.
 * Fills in the cookie/offset/length for the window and remaps the
 * IOMMU (via iommu_map_window) unless the handle is already positioned
 * at the requested offset.
 */
/*ARGSUSED*/
int
iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	off_t offset;
	uint_t winsize;
	uint_t newoff;
	int rval;

	offset = mp->dmai_mapping & IOMMU_PAGEOFFSET;
	winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));

	DPRINTF(IOMMU_DMA_WIN_DEBUG, ("getwin win %d winsize %x\n", win,
	    winsize));

	/*
	 * win is in the range [0 .. dmai_nwin-1]
	 */
	if (win >= mp->dmai_nwin)
		return (DDI_FAILURE);

	newoff = win * winsize;
	if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer)
		return (DDI_FAILURE);

	ASSERT(cookiep);
	cookiep->dmac_notused = 0;
	cookiep->dmac_type = 0;
	cookiep->dmac_address = (ioaddr_t)mp->dmai_mapping;
	cookiep->dmac_size = mp->dmai_size;
	*ccountp = 1;
	*offp = (off_t)newoff;
	*lenp = (uint_t)winsize;

	if (newoff == mp->dmai_offset) {
		/*
		 * Nothing to do...
		 */
		return (DDI_SUCCESS);
	}

	if ((rval = iommu_map_window(mp, newoff, winsize)) != DDI_SUCCESS)
		return (rval);

	/*
	 * Set this again in case iommu_map_window() has changed it
	 */
	cookiep->dmac_size = mp->dmai_size;

	return (DDI_SUCCESS);
}

/*
 * Re-point a partially-bound handle's DVMA range at the portion of the
 * object starting at byte offset `newoff`: tear down the current
 * window's mappings (debug builds), locate the new window's vaddr or
 * page_t chain, create fresh IOMMU mappings, and invalidate the read
 * streaming buffer for non-consistent handles.
 */
static int
iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize)
{
	uintptr_t addr = 0;
	page_t *pp;
	uint_t flags;	/* byte cursor when walking the page list */
	struct page **pplist = NULL;

#if defined(DEBUG) && defined(IO_MEMDEBUG)
	/* Free mappings for current window */
	iommu_remove_mappings(mp);
#endif /* DEBUG && IO_MEMDEBUG */

	mp->dmai_offset = newoff;
	/* Last window may be short. */
	mp->dmai_size = mp->dmai_object.dmao_size - newoff;
	mp->dmai_size = MIN(mp->dmai_size, winsize);

	if (mp->dmai_object.dmao_type == DMA_OTYP_VADDR ||
	    mp->dmai_object.dmao_type == DMA_OTYP_BUFVADDR) {
		if (mp->dmai_rflags & DMP_SHADOW) {
			/* Advance into the shadow page list saved at bind. */
			pplist = (struct page **)mp->dmai_minfo;
			ASSERT(pplist != NULL);
			pplist = pplist + (newoff >> MMU_PAGESHIFT);
		} else {
			addr = (uintptr_t)
			    mp->dmai_object.dmao_obj.virt_obj.v_addr;
			addr = (addr + newoff) & ~IOMMU_PAGEOFFSET;
		}
		pp = NULL;
	} else {
		/* Page-list object: walk forward to the window's first page. */
		pp = mp->dmai_object.dmao_obj.pp_obj.pp_pp;
		flags = 0;
		while (flags < newoff) {
			pp = pp->p_next;
			flags += MMU_PAGESIZE;
		}
	}

	/* Set up mappings for next window */
	if (addr) {
		if (iommu_create_vaddr_mappings(mp, addr) < 0)
			return (DDI_FAILURE);
	} else {
		if (iommu_create_pp_mappings(mp, pp, pplist) < 0)
			return (DDI_FAILURE);
	}

	/*
	 * also invalidate read stream buffer
	 */
	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
		    mppriv->phys_sync_flag);
	}

	return (DDI_SUCCESS);

}

/*
 * Old-style ddi_dma_map entry point using ddi_dma_lim(9S) limits.
 * Validates the limits, and for an advisory call (handlep == NULL)
 * just reports DDI_DMA_MAPOK.  Otherwise it builds a handle, converts
 * the limits into a ddi_dma_attr_t, and delegates the actual bind to
 * iommu_dma_bindhdl().
 */
int
iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
{
	ddi_dma_lim_t *dma_lim = dmareq->dmar_limits;
	ddi_dma_impl_t *mp;
	ddi_dma_attr_t *dma_attr;
	struct dma_impl_priv *mppriv;
	ioaddr_t addrlow, addrhigh;
	ioaddr_t segalign;
	int rval;
	struct sbus_soft_state *softsp =
	    (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
	    ddi_get_instance(dip));

	/* Sanity-check the address window, as in iommu_dma_allochdl(). */
	addrlow = dma_lim->dlim_addr_lo;
	addrhigh = dma_lim->dlim_addr_hi;
	if ((addrhigh <= addrlow) ||
	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * Setup DMA burstsizes and min-xfer counts.
	 */
	(void) iommu_dma_lim_setup(dip, rdip, softsp, &dma_lim->dlim_burstsizes,
	    (uint_t)dma_lim->dlim_burstsizes, &dma_lim->dlim_minxfer,
	    dmareq->dmar_flags);

	if (dma_lim->dlim_burstsizes == 0)
		return (DDI_DMA_NOMAPPING);
	/*
	 * If not an advisory call, get a DMA handle
	 */
	if (!handlep) {
		return (DDI_DMA_MAPOK);
	}

	mppriv = kmem_zalloc(sizeof (*mppriv),
	    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
	if (mppriv == NULL) {
		if (dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(dmareq->dmar_fp,
			    dmareq->dmar_arg, &softsp->dvma_call_list_id);
		}
		return (DDI_DMA_NORESOURCES);
	}
	mp = (ddi_dma_impl_t *)mppriv;
	mp->dmai_rdip = rdip;
	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
	mp->dmai_minxfer = dma_lim->dlim_minxfer;
	mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
	mp->dmai_offset = 0;
	mp->dmai_ndvmapages = 0;
	mp->dmai_minfo = 0;
	mp->dmai_inuse = 0;
	segalign = dma_lim->dlim_cntr_max;
	/* See if the DMA engine has any limit restrictions. */
	if (segalign == UINT32_MAX && addrhigh == UINT32_MAX &&
	    addrlow == 0) {
		mp->dmai_rflags |= DMP_NOLIMIT;
	}
	mppriv->softsp = softsp;
	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
	/* Translate the old-style limits into handle attributes. */
	dma_attr = &mp->dmai_attr;
	dma_attr->dma_attr_align = 1;
	dma_attr->dma_attr_addr_lo = addrlow;
	dma_attr->dma_attr_addr_hi = addrhigh;
	dma_attr->dma_attr_seg = segalign;
	dma_attr->dma_attr_burstsizes = dma_lim->dlim_burstsizes;
	rval = iommu_dma_bindhdl(dip, rdip, (ddi_dma_handle_t)mp,
	    dmareq, NULL, NULL);
	/* On failure the handle is ours to free; on success, the caller's. */
	if (rval && (rval != DDI_DMA_PARTIAL_MAP)) {
		kmem_free(mppriv, sizeof (*mppriv));
	} else {
		*handlep = (ddi_dma_handle_t)mp;
	}
	return (rval);
}

/*
 * ddi_dma_mctl(9E): miscellaneous DMA control operations, dispatched
 * on `request`.  (Only the opening of the DDI_DMA_FREE case is visible
 * in this chunk; the switch continues below.)
 */
/*ARGSUSED*/
int
iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags)
{
	ioaddr_t addr;
	uint_t offset;
	pgcnt_t npages;
	size_t size;
	ddi_dma_cookie_t *cp;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

	DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: handle %p ", mp));
	switch (request) {
	case DDI_DMA_FREE:
	{
14050Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 14060Sstevel@tonic-gate struct sbus_soft_state *softsp = mppriv->softsp; 14070Sstevel@tonic-gate ASSERT(softsp != NULL); 14080Sstevel@tonic-gate 14090Sstevel@tonic-gate /* 14100Sstevel@tonic-gate * 'Free' the dma mappings. 14110Sstevel@tonic-gate */ 14120Sstevel@tonic-gate addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET); 14130Sstevel@tonic-gate npages = mp->dmai_ndvmapages; 14140Sstevel@tonic-gate size = iommu_ptob(npages); 14150Sstevel@tonic-gate 14160Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_DMA_FREE_DEBUG, ("iommu_dma_mctl dmafree:" 14170Sstevel@tonic-gate "freeing vaddr %x for %x pages.\n", addr, 14180Sstevel@tonic-gate mp->dmai_ndvmapages)); 14190Sstevel@tonic-gate /* sync the entire object */ 14200Sstevel@tonic-gate if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) { 14210Sstevel@tonic-gate /* flush stream write buffers */ 14220Sstevel@tonic-gate sync_stream_buf(softsp, addr, npages, 14230Sstevel@tonic-gate (int *)&mppriv->sync_flag, mppriv->phys_sync_flag); 14240Sstevel@tonic-gate } 14250Sstevel@tonic-gate 14260Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG) 14270Sstevel@tonic-gate iommu_remove_mappings(mp); 14280Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */ 14290Sstevel@tonic-gate 14300Sstevel@tonic-gate ASSERT(npages > (uint_t)0); 14310Sstevel@tonic-gate if (mp->dmai_rflags & DMP_NOLIMIT) 1432*1035Smike_s vmem_free(softsp->dvma_arena, 1433*1035Smike_s (void *)(uintptr_t)addr, size); 14340Sstevel@tonic-gate else 1435*1035Smike_s vmem_xfree(softsp->dvma_arena, 1436*1035Smike_s (void *)(uintptr_t)addr, size); 14370Sstevel@tonic-gate 14380Sstevel@tonic-gate kmem_free(mppriv, sizeof (*mppriv)); 14390Sstevel@tonic-gate 14400Sstevel@tonic-gate if (softsp->dvma_call_list_id != 0) 14410Sstevel@tonic-gate ddi_run_callback(&softsp->dvma_call_list_id); 14420Sstevel@tonic-gate 14430Sstevel@tonic-gate break; 14440Sstevel@tonic-gate } 14450Sstevel@tonic-gate 
14460Sstevel@tonic-gate case DDI_DMA_SET_SBUS64: 14470Sstevel@tonic-gate { 14480Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 14490Sstevel@tonic-gate 14500Sstevel@tonic-gate return (iommu_dma_lim_setup(dip, rdip, mppriv->softsp, 14510Sstevel@tonic-gate &mp->dmai_burstsizes, (uint_t)*lenp, &mp->dmai_minxfer, 14520Sstevel@tonic-gate DDI_DMA_SBUS_64BIT)); 14530Sstevel@tonic-gate } 14540Sstevel@tonic-gate 14550Sstevel@tonic-gate case DDI_DMA_HTOC: 14560Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_HTOC_DEBUG, ("htoc off %lx mapping %lx " 1457*1035Smike_s "size %x\n", *offp, mp->dmai_mapping, 14580Sstevel@tonic-gate mp->dmai_size)); 14590Sstevel@tonic-gate 14600Sstevel@tonic-gate if ((uint_t)(*offp) >= mp->dmai_size) 14610Sstevel@tonic-gate return (DDI_FAILURE); 14620Sstevel@tonic-gate 14630Sstevel@tonic-gate cp = (ddi_dma_cookie_t *)objp; 14640Sstevel@tonic-gate cp->dmac_notused = 0; 14650Sstevel@tonic-gate cp->dmac_address = (mp->dmai_mapping + (uint_t)(*offp)); 14660Sstevel@tonic-gate cp->dmac_size = 14670Sstevel@tonic-gate mp->dmai_mapping + mp->dmai_size - cp->dmac_address; 14680Sstevel@tonic-gate cp->dmac_type = 0; 14690Sstevel@tonic-gate 14700Sstevel@tonic-gate break; 14710Sstevel@tonic-gate 14720Sstevel@tonic-gate case DDI_DMA_KVADDR: 14730Sstevel@tonic-gate /* 14740Sstevel@tonic-gate * If a physical address mapping has percolated this high, 14750Sstevel@tonic-gate * that is an error (maybe?). 
14760Sstevel@tonic-gate */ 14770Sstevel@tonic-gate if (mp->dmai_rflags & DMP_PHYSADDR) { 14780Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_KVADDR_DEBUG, ("kvaddr of phys " 14790Sstevel@tonic-gate "mapping\n")); 14800Sstevel@tonic-gate return (DDI_FAILURE); 14810Sstevel@tonic-gate } 14820Sstevel@tonic-gate 14830Sstevel@tonic-gate return (DDI_FAILURE); 14840Sstevel@tonic-gate 14850Sstevel@tonic-gate case DDI_DMA_NEXTWIN: 14860Sstevel@tonic-gate { 14870Sstevel@tonic-gate ddi_dma_win_t *owin, *nwin; 14880Sstevel@tonic-gate uint_t winsize, newoff; 14890Sstevel@tonic-gate int rval; 14900Sstevel@tonic-gate 14910Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin\n")); 14920Sstevel@tonic-gate 14930Sstevel@tonic-gate mp = (ddi_dma_impl_t *)handle; 14940Sstevel@tonic-gate owin = (ddi_dma_win_t *)offp; 14950Sstevel@tonic-gate nwin = (ddi_dma_win_t *)objp; 14960Sstevel@tonic-gate if (mp->dmai_rflags & DDI_DMA_PARTIAL) { 14970Sstevel@tonic-gate if (*owin == NULL) { 14980Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, 14990Sstevel@tonic-gate ("nextwin: win == NULL\n")); 15000Sstevel@tonic-gate mp->dmai_offset = 0; 15010Sstevel@tonic-gate *nwin = (ddi_dma_win_t)mp; 15020Sstevel@tonic-gate return (DDI_SUCCESS); 15030Sstevel@tonic-gate } 15040Sstevel@tonic-gate 15050Sstevel@tonic-gate offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET); 15060Sstevel@tonic-gate winsize = iommu_ptob(mp->dmai_ndvmapages - 15070Sstevel@tonic-gate iommu_btopr(offset)); 15080Sstevel@tonic-gate 15090Sstevel@tonic-gate newoff = (uint_t)(mp->dmai_offset + winsize); 15100Sstevel@tonic-gate if (newoff > mp->dmai_object.dmao_size - 15110Sstevel@tonic-gate mp->dmai_minxfer) 15120Sstevel@tonic-gate return (DDI_DMA_DONE); 15130Sstevel@tonic-gate 15140Sstevel@tonic-gate if ((rval = iommu_map_window(mp, newoff, winsize)) 15150Sstevel@tonic-gate != DDI_SUCCESS) 15160Sstevel@tonic-gate return (rval); 15170Sstevel@tonic-gate } else { 15180Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, 
("nextwin: no " 15190Sstevel@tonic-gate "partial mapping\n")); 15200Sstevel@tonic-gate if (*owin != NULL) 15210Sstevel@tonic-gate return (DDI_DMA_DONE); 15220Sstevel@tonic-gate mp->dmai_offset = 0; 15230Sstevel@tonic-gate *nwin = (ddi_dma_win_t)mp; 15240Sstevel@tonic-gate } 15250Sstevel@tonic-gate break; 15260Sstevel@tonic-gate } 15270Sstevel@tonic-gate 15280Sstevel@tonic-gate case DDI_DMA_NEXTSEG: 15290Sstevel@tonic-gate { 15300Sstevel@tonic-gate ddi_dma_seg_t *oseg, *nseg; 15310Sstevel@tonic-gate 15320Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_NEXTSEG_DEBUG, ("nextseg:\n")); 15330Sstevel@tonic-gate 15340Sstevel@tonic-gate oseg = (ddi_dma_seg_t *)lenp; 15350Sstevel@tonic-gate if (*oseg != NULL) 15360Sstevel@tonic-gate return (DDI_DMA_DONE); 15370Sstevel@tonic-gate nseg = (ddi_dma_seg_t *)objp; 15380Sstevel@tonic-gate *nseg = *((ddi_dma_seg_t *)offp); 15390Sstevel@tonic-gate break; 15400Sstevel@tonic-gate } 15410Sstevel@tonic-gate 15420Sstevel@tonic-gate case DDI_DMA_SEGTOC: 15430Sstevel@tonic-gate { 15440Sstevel@tonic-gate ddi_dma_seg_impl_t *seg; 15450Sstevel@tonic-gate 15460Sstevel@tonic-gate seg = (ddi_dma_seg_impl_t *)handle; 15470Sstevel@tonic-gate cp = (ddi_dma_cookie_t *)objp; 15480Sstevel@tonic-gate cp->dmac_notused = 0; 15490Sstevel@tonic-gate cp->dmac_address = (ioaddr_t)seg->dmai_mapping; 15500Sstevel@tonic-gate cp->dmac_size = *lenp = seg->dmai_size; 15510Sstevel@tonic-gate cp->dmac_type = 0; 15520Sstevel@tonic-gate *offp = seg->dmai_offset; 15530Sstevel@tonic-gate break; 15540Sstevel@tonic-gate } 15550Sstevel@tonic-gate 15560Sstevel@tonic-gate case DDI_DMA_MOVWIN: 15570Sstevel@tonic-gate { 15580Sstevel@tonic-gate uint_t winsize; 15590Sstevel@tonic-gate uint_t newoff; 15600Sstevel@tonic-gate int rval; 15610Sstevel@tonic-gate 15620Sstevel@tonic-gate offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET); 15630Sstevel@tonic-gate winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset)); 15640Sstevel@tonic-gate 1565*1035Smike_s 
DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("movwin off %lx len %lx " 15660Sstevel@tonic-gate "winsize %x\n", *offp, *lenp, winsize)); 15670Sstevel@tonic-gate 15680Sstevel@tonic-gate if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0) 15690Sstevel@tonic-gate return (DDI_FAILURE); 15700Sstevel@tonic-gate 15710Sstevel@tonic-gate if (*lenp != (uint_t)-1 && *lenp != winsize) { 15720Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad length\n")); 15730Sstevel@tonic-gate return (DDI_FAILURE); 15740Sstevel@tonic-gate } 15750Sstevel@tonic-gate newoff = (uint_t)*offp; 15760Sstevel@tonic-gate if (newoff & (winsize - 1)) { 15770Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad off\n")); 15780Sstevel@tonic-gate return (DDI_FAILURE); 15790Sstevel@tonic-gate } 15800Sstevel@tonic-gate 15810Sstevel@tonic-gate if (newoff == mp->dmai_offset) { 15820Sstevel@tonic-gate /* 15830Sstevel@tonic-gate * Nothing to do... 15840Sstevel@tonic-gate */ 15850Sstevel@tonic-gate break; 15860Sstevel@tonic-gate } 15870Sstevel@tonic-gate 15880Sstevel@tonic-gate /* 15890Sstevel@tonic-gate * Check out new address... 
15900Sstevel@tonic-gate */ 15910Sstevel@tonic-gate if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer) { 15920Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("newoff out of " 15930Sstevel@tonic-gate "range\n")); 15940Sstevel@tonic-gate return (DDI_FAILURE); 15950Sstevel@tonic-gate } 15960Sstevel@tonic-gate 15970Sstevel@tonic-gate rval = iommu_map_window(mp, newoff, winsize); 15980Sstevel@tonic-gate if (rval != DDI_SUCCESS) 15990Sstevel@tonic-gate return (rval); 16000Sstevel@tonic-gate 16010Sstevel@tonic-gate if ((cp = (ddi_dma_cookie_t *)objp) != 0) { 16020Sstevel@tonic-gate cp->dmac_notused = 0; 16030Sstevel@tonic-gate cp->dmac_address = (ioaddr_t)mp->dmai_mapping; 16040Sstevel@tonic-gate cp->dmac_size = mp->dmai_size; 16050Sstevel@tonic-gate cp->dmac_type = 0; 16060Sstevel@tonic-gate } 16070Sstevel@tonic-gate *offp = (off_t)newoff; 16080Sstevel@tonic-gate *lenp = (uint_t)winsize; 16090Sstevel@tonic-gate break; 16100Sstevel@tonic-gate } 16110Sstevel@tonic-gate 16120Sstevel@tonic-gate case DDI_DMA_REPWIN: 16130Sstevel@tonic-gate if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0) { 16140Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin fail\n")); 16150Sstevel@tonic-gate return (DDI_FAILURE); 16160Sstevel@tonic-gate } 16170Sstevel@tonic-gate 16180Sstevel@tonic-gate *offp = (off_t)mp->dmai_offset; 16190Sstevel@tonic-gate 16200Sstevel@tonic-gate addr = mp->dmai_ndvmapages - 16210Sstevel@tonic-gate iommu_btopr(mp->dmai_mapping & IOMMU_PAGEOFFSET); 16220Sstevel@tonic-gate 16230Sstevel@tonic-gate *lenp = (uint_t)iommu_ptob(addr); 16240Sstevel@tonic-gate 1625*1035Smike_s DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin off %lx len %x\n", 16260Sstevel@tonic-gate mp->dmai_offset, mp->dmai_size)); 16270Sstevel@tonic-gate 16280Sstevel@tonic-gate break; 16290Sstevel@tonic-gate 16300Sstevel@tonic-gate case DDI_DMA_GETERR: 16310Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_GETERR_DEBUG, 16320Sstevel@tonic-gate ("iommu_dma_mctl: geterr\n")); 16330Sstevel@tonic-gate 
16340Sstevel@tonic-gate break; 16350Sstevel@tonic-gate 16360Sstevel@tonic-gate case DDI_DMA_COFF: 16370Sstevel@tonic-gate cp = (ddi_dma_cookie_t *)offp; 16380Sstevel@tonic-gate addr = cp->dmac_address; 16390Sstevel@tonic-gate 16400Sstevel@tonic-gate if (addr < mp->dmai_mapping || 16410Sstevel@tonic-gate addr >= mp->dmai_mapping + mp->dmai_size) 16420Sstevel@tonic-gate return (DDI_FAILURE); 16430Sstevel@tonic-gate 16440Sstevel@tonic-gate *objp = (caddr_t)(addr - mp->dmai_mapping); 16450Sstevel@tonic-gate 1646*1035Smike_s DPRINTF(IOMMU_DMAMCTL_COFF_DEBUG, ("coff off %lx mapping %lx " 16470Sstevel@tonic-gate "size %x\n", (ulong_t)*objp, mp->dmai_mapping, 16480Sstevel@tonic-gate mp->dmai_size)); 16490Sstevel@tonic-gate 16500Sstevel@tonic-gate break; 16510Sstevel@tonic-gate 16520Sstevel@tonic-gate case DDI_DMA_RESERVE: 16530Sstevel@tonic-gate { 16540Sstevel@tonic-gate struct ddi_dma_req *dmareq = (struct ddi_dma_req *)offp; 16550Sstevel@tonic-gate ddi_dma_lim_t *dma_lim; 16560Sstevel@tonic-gate ddi_dma_handle_t *handlep; 16570Sstevel@tonic-gate uint_t np; 16580Sstevel@tonic-gate ioaddr_t ioaddr; 16590Sstevel@tonic-gate int i; 16600Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma; 16610Sstevel@tonic-gate struct sbus_soft_state *softsp = 16620Sstevel@tonic-gate (struct sbus_soft_state *)ddi_get_soft_state(sbusp, 16630Sstevel@tonic-gate ddi_get_instance(dip)); 16640Sstevel@tonic-gate 16650Sstevel@tonic-gate /* Some simple sanity checks */ 16660Sstevel@tonic-gate dma_lim = dmareq->dmar_limits; 16670Sstevel@tonic-gate if (dma_lim->dlim_burstsizes == 0) { 16680Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 16690Sstevel@tonic-gate ("Reserve: bad burstsizes\n")); 16700Sstevel@tonic-gate return (DDI_DMA_BADLIMITS); 16710Sstevel@tonic-gate } 16720Sstevel@tonic-gate if ((AHI <= ALO) || (AHI < softsp->iommu_dvma_base)) { 16730Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 16740Sstevel@tonic-gate ("Reserve: bad limits\n")); 16750Sstevel@tonic-gate return 
(DDI_DMA_BADLIMITS); 16760Sstevel@tonic-gate } 16770Sstevel@tonic-gate 16780Sstevel@tonic-gate np = dmareq->dmar_object.dmao_size; 16790Sstevel@tonic-gate mutex_enter(&softsp->dma_pool_lock); 16800Sstevel@tonic-gate if (np > softsp->dma_reserve) { 16810Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 16820Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 16830Sstevel@tonic-gate ("Reserve: dma_reserve is exhausted\n")); 16840Sstevel@tonic-gate return (DDI_DMA_NORESOURCES); 16850Sstevel@tonic-gate } 16860Sstevel@tonic-gate 16870Sstevel@tonic-gate softsp->dma_reserve -= np; 16880Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 16890Sstevel@tonic-gate mp = kmem_zalloc(sizeof (*mp), KM_SLEEP); 16900Sstevel@tonic-gate mp->dmai_rflags = DMP_BYPASSNEXUS; 16910Sstevel@tonic-gate mp->dmai_rdip = rdip; 16920Sstevel@tonic-gate mp->dmai_minxfer = dma_lim->dlim_minxfer; 16930Sstevel@tonic-gate mp->dmai_burstsizes = dma_lim->dlim_burstsizes; 16940Sstevel@tonic-gate 1695*1035Smike_s ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena, 16960Sstevel@tonic-gate iommu_ptob(np), IOMMU_PAGESIZE, 0, 1697*1035Smike_s dma_lim->dlim_cntr_max + 1, 1698*1035Smike_s (void *)(uintptr_t)ALO, (void *)(uintptr_t)(AHI + 1), 16990Sstevel@tonic-gate dmareq->dmar_fp == DDI_DMA_SLEEP ? 
VM_SLEEP : VM_NOSLEEP); 17000Sstevel@tonic-gate 17010Sstevel@tonic-gate if (ioaddr == 0) { 17020Sstevel@tonic-gate mutex_enter(&softsp->dma_pool_lock); 17030Sstevel@tonic-gate softsp->dma_reserve += np; 17040Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 17050Sstevel@tonic-gate kmem_free(mp, sizeof (*mp)); 17060Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 17070Sstevel@tonic-gate ("Reserve: No dvma resources available\n")); 17080Sstevel@tonic-gate return (DDI_DMA_NOMAPPING); 17090Sstevel@tonic-gate } 17100Sstevel@tonic-gate 17110Sstevel@tonic-gate /* create a per request structure */ 17120Sstevel@tonic-gate iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma), 17130Sstevel@tonic-gate KM_SLEEP); 17140Sstevel@tonic-gate 17150Sstevel@tonic-gate /* 17160Sstevel@tonic-gate * We need to remember the size of the transfer so that 17170Sstevel@tonic-gate * we can figure the virtual pages to sync when the transfer 17180Sstevel@tonic-gate * is complete. 17190Sstevel@tonic-gate */ 17200Sstevel@tonic-gate iommu_fast_dvma->pagecnt = kmem_zalloc(np * 17210Sstevel@tonic-gate sizeof (uint_t), KM_SLEEP); 17220Sstevel@tonic-gate 17230Sstevel@tonic-gate /* Allocate a streaming cache sync flag for each index */ 17240Sstevel@tonic-gate iommu_fast_dvma->sync_flag = kmem_zalloc(np * 17250Sstevel@tonic-gate sizeof (int), KM_SLEEP); 17260Sstevel@tonic-gate 17270Sstevel@tonic-gate /* Allocate a physical sync flag for each index */ 17280Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag = 17290Sstevel@tonic-gate kmem_zalloc(np * sizeof (uint64_t), KM_SLEEP); 17300Sstevel@tonic-gate 17310Sstevel@tonic-gate for (i = 0; i < np; i++) 17320Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[i] = va_to_pa((caddr_t) 17330Sstevel@tonic-gate &iommu_fast_dvma->sync_flag[i]); 17340Sstevel@tonic-gate 17350Sstevel@tonic-gate mp->dmai_mapping = ioaddr; 17360Sstevel@tonic-gate mp->dmai_ndvmapages = np; 17370Sstevel@tonic-gate iommu_fast_dvma->ops = &iommu_dvma_ops; 17380Sstevel@tonic-gate 
iommu_fast_dvma->softsp = (caddr_t)softsp; 17390Sstevel@tonic-gate mp->dmai_nexus_private = (caddr_t)iommu_fast_dvma; 17400Sstevel@tonic-gate handlep = (ddi_dma_handle_t *)objp; 17410Sstevel@tonic-gate *handlep = (ddi_dma_handle_t)mp; 17420Sstevel@tonic-gate 17430Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 17440Sstevel@tonic-gate ("Reserve: mapping object %p base addr %lx size %x\n", 17450Sstevel@tonic-gate mp, mp->dmai_mapping, mp->dmai_ndvmapages)); 17460Sstevel@tonic-gate 17470Sstevel@tonic-gate break; 17480Sstevel@tonic-gate } 17490Sstevel@tonic-gate 17500Sstevel@tonic-gate case DDI_DMA_RELEASE: 17510Sstevel@tonic-gate { 17520Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 17530Sstevel@tonic-gate uint_t np = npages = mp->dmai_ndvmapages; 17540Sstevel@tonic-gate ioaddr_t ioaddr = mp->dmai_mapping; 17550Sstevel@tonic-gate volatile uint64_t *iotte_ptr; 17560Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = (struct fast_dvma *) 17570Sstevel@tonic-gate mp->dmai_nexus_private; 17580Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 17590Sstevel@tonic-gate iommu_fast_dvma->softsp; 17600Sstevel@tonic-gate 17610Sstevel@tonic-gate ASSERT(softsp != NULL); 17620Sstevel@tonic-gate 17630Sstevel@tonic-gate /* Unload stale mappings and flush stale tlb's */ 17640Sstevel@tonic-gate iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr); 17650Sstevel@tonic-gate 17660Sstevel@tonic-gate while (npages > (uint_t)0) { 17670Sstevel@tonic-gate *iotte_ptr = (uint64_t)0; /* unload tte */ 17680Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, 1); 17690Sstevel@tonic-gate 17700Sstevel@tonic-gate npages--; 17710Sstevel@tonic-gate iotte_ptr++; 17720Sstevel@tonic-gate ioaddr += IOMMU_PAGESIZE; 17730Sstevel@tonic-gate } 17740Sstevel@tonic-gate 17750Sstevel@tonic-gate ioaddr = (ioaddr_t)mp->dmai_mapping; 17760Sstevel@tonic-gate mutex_enter(&softsp->dma_pool_lock); 17770Sstevel@tonic-gate softsp->dma_reserve += np; 
17780Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 17790Sstevel@tonic-gate 17800Sstevel@tonic-gate if (mp->dmai_rflags & DMP_NOLIMIT) 1781*1035Smike_s vmem_free(softsp->dvma_arena, 1782*1035Smike_s (void *)(uintptr_t)ioaddr, iommu_ptob(np)); 17830Sstevel@tonic-gate else 1784*1035Smike_s vmem_xfree(softsp->dvma_arena, 1785*1035Smike_s (void *)(uintptr_t)ioaddr, iommu_ptob(np)); 17860Sstevel@tonic-gate 17870Sstevel@tonic-gate kmem_free(mp, sizeof (*mp)); 17880Sstevel@tonic-gate kmem_free(iommu_fast_dvma->pagecnt, np * sizeof (uint_t)); 17890Sstevel@tonic-gate kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int)); 17900Sstevel@tonic-gate kmem_free(iommu_fast_dvma->phys_sync_flag, np * 17910Sstevel@tonic-gate sizeof (uint64_t)); 17920Sstevel@tonic-gate kmem_free(iommu_fast_dvma, sizeof (struct fast_dvma)); 17930Sstevel@tonic-gate 17940Sstevel@tonic-gate 17950Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 17960Sstevel@tonic-gate ("Release: Base addr %x size %x\n", ioaddr, np)); 17970Sstevel@tonic-gate /* 17980Sstevel@tonic-gate * Now that we've freed some resource, 17990Sstevel@tonic-gate * if there is anybody waiting for it 18000Sstevel@tonic-gate * try and get them going. 
18010Sstevel@tonic-gate */ 18020Sstevel@tonic-gate if (softsp->dvma_call_list_id != 0) 18030Sstevel@tonic-gate ddi_run_callback(&softsp->dvma_call_list_id); 18040Sstevel@tonic-gate 18050Sstevel@tonic-gate break; 18060Sstevel@tonic-gate } 18070Sstevel@tonic-gate 18080Sstevel@tonic-gate default: 18090Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_DEBUG, ("iommu_dma_mctl: unknown option " 18100Sstevel@tonic-gate "0%x\n", request)); 18110Sstevel@tonic-gate 18120Sstevel@tonic-gate return (DDI_FAILURE); 18130Sstevel@tonic-gate } 18140Sstevel@tonic-gate return (DDI_SUCCESS); 18150Sstevel@tonic-gate } 18160Sstevel@tonic-gate 18170Sstevel@tonic-gate /*ARGSUSED*/ 18180Sstevel@tonic-gate void 18190Sstevel@tonic-gate iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index, 18200Sstevel@tonic-gate ddi_dma_cookie_t *cp) 18210Sstevel@tonic-gate { 18220Sstevel@tonic-gate uintptr_t addr; 18230Sstevel@tonic-gate ioaddr_t ioaddr; 18240Sstevel@tonic-gate uint_t offset; 18250Sstevel@tonic-gate pfn_t pfn; 18260Sstevel@tonic-gate int npages; 18270Sstevel@tonic-gate volatile uint64_t *iotte_ptr; 18280Sstevel@tonic-gate uint64_t iotte_flag = 0; 18290Sstevel@tonic-gate struct as *as = NULL; 18300Sstevel@tonic-gate extern struct as kas; 18310Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h; 18320Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = 18330Sstevel@tonic-gate (struct fast_dvma *)mp->dmai_nexus_private; 18340Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 18350Sstevel@tonic-gate iommu_fast_dvma->softsp; 18360Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 18370Sstevel@tonic-gate struct io_mem_list *iomemp; 18380Sstevel@tonic-gate pfn_t *pfnp; 18390Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 18400Sstevel@tonic-gate 18410Sstevel@tonic-gate ASSERT(softsp != NULL); 18420Sstevel@tonic-gate 18430Sstevel@tonic-gate addr = (uintptr_t)a; 18440Sstevel@tonic-gate ioaddr = (ioaddr_t)(mp->dmai_mapping + 
iommu_ptob(index)); 18450Sstevel@tonic-gate offset = (uint_t)(addr & IOMMU_PAGEOFFSET); 18460Sstevel@tonic-gate iommu_fast_dvma->pagecnt[index] = iommu_btopr(len + offset); 18470Sstevel@tonic-gate as = &kas; 18480Sstevel@tonic-gate addr &= ~IOMMU_PAGEOFFSET; 18490Sstevel@tonic-gate npages = iommu_btopr(len + offset); 18500Sstevel@tonic-gate 18510Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 18520Sstevel@tonic-gate iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP); 18530Sstevel@tonic-gate iomemp->rdip = mp->dmai_rdip; 18540Sstevel@tonic-gate iomemp->ioaddr = ioaddr; 18550Sstevel@tonic-gate iomemp->addr = addr; 18560Sstevel@tonic-gate iomemp->npages = npages; 18570Sstevel@tonic-gate pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1), 18580Sstevel@tonic-gate KM_SLEEP); 18590Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 18600Sstevel@tonic-gate 18610Sstevel@tonic-gate cp->dmac_address = ioaddr | offset; 18620Sstevel@tonic-gate cp->dmac_size = len; 18630Sstevel@tonic-gate 18640Sstevel@tonic-gate iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr); 18650Sstevel@tonic-gate /* read/write and streaming io on */ 18660Sstevel@tonic-gate iotte_flag = IOTTE_VALID | IOTTE_WRITE | IOTTE_CACHE; 18670Sstevel@tonic-gate 18680Sstevel@tonic-gate if (mp->dmai_rflags & DDI_DMA_CONSISTENT) 18690Sstevel@tonic-gate mp->dmai_rflags |= DMP_NOSYNC; 18700Sstevel@tonic-gate else if (!softsp->stream_buf_off) 18710Sstevel@tonic-gate iotte_flag |= IOTTE_STREAM; 18720Sstevel@tonic-gate 18730Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: ioaddr %x " 1874*1035Smike_s "size %x offset %x index %x kaddr %lx\n", 18750Sstevel@tonic-gate ioaddr, len, offset, index, addr)); 18760Sstevel@tonic-gate ASSERT(npages > 0); 18770Sstevel@tonic-gate do { 18780Sstevel@tonic-gate pfn = hat_getpfnum(as->a_hat, (caddr_t)addr); 18790Sstevel@tonic-gate if (pfn == PFN_INVALID) { 18800Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: invalid pfn " 
18810Sstevel@tonic-gate "from hat_getpfnum()\n")); 18820Sstevel@tonic-gate } 18830Sstevel@tonic-gate 18840Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, 1); 18850Sstevel@tonic-gate 18860Sstevel@tonic-gate /* load tte */ 18870Sstevel@tonic-gate *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag; 18880Sstevel@tonic-gate 18890Sstevel@tonic-gate npages--; 18900Sstevel@tonic-gate iotte_ptr++; 18910Sstevel@tonic-gate 18920Sstevel@tonic-gate addr += IOMMU_PAGESIZE; 18930Sstevel@tonic-gate ioaddr += IOMMU_PAGESIZE; 18940Sstevel@tonic-gate 18950Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 18960Sstevel@tonic-gate *pfnp = pfn; 18970Sstevel@tonic-gate pfnp++; 18980Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 18990Sstevel@tonic-gate 19000Sstevel@tonic-gate } while (npages > 0); 19010Sstevel@tonic-gate 19020Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 19030Sstevel@tonic-gate mutex_enter(&softsp->iomemlock); 19040Sstevel@tonic-gate iomemp->next = softsp->iomem; 19050Sstevel@tonic-gate softsp->iomem = iomemp; 19060Sstevel@tonic-gate mutex_exit(&softsp->iomemlock); 19070Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 19080Sstevel@tonic-gate } 19090Sstevel@tonic-gate 19100Sstevel@tonic-gate /*ARGSUSED*/ 19110Sstevel@tonic-gate void 19120Sstevel@tonic-gate iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view) 19130Sstevel@tonic-gate { 19140Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h; 19150Sstevel@tonic-gate ioaddr_t ioaddr; 19160Sstevel@tonic-gate pgcnt_t npages; 19170Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = 19180Sstevel@tonic-gate (struct fast_dvma *)mp->dmai_nexus_private; 19190Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 19200Sstevel@tonic-gate iommu_fast_dvma->softsp; 19210Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 19220Sstevel@tonic-gate struct io_mem_list **prevp, *walk; 19230Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE 
*/ 19240Sstevel@tonic-gate 19250Sstevel@tonic-gate ASSERT(softsp != NULL); 19260Sstevel@tonic-gate 19270Sstevel@tonic-gate ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index)); 19280Sstevel@tonic-gate npages = iommu_fast_dvma->pagecnt[index]; 19290Sstevel@tonic-gate 19300Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 19310Sstevel@tonic-gate mutex_enter(&softsp->iomemlock); 19320Sstevel@tonic-gate prevp = &softsp->iomem; 19330Sstevel@tonic-gate walk = softsp->iomem; 19340Sstevel@tonic-gate 19350Sstevel@tonic-gate while (walk != NULL) { 19360Sstevel@tonic-gate if (walk->ioaddr == ioaddr) { 19370Sstevel@tonic-gate *prevp = walk->next; 19380Sstevel@tonic-gate break; 19390Sstevel@tonic-gate } 19400Sstevel@tonic-gate prevp = &walk->next; 19410Sstevel@tonic-gate walk = walk->next; 19420Sstevel@tonic-gate } 19430Sstevel@tonic-gate mutex_exit(&softsp->iomemlock); 19440Sstevel@tonic-gate 19450Sstevel@tonic-gate kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1)); 19460Sstevel@tonic-gate kmem_free(walk, sizeof (struct io_mem_list)); 19470Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 19480Sstevel@tonic-gate 19490Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_unload: handle %p sync flag " 1950*1035Smike_s "addr %p sync flag pfn %llx index %x page count %lx\n", mp, 19510Sstevel@tonic-gate &iommu_fast_dvma->sync_flag[index], 19520Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index], 19530Sstevel@tonic-gate index, npages)); 19540Sstevel@tonic-gate 19550Sstevel@tonic-gate if ((mp->dmai_rflags & DMP_NOSYNC) != DMP_NOSYNC) { 19560Sstevel@tonic-gate sync_stream_buf(softsp, ioaddr, npages, 19570Sstevel@tonic-gate (int *)&iommu_fast_dvma->sync_flag[index], 19580Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index]); 19590Sstevel@tonic-gate } 19600Sstevel@tonic-gate } 19610Sstevel@tonic-gate 19620Sstevel@tonic-gate /*ARGSUSED*/ 19630Sstevel@tonic-gate void 19640Sstevel@tonic-gate iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view) 
19650Sstevel@tonic-gate { 19660Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h; 19670Sstevel@tonic-gate ioaddr_t ioaddr; 19680Sstevel@tonic-gate uint_t npages; 19690Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = 19700Sstevel@tonic-gate (struct fast_dvma *)mp->dmai_nexus_private; 19710Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 19720Sstevel@tonic-gate iommu_fast_dvma->softsp; 19730Sstevel@tonic-gate 19740Sstevel@tonic-gate if ((mp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC) 19750Sstevel@tonic-gate return; 19760Sstevel@tonic-gate 19770Sstevel@tonic-gate ASSERT(softsp != NULL); 19780Sstevel@tonic-gate ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index)); 19790Sstevel@tonic-gate npages = iommu_fast_dvma->pagecnt[index]; 19800Sstevel@tonic-gate 19810Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_sync: handle %p, " 1982*1035Smike_s "sync flag addr %p, sync flag pfn %llx\n", mp, 19830Sstevel@tonic-gate &iommu_fast_dvma->sync_flag[index], 19840Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index])); 19850Sstevel@tonic-gate 19860Sstevel@tonic-gate sync_stream_buf(softsp, ioaddr, npages, 19870Sstevel@tonic-gate (int *)&iommu_fast_dvma->sync_flag[index], 19880Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index]); 19890Sstevel@tonic-gate } 1990