xref: /onnv-gate/usr/src/uts/sun4u/io/iommu.c (revision 7632:91aa3d8541b5)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7632SNick.Todd@Sun.COM  * Common Development and Distribution License (the "License").
6*7632SNick.Todd@Sun.COM  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
22*7632SNick.Todd@Sun.COM  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #include <sys/types.h>
270Sstevel@tonic-gate #include <sys/param.h>
280Sstevel@tonic-gate #include <sys/conf.h>
290Sstevel@tonic-gate #include <sys/ddi.h>
300Sstevel@tonic-gate #include <sys/sunddi.h>
310Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
320Sstevel@tonic-gate #include <sys/cmn_err.h>
330Sstevel@tonic-gate #include <sys/kmem.h>
340Sstevel@tonic-gate #include <sys/vmem.h>
350Sstevel@tonic-gate #include <sys/sysmacros.h>
360Sstevel@tonic-gate 
370Sstevel@tonic-gate #include <sys/ddidmareq.h>
380Sstevel@tonic-gate #include <sys/sysiosbus.h>
390Sstevel@tonic-gate #include <sys/iommu.h>
400Sstevel@tonic-gate #include <sys/iocache.h>
410Sstevel@tonic-gate #include <sys/dvma.h>
420Sstevel@tonic-gate 
430Sstevel@tonic-gate #include <vm/as.h>
440Sstevel@tonic-gate #include <vm/hat.h>
450Sstevel@tonic-gate #include <vm/page.h>
460Sstevel@tonic-gate #include <vm/hat_sfmmu.h>
470Sstevel@tonic-gate #include <sys/machparam.h>
480Sstevel@tonic-gate #include <sys/machsystm.h>
490Sstevel@tonic-gate #include <sys/vmsystm.h>
500Sstevel@tonic-gate #include <sys/iommutsb.h>
510Sstevel@tonic-gate 
520Sstevel@tonic-gate /* Useful debugging Stuff */
530Sstevel@tonic-gate #include <sys/nexusdebug.h>
540Sstevel@tonic-gate #include <sys/debug.h>
550Sstevel@tonic-gate /* Bitfield debugging definitions for this file */
560Sstevel@tonic-gate #define	IOMMU_GETDVMAPAGES_DEBUG	0x1
570Sstevel@tonic-gate #define	IOMMU_DMAMAP_DEBUG		0x2
580Sstevel@tonic-gate #define	IOMMU_DMAMCTL_DEBUG		0x4
590Sstevel@tonic-gate #define	IOMMU_DMAMCTL_SYNC_DEBUG	0x8
600Sstevel@tonic-gate #define	IOMMU_DMAMCTL_HTOC_DEBUG	0x10
610Sstevel@tonic-gate #define	IOMMU_DMAMCTL_KVADDR_DEBUG	0x20
620Sstevel@tonic-gate #define	IOMMU_DMAMCTL_NEXTWIN_DEBUG	0x40
630Sstevel@tonic-gate #define	IOMMU_DMAMCTL_NEXTSEG_DEBUG	0x80
640Sstevel@tonic-gate #define	IOMMU_DMAMCTL_MOVWIN_DEBUG	0x100
650Sstevel@tonic-gate #define	IOMMU_DMAMCTL_REPWIN_DEBUG	0x200
660Sstevel@tonic-gate #define	IOMMU_DMAMCTL_GETERR_DEBUG	0x400
670Sstevel@tonic-gate #define	IOMMU_DMAMCTL_COFF_DEBUG	0x800
680Sstevel@tonic-gate #define	IOMMU_DMAMCTL_DMA_FREE_DEBUG	0x1000
690Sstevel@tonic-gate #define	IOMMU_REGISTERS_DEBUG		0x2000
700Sstevel@tonic-gate #define	IOMMU_DMA_SETUP_DEBUG		0x4000
710Sstevel@tonic-gate #define	IOMMU_DMA_UNBINDHDL_DEBUG	0x8000
720Sstevel@tonic-gate #define	IOMMU_DMA_BINDHDL_DEBUG		0x10000
730Sstevel@tonic-gate #define	IOMMU_DMA_WIN_DEBUG		0x20000
740Sstevel@tonic-gate #define	IOMMU_DMA_ALLOCHDL_DEBUG	0x40000
750Sstevel@tonic-gate #define	IOMMU_DMA_LIM_SETUP_DEBUG	0x80000
760Sstevel@tonic-gate #define	IOMMU_FASTDMA_RESERVE		0x100000
770Sstevel@tonic-gate #define	IOMMU_FASTDMA_LOAD		0x200000
780Sstevel@tonic-gate #define	IOMMU_INTER_INTRA_XFER		0x400000
790Sstevel@tonic-gate #define	IOMMU_TTE			0x800000
800Sstevel@tonic-gate #define	IOMMU_TLB			0x1000000
810Sstevel@tonic-gate #define	IOMMU_FASTDMA_SYNC		0x2000000
820Sstevel@tonic-gate 
830Sstevel@tonic-gate /* Turn on if you need to keep track of outstanding IOMMU usage */
840Sstevel@tonic-gate /* #define	IO_MEMUSAGE */
850Sstevel@tonic-gate /* Turn on to debug IOMMU unmapping code */
860Sstevel@tonic-gate /* #define	IO_MEMDEBUG */
870Sstevel@tonic-gate 
/*
 * Fast-DVMA operations vector handed to clients of this nexus.
 * NOTE(review): the three handlers are not defined in this chunk --
 * presumably declared via <sys/dvma.h>; confirm against the rest of
 * the file.
 */
static struct dvma_ops iommu_dvma_ops = {
	DVMAO_REV,
	iommu_dvma_kaddr_load,
	iommu_dvma_unload,
	iommu_dvma_sync
};
940Sstevel@tonic-gate 
extern void *sbusp;		/* sbus soft state hook */

/* Largest allocation size cached by the DVMA vmem arena (qcache_max). */
#define	DVMA_MAX_CACHE	65536

/*
 * This is the number of pages that a mapping request needs before we force
 * the TLB flush code to use diagnostic registers.  This value was determined
 * through a series of test runs measuring dma mapping setup performance.
 */
int tlb_flush_using_diag = 16;

/*
 * Supported TSB sizes expressed as the amount of IOMMU space they map,
 * ordered smallest to largest; the array index is what gets programmed
 * into the control register's TSB-size field (see iommu_resume_init()).
 */
int sysio_iommu_tsb_sizes[] = {
	IOMMU_TSB_SIZE_8M,
	IOMMU_TSB_SIZE_16M,
	IOMMU_TSB_SIZE_32M,
	IOMMU_TSB_SIZE_64M,
	IOMMU_TSB_SIZE_128M,
	IOMMU_TSB_SIZE_256M,
	IOMMU_TSB_SIZE_512M,
	IOMMU_TSB_SIZE_1G
};

static int iommu_map_window(ddi_dma_impl_t *, off_t, size_t);
1180Sstevel@tonic-gate 
/*
 * iommu_init() -- one-time IOMMU setup for an SBus instance: compute the
 * already-mapped virtual addresses of the IOMMU registers, allocate a TSB
 * from the spare pool, size the DVMA vmem arena to match it, program and
 * enable the hardware, and record the SBus I/O pfn range used later to
 * detect intra-SBus transfers.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE when no TSB can be allocated or the
 * hardware does not read back the TSB base we programmed.
 */
int
iommu_init(struct sbus_soft_state *softsp, caddr_t address)
{
	int i;
	char name[40];

#ifdef DEBUG
	debug_info = 1;	/* presumably gates DPRINTF; see <sys/nexusdebug.h> */
#endif

	/*
	 * Simply add each register's offset to the base address
	 * to calculate the already mapped virtual address of
	 * the device register...
	 *
	 * define a macro for the pointer arithmetic; all registers
	 * are 64 bits wide and are defined as uint64_t's.
	 */

#define	REG_ADDR(b, o)	(uint64_t *)((caddr_t)(b) + (o))

	softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
	softsp->tsb_base_addr = REG_ADDR(address, OFF_TSB_BASE_ADDR);
	softsp->iommu_flush_reg = REG_ADDR(address, OFF_IOMMU_FLUSH_REG);
	softsp->iommu_tlb_tag = REG_ADDR(address, OFF_IOMMU_TLB_TAG);
	softsp->iommu_tlb_data = REG_ADDR(address, OFF_IOMMU_TLB_DATA);

#undef REG_ADDR

	mutex_init(&softsp->dma_pool_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&softsp->intr_poll_list_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Set up the DVMA resource sizes */
	if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
	    IOMMU_TSB_COOKIE_NONE) {
		cmn_err(CE_WARN, "%s%d: Unable to retrieve IOMMU array.",
		    ddi_driver_name(softsp->dip),
		    ddi_get_instance(softsp->dip));
		return (DDI_FAILURE);
	}
	softsp->soft_tsb_base_addr =
	    iommu_tsb_cookie_to_va(softsp->iommu_tsb_cookie);
	softsp->iommu_dvma_size =
	    iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie) <<
	    IOMMU_TSB_TO_RNG;
	/* DVMA range occupies the very top of the 32-bit IO address space */
	softsp->iommu_dvma_base = (ioaddr_t)
	    (0 - (ioaddr_t)softsp->iommu_dvma_size);

	(void) snprintf(name, sizeof (name), "%s%d_dvma",
	    ddi_driver_name(softsp->dip), ddi_get_instance(softsp->dip));

	/*
	 * Initialize the DVMA vmem arena.
	 */
	softsp->dvma_arena = vmem_create(name,
	    (void *)(uintptr_t)softsp->iommu_dvma_base,
	    softsp->iommu_dvma_size, PAGESIZE, NULL, NULL, NULL,
	    DVMA_MAX_CACHE, VM_SLEEP);

	/* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
	softsp->dma_reserve = iommu_btop(softsp->iommu_dvma_size >> 1);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_init(&softsp->iomemlock, NULL, MUTEX_DEFAULT, NULL);
	softsp->iomem = (struct io_mem_list *)0;
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Get the base address of the TSB table and store it in the hardware
	 */

	/*
	 * We plan on the PROM flushing all TLB entries.  If this is not the
	 * case, this is where we should flush the hardware TLB.
	 */

	/* Set the IOMMU registers */
	(void) iommu_resume_init(softsp);

	/* check the convenient copy of TSB base, and flush write buffers */
	if (*softsp->tsb_base_addr !=
	    va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
		iommu_tsb_free(softsp->iommu_tsb_cookie);
		return (DDI_FAILURE);
	}

	/*
	 * Walk the "ranges" property and remember the lowest and highest
	 * pfns that SBus space maps to; used to classify transfers as
	 * intra-SBus vs. transfers to memory.
	 */
	softsp->sbus_io_lo_pfn = UINT32_MAX;
	softsp->sbus_io_hi_pfn = 0;
	for (i = 0; i < sysio_pd_getnrng(softsp->dip); i++) {
		struct rangespec *rangep;
		uint64_t addr;
		pfn_t hipfn, lopfn;

		rangep = sysio_pd_getrng(softsp->dip, i);
		addr = (uint64_t)((uint64_t)rangep->rng_bustype << 32);
		addr |= (uint64_t)rangep->rng_offset;
		lopfn = (pfn_t)(addr >> MMU_PAGESHIFT);
		addr += (uint64_t)(rangep->rng_size - 1);
		hipfn = (pfn_t)(addr >> MMU_PAGESHIFT);

		softsp->sbus_io_lo_pfn = (lopfn < softsp->sbus_io_lo_pfn) ?
		    lopfn : softsp->sbus_io_lo_pfn;

		softsp->sbus_io_hi_pfn = (hipfn > softsp->sbus_io_hi_pfn) ?
		    hipfn : softsp->sbus_io_hi_pfn;
	}

	DPRINTF(IOMMU_REGISTERS_DEBUG, ("IOMMU Control reg: %p IOMMU TSB "
	    "base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
	    (void *)softsp->iommu_ctrl_reg, (void *)softsp->tsb_base_addr,
	    (void *)softsp->iommu_flush_reg,
	    (void *)softsp->soft_tsb_base_addr));

	return (DDI_SUCCESS);
}
2330Sstevel@tonic-gate 
/*
 * function to uninitialize the iommu and release the tsb back to
 * the spare pool.  See startup.c for tsb spare management.
 */

int
iommu_uninit(struct sbus_soft_state *softsp)
{
	/* No further DVMA allocations against this instance. */
	vmem_destroy(softsp->dvma_arena);

	/*
	 * flip off the IOMMU enable switch: read-modify-write the control
	 * register, preserving only the TSB-size field (enable bit ends
	 * up clear).
	 */
	*softsp->iommu_ctrl_reg &=
	    (TSB_SIZE << TSB_SIZE_SHIFT | IOMMU_DISABLE);

	/* Hand the TSB back to the spare pool. */
	iommu_tsb_free(softsp->iommu_tsb_cookie);

	return (DDI_SUCCESS);
}
2520Sstevel@tonic-gate 
2530Sstevel@tonic-gate /*
2540Sstevel@tonic-gate  * Initialize iommu hardware registers when the system is being resumed.
2550Sstevel@tonic-gate  * (Subset of iommu_init())
2560Sstevel@tonic-gate  */
2570Sstevel@tonic-gate int
iommu_resume_init(struct sbus_soft_state * softsp)2580Sstevel@tonic-gate iommu_resume_init(struct sbus_soft_state *softsp)
2590Sstevel@tonic-gate {
2600Sstevel@tonic-gate 	int i;
2610Sstevel@tonic-gate 	uint_t tsb_size;
2620Sstevel@tonic-gate 	uint_t tsb_bytes;
2630Sstevel@tonic-gate 
2640Sstevel@tonic-gate 	/*
2650Sstevel@tonic-gate 	 * Reset the base address of the TSB table in the hardware
2660Sstevel@tonic-gate 	 */
2670Sstevel@tonic-gate 	*softsp->tsb_base_addr = va_to_pa((caddr_t)softsp->soft_tsb_base_addr);
2680Sstevel@tonic-gate 
2690Sstevel@tonic-gate 	/*
2700Sstevel@tonic-gate 	 * Figure out the correct size of the IOMMU TSB entries.  If we
2710Sstevel@tonic-gate 	 * end up with a size smaller than that needed for 8M of IOMMU
2720Sstevel@tonic-gate 	 * space, default the size to 8M.  XXX We could probably panic here
2730Sstevel@tonic-gate 	 */
2740Sstevel@tonic-gate 	i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
2750Sstevel@tonic-gate 	    - 1;
2760Sstevel@tonic-gate 
2770Sstevel@tonic-gate 	tsb_bytes = iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie);
2780Sstevel@tonic-gate 
2790Sstevel@tonic-gate 	while (i > 0) {
2800Sstevel@tonic-gate 		if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
2810Sstevel@tonic-gate 			break;
2820Sstevel@tonic-gate 		i--;
2830Sstevel@tonic-gate 	}
2840Sstevel@tonic-gate 
2850Sstevel@tonic-gate 	tsb_size = i;
2860Sstevel@tonic-gate 
2870Sstevel@tonic-gate 	/* OK, lets flip the "on" switch of the IOMMU */
2880Sstevel@tonic-gate 	*softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
2890Sstevel@tonic-gate 	    | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);
2900Sstevel@tonic-gate 
2910Sstevel@tonic-gate 	return (DDI_SUCCESS);
2920Sstevel@tonic-gate }
2930Sstevel@tonic-gate 
/*
 * Flush the IOMMU TLB entries covering [addr, addr + npages * pagesize].
 *
 * Single page: one write to the match-flush register.  Larger ranges:
 * walk the TLB's tag/data diagnostic registers and issue a flush only
 * for valid entries whose tag falls inside the range.  In both cases a
 * read of the SBus control register follows the flush writes --
 * presumably to drain store buffers so the flushes reach the hardware
 * before we return (note the reads into the otherwise-unused tmpreg).
 */
void
iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
{
	volatile uint64_t tmpreg;
	volatile uint64_t *vaddr_reg, *valid_bit_reg;
	ioaddr_t hiaddr, ioaddr;
	int i, do_flush = 0;

	/* Fast path: one page, one flush-register write. */
	if (npages == 1) {
		*softsp->iommu_flush_reg = (uint64_t)addr;
		tmpreg = *softsp->sbus_ctrl_reg;
		return;
	}

	/*
	 * NOTE(review): the containment test below is inclusive of hiaddr,
	 * so an entry exactly one page past the end may also be flushed --
	 * over-flushing is harmless, just a spare flush-register write.
	 */
	hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);
	for (i = 0, vaddr_reg = softsp->iommu_tlb_tag,
	    valid_bit_reg = softsp->iommu_tlb_data;
	    i < IOMMU_TLB_ENTRIES; i++, vaddr_reg++, valid_bit_reg++) {
		/* Recover the IO address this TLB entry maps from its tag. */
		tmpreg = *vaddr_reg;
		ioaddr = (ioaddr_t)((tmpreg & IOMMU_TLBTAG_VA_MASK) <<
		    IOMMU_TLBTAG_VA_SHIFT);

		DPRINTF(IOMMU_TLB, ("Vaddr reg 0x%p, "
		    "TLB vaddr reg %lx, IO addr 0x%x "
		    "Base addr 0x%x, Hi addr 0x%x\n",
		    (void *)vaddr_reg, tmpreg, ioaddr, addr, hiaddr));

		if (ioaddr >= addr && ioaddr <= hiaddr) {
			tmpreg = *valid_bit_reg;

			DPRINTF(IOMMU_TLB, ("Valid reg addr 0x%p, "
			    "TLB valid reg %lx\n",
			    (void *)valid_bit_reg, tmpreg));

			/* Only valid entries need a flush. */
			if (tmpreg & IOMMU_TLB_VALID) {
				*softsp->iommu_flush_reg = (uint64_t)ioaddr;
				do_flush = 1;
			}
		}
	}

	/* Drain store buffers only if we actually issued a flush. */
	if (do_flush)
		tmpreg = *softsp->sbus_ctrl_reg;
}
3380Sstevel@tonic-gate 
3390Sstevel@tonic-gate 
/*
 * Shorthand defines
 */

#define	ALO		dma_lim->dlim_addr_lo
#define	AHI		dma_lim->dlim_addr_hi
#define	OBJSIZE		dmareq->dmar_object.dmao_size
/*
 * Address of the IO TTE mapping DVMA address "vaddr", given "base", the
 * kernel virtual address of the TSB.  Relies on a local "softsp" being
 * in scope at every use site.
 */
#define	IOTTE_NDX(vaddr, base) (base + \
		(int)(iommu_btop((vaddr & ~IOMMU_PAGEMASK) - \
		softsp->iommu_dvma_base)))
/*
 * If DDI_DMA_PARTIAL flag is set and the request is for
 * less than MIN_DVMA_WIN_SIZE, it's not worth the hassle so
 * we turn off the DDI_DMA_PARTIAL flag
 */
#define	MIN_DVMA_WIN_SIZE	(128)
3560Sstevel@tonic-gate 
3570Sstevel@tonic-gate /* ARGSUSED */
3580Sstevel@tonic-gate void
iommu_remove_mappings(ddi_dma_impl_t * mp)3590Sstevel@tonic-gate iommu_remove_mappings(ddi_dma_impl_t *mp)
3600Sstevel@tonic-gate {
3610Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG)
3620Sstevel@tonic-gate 	pgcnt_t npages;
3630Sstevel@tonic-gate 	ioaddr_t ioaddr;
3640Sstevel@tonic-gate 	volatile uint64_t *iotte_ptr;
3650Sstevel@tonic-gate 	ioaddr_t ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET;
3660Sstevel@tonic-gate 	pgcnt_t npages = mp->dmai_ndvmapages;
3670Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
3680Sstevel@tonic-gate 	struct sbus_soft_state *softsp = mppriv->softsp;
3690Sstevel@tonic-gate 
3700Sstevel@tonic-gate #if defined(IO_MEMUSAGE)
3710Sstevel@tonic-gate 	struct io_mem_list **prevp, *walk;
3720Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
3730Sstevel@tonic-gate 
3740Sstevel@tonic-gate 	ASSERT(softsp != NULL);
3750Sstevel@tonic-gate 	/*
3760Sstevel@tonic-gate 	 * Run thru the mapped entries and free 'em
3770Sstevel@tonic-gate 	 */
3780Sstevel@tonic-gate 
3790Sstevel@tonic-gate 	ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET;
3800Sstevel@tonic-gate 	npages = mp->dmai_ndvmapages;
3810Sstevel@tonic-gate 
3820Sstevel@tonic-gate #if defined(IO_MEMUSAGE)
3830Sstevel@tonic-gate 	mutex_enter(&softsp->iomemlock);
3840Sstevel@tonic-gate 	prevp = &softsp->iomem;
3850Sstevel@tonic-gate 	walk = softsp->iomem;
3860Sstevel@tonic-gate 
3870Sstevel@tonic-gate 	while (walk) {
3880Sstevel@tonic-gate 		if (walk->ioaddr == ioaddr) {
3890Sstevel@tonic-gate 			*prevp = walk->next;
3900Sstevel@tonic-gate 			break;
3910Sstevel@tonic-gate 		}
3920Sstevel@tonic-gate 
3930Sstevel@tonic-gate 		prevp = &walk->next;
3940Sstevel@tonic-gate 		walk = walk->next;
3950Sstevel@tonic-gate 	}
3960Sstevel@tonic-gate 	mutex_exit(&softsp->iomemlock);
3970Sstevel@tonic-gate 
3980Sstevel@tonic-gate 	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
3990Sstevel@tonic-gate 	kmem_free(walk, sizeof (struct io_mem_list));
4000Sstevel@tonic-gate #endif /* IO_MEMUSAGE */
4010Sstevel@tonic-gate 
4020Sstevel@tonic-gate 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
4030Sstevel@tonic-gate 
4040Sstevel@tonic-gate 	while (npages) {
4050Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_DEBUG,
4060Sstevel@tonic-gate 		    ("dma_mctl: freeing ioaddr %x iotte %p\n",
4070Sstevel@tonic-gate 		    ioaddr, iotte_ptr));
4080Sstevel@tonic-gate 		*iotte_ptr = (uint64_t)0;	/* unload tte */
4090Sstevel@tonic-gate 		iommu_tlb_flush(softsp, ioaddr, 1);
4100Sstevel@tonic-gate 		npages--;
4110Sstevel@tonic-gate 		ioaddr += IOMMU_PAGESIZE;
4120Sstevel@tonic-gate 		iotte_ptr++;
4130Sstevel@tonic-gate 	}
4140Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */
4150Sstevel@tonic-gate }
4160Sstevel@tonic-gate 
4170Sstevel@tonic-gate 
/*
 * Load IOMMU mappings for a virtual-address DMA object: for each page of
 * the object starting at kernel/user va "addr", look up the pfn through
 * the address space's hat and install an IO TTE at the DVMA range already
 * reserved in mp->dmai_mapping.
 *
 * Returns DDI_DMA_MAPPED on success, or DDI_DMA_NOMAPPING when a page is
 * neither memory, intra-SBus, nor DMA-capable (the cleanup of partially
 * loaded TTEs only happens under DEBUG && IO_MEMDEBUG).
 */
int
iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr)
{
	pfn_t pfn;
	struct as *as = NULL;
	pgcnt_t npages;
	ioaddr_t ioaddr;
	uint_t offset;
	volatile uint64_t *iotte_ptr;
	uint64_t tmp_iotte_flag;
	int rval = DDI_DMA_MAPPED;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;
	int diag_tlb_flush;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
	struct io_mem_list *iomemp;
	pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */

	ASSERT(softsp != NULL);

	/* Set Valid and Cache for mem xfer */
	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;

	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
	npages = iommu_btopr(mp->dmai_size + offset);
	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
	/* Large requests flush via the diag registers in one pass up front */
	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;

	/* NULL v_as means a kernel-virtual object; fall back to kas. */
	as = mp->dmai_object.dmao_obj.virt_obj.v_as;
	if (as == NULL)
		as = &kas;

	/*
	 * Set the per object bits of the TTE here. We optimize this for
	 * the memory case so that the while loop overhead is minimal.
	 */
	/* Turn on NOSYNC if we need consistent mem */
	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
		mp->dmai_rflags |= DMP_NOSYNC;
		tmp_iotte_flag ^= IOTTE_STREAM;	/* XOR clears: STREAM was set */
	/* Set streaming mode if not consistent mem */
	} else if (softsp->stream_buf_off) {
		tmp_iotte_flag ^= IOTTE_STREAM;
	}

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	/* Record this mapping (with per-page pfns) on the usage list. */
	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
	iomemp->rdip = mp->dmai_rdip;
	iomemp->ioaddr = ioaddr;
	iomemp->addr = addr;
	iomemp->npages = npages;
	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
	    KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Grab the mappings from the dmmu and stick 'em into the
	 * iommu.
	 */
	ASSERT(npages != 0);

	/* If we're going to flush the TLB using diag mode, do it now. */
	if (diag_tlb_flush)
		iommu_tlb_flush(softsp, ioaddr, npages);

	do {
		uint64_t iotte_flag = tmp_iotte_flag;

		/*
		 * Fetch the pfn for the DMA object
		 */

		ASSERT(as);
		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
		ASSERT(pfn != PFN_INVALID);

		if (!pf_is_memory(pfn)) {
			/* DVMA'ing to IO space */

			/* Turn off cache bit if set */
			if (iotte_flag & IOTTE_CACHE)
				iotte_flag ^= IOTTE_CACHE;

			/* Turn off stream bit if set */
			if (iotte_flag & IOTTE_STREAM)
				iotte_flag ^= IOTTE_STREAM;

			if (IS_INTRA_SBUS(softsp, pfn)) {
				/* Intra sbus transfer */

				/* Turn on intra flag */
				iotte_flag |= IOTTE_INTRA;

				DPRINTF(IOMMU_INTER_INTRA_XFER, (
				    "Intra xfer pfnum %lx TTE %lx\n",
				    pfn, iotte_flag));
			} else {
				if (pf_is_dmacapable(pfn) == 1) {
					/*EMPTY*/
					DPRINTF(IOMMU_INTER_INTRA_XFER,
					    ("Inter xfer pfnum %lx "
					    "tte hi %lx\n",
					    pfn, iotte_flag));
				} else {
					/* Page cannot be DMA'd to at all */
					rval = DDI_DMA_NOMAPPING;
#if defined(DEBUG) && defined(IO_MEMDEBUG)
					goto bad;
#endif /* DEBUG && IO_MEMDEBUG */
				}
			}
		}
		addr += IOMMU_PAGESIZE;

		DPRINTF(IOMMU_TTE, ("vaddr mapping: tte index %p pfn %lx "
		    "tte flag %lx addr %lx ioaddr %x\n",
		    (void *)iotte_ptr, pfn, iotte_flag, addr, ioaddr));

		/* Flush the IOMMU TLB before loading a new mapping */
		if (!diag_tlb_flush)
			iommu_tlb_flush(softsp, ioaddr, 1);

		/* Set the hardware IO TTE */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

		ioaddr += IOMMU_PAGESIZE;
		npages--;
		iotte_ptr++;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
		*pfnp = pfn;
		pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */
	} while (npages != 0);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	iomemp->next = softsp->iomem;
	softsp->iomem = iomemp;
	mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

	return (rval);

#if defined(DEBUG) && defined(IO_MEMDEBUG)
bad:
	/* If we fail a mapping, free up any mapping resources used */
	iommu_remove_mappings(mp);
	return (rval);
#endif /* DEBUG && IO_MEMDEBUG */
}
5680Sstevel@tonic-gate 
5690Sstevel@tonic-gate 
5700Sstevel@tonic-gate int
iommu_create_pp_mappings(ddi_dma_impl_t * mp,page_t * pp,page_t ** pplist)5710Sstevel@tonic-gate iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
5720Sstevel@tonic-gate {
5730Sstevel@tonic-gate 	pfn_t pfn;
5740Sstevel@tonic-gate 	pgcnt_t npages;
5750Sstevel@tonic-gate 	ioaddr_t ioaddr;
5760Sstevel@tonic-gate 	uint_t offset;
5770Sstevel@tonic-gate 	volatile uint64_t *iotte_ptr;
5780Sstevel@tonic-gate 	uint64_t tmp_iotte_flag;
5790Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
5800Sstevel@tonic-gate 	struct sbus_soft_state *softsp = mppriv->softsp;
5810Sstevel@tonic-gate 	int diag_tlb_flush;
5820Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
5830Sstevel@tonic-gate 	struct io_mem_list *iomemp;
5840Sstevel@tonic-gate 	pfn_t *pfnp;
5850Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
5860Sstevel@tonic-gate 	int rval = DDI_DMA_MAPPED;
5870Sstevel@tonic-gate 
5880Sstevel@tonic-gate 	/* Set Valid and Cache for mem xfer */
5890Sstevel@tonic-gate 	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
5900Sstevel@tonic-gate 
5910Sstevel@tonic-gate 	ASSERT(softsp != NULL);
5920Sstevel@tonic-gate 
5930Sstevel@tonic-gate 	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
5940Sstevel@tonic-gate 	npages = iommu_btopr(mp->dmai_size + offset);
5950Sstevel@tonic-gate 	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
5960Sstevel@tonic-gate 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
5970Sstevel@tonic-gate 	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
5980Sstevel@tonic-gate 
5990Sstevel@tonic-gate 	/*
6000Sstevel@tonic-gate 	 * Set the per object bits of the TTE here. We optimize this for
6010Sstevel@tonic-gate 	 * the memory case so that the while loop overhead is minimal.
6020Sstevel@tonic-gate 	 */
6030Sstevel@tonic-gate 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
6040Sstevel@tonic-gate 		/* Turn on NOSYNC if we need consistent mem */
6050Sstevel@tonic-gate 		mp->dmai_rflags |= DMP_NOSYNC;
6060Sstevel@tonic-gate 		tmp_iotte_flag ^= IOTTE_STREAM;
6070Sstevel@tonic-gate 	} else if (softsp->stream_buf_off) {
6080Sstevel@tonic-gate 		/* Set streaming mode if not consistent mem */
6090Sstevel@tonic-gate 		tmp_iotte_flag ^= IOTTE_STREAM;
6100Sstevel@tonic-gate 	}
6110Sstevel@tonic-gate 
6120Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
6130Sstevel@tonic-gate 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
6140Sstevel@tonic-gate 	iomemp->rdip = mp->dmai_rdip;
6150Sstevel@tonic-gate 	iomemp->ioaddr = ioaddr;
6160Sstevel@tonic-gate 	iomemp->npages = npages;
6170Sstevel@tonic-gate 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
6180Sstevel@tonic-gate 	    KM_SLEEP);
6190Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
6200Sstevel@tonic-gate 	/*
6210Sstevel@tonic-gate 	 * Grab the mappings from the dmmu and stick 'em into the
6220Sstevel@tonic-gate 	 * iommu.
6230Sstevel@tonic-gate 	 */
6240Sstevel@tonic-gate 	ASSERT(npages != 0);
6250Sstevel@tonic-gate 
6260Sstevel@tonic-gate 	/* If we're going to flush the TLB using diag mode, do it now. */
6270Sstevel@tonic-gate 	if (diag_tlb_flush)
6280Sstevel@tonic-gate 		iommu_tlb_flush(softsp, ioaddr, npages);
6290Sstevel@tonic-gate 
6300Sstevel@tonic-gate 	do {
6310Sstevel@tonic-gate 		uint64_t iotte_flag;
6320Sstevel@tonic-gate 
6330Sstevel@tonic-gate 		iotte_flag = tmp_iotte_flag;
6340Sstevel@tonic-gate 
6350Sstevel@tonic-gate 		if (pp != NULL) {
6360Sstevel@tonic-gate 			pfn = pp->p_pagenum;
6370Sstevel@tonic-gate 			pp = pp->p_next;
6380Sstevel@tonic-gate 		} else {
6390Sstevel@tonic-gate 			pfn = (*pplist)->p_pagenum;
6400Sstevel@tonic-gate 			pplist++;
6410Sstevel@tonic-gate 		}
6420Sstevel@tonic-gate 
6431035Smike_s 		DPRINTF(IOMMU_TTE, ("pp mapping TTE index %p pfn %lx "
644*7632SNick.Todd@Sun.COM 		    "tte flag %lx ioaddr %x\n", (void *)iotte_ptr,
6450Sstevel@tonic-gate 		    pfn, iotte_flag, ioaddr));
6460Sstevel@tonic-gate 
6470Sstevel@tonic-gate 		/* Flush the IOMMU TLB before loading a new mapping */
6480Sstevel@tonic-gate 		if (!diag_tlb_flush)
6490Sstevel@tonic-gate 			iommu_tlb_flush(softsp, ioaddr, 1);
6500Sstevel@tonic-gate 
6510Sstevel@tonic-gate 		/* Set the hardware IO TTE */
6520Sstevel@tonic-gate 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
6530Sstevel@tonic-gate 
6540Sstevel@tonic-gate 		ioaddr += IOMMU_PAGESIZE;
6550Sstevel@tonic-gate 		npages--;
6560Sstevel@tonic-gate 		iotte_ptr++;
6570Sstevel@tonic-gate 
6580Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
6590Sstevel@tonic-gate 		*pfnp = pfn;
6600Sstevel@tonic-gate 		pfnp++;
6610Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
6620Sstevel@tonic-gate 
6630Sstevel@tonic-gate 	} while (npages != 0);
6640Sstevel@tonic-gate 
6650Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
6660Sstevel@tonic-gate 	mutex_enter(&softsp->iomemlock);
6670Sstevel@tonic-gate 	iomemp->next = softsp->iomem;
6680Sstevel@tonic-gate 	softsp->iomem = iomemp;
6690Sstevel@tonic-gate 	mutex_exit(&softsp->iomemlock);
6700Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
6710Sstevel@tonic-gate 
6720Sstevel@tonic-gate 	return (rval);
6730Sstevel@tonic-gate }
6740Sstevel@tonic-gate 
6750Sstevel@tonic-gate 
6760Sstevel@tonic-gate int
iommu_dma_lim_setup(dev_info_t * dip,dev_info_t * rdip,struct sbus_soft_state * softsp,uint_t * burstsizep,uint_t burstsize64,uint_t * minxferp,uint_t dma_flags)6770Sstevel@tonic-gate iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip,
6780Sstevel@tonic-gate     struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64,
6790Sstevel@tonic-gate     uint_t *minxferp, uint_t dma_flags)
6800Sstevel@tonic-gate {
6810Sstevel@tonic-gate 	struct regspec *rp;
6820Sstevel@tonic-gate 
6830Sstevel@tonic-gate 	/* Take care of 64 bit limits. */
6840Sstevel@tonic-gate 	if (!(dma_flags & DDI_DMA_SBUS_64BIT)) {
6850Sstevel@tonic-gate 		/*
6860Sstevel@tonic-gate 		 * return burst size for 32-bit mode
6870Sstevel@tonic-gate 		 */
6880Sstevel@tonic-gate 		*burstsizep &= softsp->sbus_burst_sizes;
6890Sstevel@tonic-gate 		return (DDI_FAILURE);
6900Sstevel@tonic-gate 	}
6910Sstevel@tonic-gate 
6920Sstevel@tonic-gate 	/*
6930Sstevel@tonic-gate 	 * check if SBus supports 64 bit and if caller
6940Sstevel@tonic-gate 	 * is child of SBus. No support through bridges
6950Sstevel@tonic-gate 	 */
6960Sstevel@tonic-gate 	if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) {
6970Sstevel@tonic-gate 		/*
6980Sstevel@tonic-gate 		 * SBus doesn't support it or bridge. Do 32-bit
6990Sstevel@tonic-gate 		 * xfers
7000Sstevel@tonic-gate 		 */
7010Sstevel@tonic-gate 		*burstsizep &= softsp->sbus_burst_sizes;
7020Sstevel@tonic-gate 		return (DDI_FAILURE);
7030Sstevel@tonic-gate 	}
7040Sstevel@tonic-gate 
7050Sstevel@tonic-gate 	rp = ddi_rnumber_to_regspec(rdip, 0);
7060Sstevel@tonic-gate 	if (rp == NULL) {
7070Sstevel@tonic-gate 		*burstsizep &= softsp->sbus_burst_sizes;
7080Sstevel@tonic-gate 		return (DDI_FAILURE);
7090Sstevel@tonic-gate 	}
7100Sstevel@tonic-gate 
7110Sstevel@tonic-gate 	/* Check for old-style 64 bit burstsizes */
7120Sstevel@tonic-gate 	if (burstsize64 & SYSIO64_BURST_MASK) {
7130Sstevel@tonic-gate 		/* Scale back burstsizes if Necessary */
7140Sstevel@tonic-gate 		*burstsizep &= (softsp->sbus64_burst_sizes |
7150Sstevel@tonic-gate 		    softsp->sbus_burst_sizes);
7160Sstevel@tonic-gate 	} else {
7170Sstevel@tonic-gate 		/* Get the 64 bit burstsizes. */
7180Sstevel@tonic-gate 		*burstsizep = burstsize64;
7190Sstevel@tonic-gate 
7200Sstevel@tonic-gate 		/* Scale back burstsizes if Necessary */
7210Sstevel@tonic-gate 		*burstsizep &= (softsp->sbus64_burst_sizes >>
7220Sstevel@tonic-gate 		    SYSIO64_BURST_SHIFT);
7230Sstevel@tonic-gate 	}
7240Sstevel@tonic-gate 
7250Sstevel@tonic-gate 	/*
7260Sstevel@tonic-gate 	 * Set the largest value of the smallest burstsize that the
7270Sstevel@tonic-gate 	 * device or the bus can manage.
7280Sstevel@tonic-gate 	 */
7290Sstevel@tonic-gate 	*minxferp = MAX(*minxferp,
7300Sstevel@tonic-gate 	    (1 << (ddi_ffs(softsp->sbus64_burst_sizes) - 1)));
7310Sstevel@tonic-gate 
7320Sstevel@tonic-gate 	return (DDI_SUCCESS);
7330Sstevel@tonic-gate }
7340Sstevel@tonic-gate 
7350Sstevel@tonic-gate 
7360Sstevel@tonic-gate int
iommu_dma_allochdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_attr_t * dma_attr,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_handle_t * handlep)7370Sstevel@tonic-gate iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
7380Sstevel@tonic-gate     ddi_dma_attr_t *dma_attr, int (*waitfp)(caddr_t), caddr_t arg,
7390Sstevel@tonic-gate     ddi_dma_handle_t *handlep)
7400Sstevel@tonic-gate {
7410Sstevel@tonic-gate 	ioaddr_t addrlow, addrhigh, segalign;
7420Sstevel@tonic-gate 	ddi_dma_impl_t *mp;
7430Sstevel@tonic-gate 	struct dma_impl_priv *mppriv;
7440Sstevel@tonic-gate 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
7450Sstevel@tonic-gate 	    ddi_get_soft_state(sbusp, ddi_get_instance(dip));
7460Sstevel@tonic-gate 
7470Sstevel@tonic-gate 	/*
7480Sstevel@tonic-gate 	 * Setup dma burstsizes and min-xfer counts.
7490Sstevel@tonic-gate 	 */
7500Sstevel@tonic-gate 	(void) iommu_dma_lim_setup(dip, rdip, softsp,
7510Sstevel@tonic-gate 	    &dma_attr->dma_attr_burstsizes,
7520Sstevel@tonic-gate 	    dma_attr->dma_attr_burstsizes, &dma_attr->dma_attr_minxfer,
7530Sstevel@tonic-gate 	    dma_attr->dma_attr_flags);
7540Sstevel@tonic-gate 
7550Sstevel@tonic-gate 	if (dma_attr->dma_attr_burstsizes == 0)
7560Sstevel@tonic-gate 		return (DDI_DMA_BADATTR);
7570Sstevel@tonic-gate 
7580Sstevel@tonic-gate 	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
7590Sstevel@tonic-gate 	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
7600Sstevel@tonic-gate 	segalign = (ioaddr_t)dma_attr->dma_attr_seg;
7610Sstevel@tonic-gate 
7620Sstevel@tonic-gate 	/*
7630Sstevel@tonic-gate 	 * Check sanity for hi and lo address limits
7640Sstevel@tonic-gate 	 */
7650Sstevel@tonic-gate 	if ((addrhigh <= addrlow) ||
7660Sstevel@tonic-gate 	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
7670Sstevel@tonic-gate 		return (DDI_DMA_BADATTR);
7680Sstevel@tonic-gate 	}
7690Sstevel@tonic-gate 	if (dma_attr->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL)
7700Sstevel@tonic-gate 		return (DDI_DMA_BADATTR);
7710Sstevel@tonic-gate 
7720Sstevel@tonic-gate 	mppriv = kmem_zalloc(sizeof (*mppriv),
7730Sstevel@tonic-gate 	    (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
7740Sstevel@tonic-gate 
7750Sstevel@tonic-gate 	if (mppriv == NULL) {
7760Sstevel@tonic-gate 		if (waitfp != DDI_DMA_DONTWAIT) {
777*7632SNick.Todd@Sun.COM 			ddi_set_callback(waitfp, arg,
778*7632SNick.Todd@Sun.COM 			    &softsp->dvma_call_list_id);
7790Sstevel@tonic-gate 		}
7800Sstevel@tonic-gate 		return (DDI_DMA_NORESOURCES);
7810Sstevel@tonic-gate 	}
7820Sstevel@tonic-gate 	mp = (ddi_dma_impl_t *)mppriv;
7830Sstevel@tonic-gate 
7841035Smike_s 	DPRINTF(IOMMU_DMA_ALLOCHDL_DEBUG, ("dma_allochdl: (%s) handle %p "
7850Sstevel@tonic-gate 	    "hi %x lo %x min %x burst %x\n",
786*7632SNick.Todd@Sun.COM 	    ddi_get_name(dip), (void *)mp, addrhigh, addrlow,
7870Sstevel@tonic-gate 	    dma_attr->dma_attr_minxfer, dma_attr->dma_attr_burstsizes));
7880Sstevel@tonic-gate 
7890Sstevel@tonic-gate 	mp->dmai_rdip = rdip;
7900Sstevel@tonic-gate 	mp->dmai_minxfer = (uint_t)dma_attr->dma_attr_minxfer;
7910Sstevel@tonic-gate 	mp->dmai_burstsizes = (uint_t)dma_attr->dma_attr_burstsizes;
7920Sstevel@tonic-gate 	mp->dmai_attr = *dma_attr;
7930Sstevel@tonic-gate 	/* See if the DMA engine has any limit restrictions. */
7940Sstevel@tonic-gate 	if (segalign == (ioaddr_t)UINT32_MAX &&
7950Sstevel@tonic-gate 	    addrhigh == (ioaddr_t)UINT32_MAX &&
7960Sstevel@tonic-gate 	    (dma_attr->dma_attr_align <= IOMMU_PAGESIZE) && addrlow == 0) {
7970Sstevel@tonic-gate 		mp->dmai_rflags |= DMP_NOLIMIT;
7980Sstevel@tonic-gate 	}
7990Sstevel@tonic-gate 	mppriv->softsp = softsp;
8000Sstevel@tonic-gate 	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
8010Sstevel@tonic-gate 
8020Sstevel@tonic-gate 	*handlep = (ddi_dma_handle_t)mp;
8030Sstevel@tonic-gate 	return (DDI_SUCCESS);
8040Sstevel@tonic-gate }
8050Sstevel@tonic-gate 
8060Sstevel@tonic-gate /*ARGSUSED*/
8070Sstevel@tonic-gate int
iommu_dma_freehdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle)8080Sstevel@tonic-gate iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
8090Sstevel@tonic-gate {
8100Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)handle;
8110Sstevel@tonic-gate 	struct sbus_soft_state *softsp = mppriv->softsp;
8120Sstevel@tonic-gate 	ASSERT(softsp != NULL);
8130Sstevel@tonic-gate 
8140Sstevel@tonic-gate 	kmem_free(mppriv, sizeof (*mppriv));
8150Sstevel@tonic-gate 
8160Sstevel@tonic-gate 	if (softsp->dvma_call_list_id != 0) {
8170Sstevel@tonic-gate 		ddi_run_callback(&softsp->dvma_call_list_id);
8180Sstevel@tonic-gate 	}
8190Sstevel@tonic-gate 	return (DDI_SUCCESS);
8200Sstevel@tonic-gate }
8210Sstevel@tonic-gate 
8220Sstevel@tonic-gate static int
check_dma_attr(struct ddi_dma_req * dmareq,ddi_dma_attr_t * dma_attr,uint32_t * size)8230Sstevel@tonic-gate check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr,
8240Sstevel@tonic-gate     uint32_t *size)
8250Sstevel@tonic-gate {
8260Sstevel@tonic-gate 	ioaddr_t addrlow;
8270Sstevel@tonic-gate 	ioaddr_t addrhigh;
8280Sstevel@tonic-gate 	uint32_t segalign;
8290Sstevel@tonic-gate 	uint32_t smask;
8300Sstevel@tonic-gate 
8310Sstevel@tonic-gate 	smask = *size - 1;
8320Sstevel@tonic-gate 	segalign = dma_attr->dma_attr_seg;
8330Sstevel@tonic-gate 	if (smask > segalign) {
8340Sstevel@tonic-gate 		if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
8350Sstevel@tonic-gate 			return (DDI_DMA_TOOBIG);
8360Sstevel@tonic-gate 		*size = segalign + 1;
8370Sstevel@tonic-gate 	}
8380Sstevel@tonic-gate 	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
8390Sstevel@tonic-gate 	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
8400Sstevel@tonic-gate 	if (addrlow + smask > addrhigh || addrlow + smask < addrlow) {
8410Sstevel@tonic-gate 		if (!((addrlow + dmareq->dmar_object.dmao_size == 0) &&
8420Sstevel@tonic-gate 		    (addrhigh == (ioaddr_t)-1))) {
8430Sstevel@tonic-gate 			if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
8440Sstevel@tonic-gate 				return (DDI_DMA_TOOBIG);
8450Sstevel@tonic-gate 			*size = MIN(addrhigh - addrlow + 1, *size);
8460Sstevel@tonic-gate 		}
8470Sstevel@tonic-gate 	}
8480Sstevel@tonic-gate 	return (DDI_DMA_MAPOK);
8490Sstevel@tonic-gate }
8500Sstevel@tonic-gate 
/*
 * Bind a DMA object (virtual address range or page list) to the handle:
 * allocate DVMA space from the arena, load IOMMU TTEs for the object's
 * pages, and return the first (and only) cookie.  Returns
 * DDI_DMA_MAPPED, DDI_DMA_PARTIAL_MAP (windowed mapping established),
 * or a DDI_DMA_* error code.
 */
int
iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	page_t *pp;
	uint32_t size;
	ioaddr_t ioaddr;
	uint_t offset;
	uintptr_t addr = 0;
	pgcnt_t npages;
	int rval;
	ddi_dma_attr_t *dma_attr;
	struct sbus_soft_state *softsp;
	struct page **pplist = NULL;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

#ifdef lint
	dip = dip;
	rdip = rdip;
#endif

	/* A handle supports only one binding at a time. */
	if (mp->dmai_inuse)
		return (DDI_DMA_INUSE);

	dma_attr = &mp->dmai_attr;
	size = (uint32_t)dmareq->dmar_object.dmao_size;
	/* Unconstrained (DMP_NOLIMIT) handles skip attribute checking. */
	if (!(mp->dmai_rflags & DMP_NOLIMIT)) {
		rval = check_dma_attr(dmareq, dma_attr, &size);
		if (rval != DDI_DMA_MAPOK)
			return (rval);
	}
	mp->dmai_inuse = 1;
	mp->dmai_offset = 0;
	/* Take the request's DDI flags but preserve the NOLIMIT bit. */
	mp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
	    (mp->dmai_rflags & DMP_NOLIMIT);

	/*
	 * Decode the object: either a kernel virtual range (possibly with
	 * a shadow page list in v_priv) or a linked list of page_t's.
	 */
	switch (dmareq->dmar_object.dmao_type) {
	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		addr = (uintptr_t)dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
		offset = addr & IOMMU_PAGEOFFSET;
		pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
		npages = iommu_btopr(OBJSIZE + offset);

		DPRINTF(IOMMU_DMAMAP_DEBUG, ("dma_map vaddr: %lx pages "
		    "req addr %lx off %x OBJSIZE %x\n",
		    npages, addr, offset, OBJSIZE));

		/* We don't need the addr anymore if we have a shadow list */
		if (pplist != NULL)
			addr = NULL;
		pp = NULL;
		break;

	case DMA_OTYP_PAGES:
		pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
		offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
		npages = iommu_btopr(OBJSIZE + offset);
		break;

	case DMA_OTYP_PADDR:
	default:
		/*
		 * Not a supported type for this implementation
		 */
		rval = DDI_DMA_NOMAPPING;
		goto bad;
	}

	/* Get our soft state once we know we're mapping an object. */
	softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
		if (size != OBJSIZE) {
			/*
			 * If the request is for partial mapping arrangement,
			 * the device has to be able to address at least the
			 * size of the window we are establishing.
			 */
			if (size < iommu_ptob(MIN_DVMA_WIN_SIZE)) {
				rval = DDI_DMA_NOMAPPING;
				goto bad;
			}
			npages = iommu_btopr(size + offset);
		}
		/*
		 * If the size requested is less than a moderate amt,
		 * skip the partial mapping stuff- it's not worth the
		 * effort.
		 */
		if (npages > MIN_DVMA_WIN_SIZE) {
			/* Clamp the window to MIN_DVMA_WIN_SIZE pages. */
			npages = MIN_DVMA_WIN_SIZE + iommu_btopr(offset);
			size = iommu_ptob(MIN_DVMA_WIN_SIZE);
			DPRINTF(IOMMU_DMA_SETUP_DEBUG, ("dma_setup: SZ %x pg "
			    "%lx sz %x\n", OBJSIZE, npages, size));
			/* Remember the shadow list for later windows. */
			if (pplist != NULL) {
				mp->dmai_minfo = (void *)pplist;
				mp->dmai_rflags |= DMP_SHADOW;
			}
		} else {
			/* Object fits in one window: drop PARTIAL. */
			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
		}
	} else {
		/* Non-partial requests must leave slack in DVMA space. */
		if (npages >= iommu_btop(softsp->iommu_dvma_size) -
		    MIN_DVMA_WIN_SIZE) {
			rval = DDI_DMA_TOOBIG;
			goto bad;
		}
	}

	/*
	 * save dmareq-object, size and npages into mp
	 */
	mp->dmai_object = dmareq->dmar_object;
	mp->dmai_size = size;
	mp->dmai_ndvmapages = npages;

	if (mp->dmai_rflags & DMP_NOLIMIT) {
		/* Unconstrained handle: plain (fast) arena allocation. */
		ioaddr = (ioaddr_t)(uintptr_t)vmem_alloc(softsp->dvma_arena,
		    iommu_ptob(npages),
		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
		if (ioaddr == 0) {
			rval = DDI_DMA_NORESOURCES;
			goto bad;
		}

		/*
		 * If we have a 1 page request and we're working with a page
		 * list, we're going to speed load an IOMMU entry.
		 */
		if (npages == 1 && !addr) {
			uint64_t iotte_flag = IOTTE_VALID | IOTTE_CACHE |
			    IOTTE_WRITE | IOTTE_STREAM;
			volatile uint64_t *iotte_ptr;
			pfn_t pfn;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
			struct io_mem_list *iomemp;
			pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */

			iotte_ptr = IOTTE_NDX(ioaddr,
			    softsp->soft_tsb_base_addr);

			/*
			 * Consistent memory: no streaming, and NOSYNC so
			 * dma_sync becomes a no-op.  Otherwise stream
			 * unless the streaming buffer is disabled.
			 */
			if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
				mp->dmai_rflags |= DMP_NOSYNC;
				iotte_flag ^= IOTTE_STREAM;
			} else if (softsp->stream_buf_off)
				iotte_flag ^= IOTTE_STREAM;

			/* One page can never be a partial mapping. */
			mp->dmai_rflags ^= DDI_DMA_PARTIAL;

			if (pp != NULL)
				pfn = pp->p_pagenum;
			else
				pfn = (*pplist)->p_pagenum;

			/* Flush the old TLB entry before loading the TTE. */
			iommu_tlb_flush(softsp, ioaddr, 1);

			*iotte_ptr =
			    ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

			mp->dmai_mapping = (ioaddr_t)(ioaddr + offset);
			mp->dmai_nwin = 0;
			if (cp != NULL) {
				cp->dmac_notused = 0;
				cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
				cp->dmac_size = mp->dmai_size;
				cp->dmac_type = 0;
				*ccountp = 1;
			}

			DPRINTF(IOMMU_TTE, ("speed loading: TTE index %p "
			    "pfn %lx tte flag %lx addr %lx ioaddr %x\n",
			    (void *)iotte_ptr, pfn, iotte_flag, addr, ioaddr));

#if defined(DEBUG) && defined(IO_MEMUSAGE)
			/* Record the mapping on the debug I/O-memory list. */
			iomemp = kmem_alloc(sizeof (struct io_mem_list),
			    KM_SLEEP);
			iomemp->rdip = mp->dmai_rdip;
			iomemp->ioaddr = ioaddr;
			iomemp->addr = addr;
			iomemp->npages = npages;
			pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) *
			    (npages + 1), KM_SLEEP);
			*pfnp = pfn;
			mutex_enter(&softsp->iomemlock);
			iomemp->next = softsp->iomem;
			softsp->iomem = iomemp;
			mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

			return (DDI_DMA_MAPPED);
		}
	} else {
		/*
		 * Constrained handle: honor alignment, segment boundary
		 * and the [addr_lo, addr_hi] window via vmem_xalloc.
		 */
		ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
		    iommu_ptob(npages),
		    MAX((uint_t)dma_attr->dma_attr_align, IOMMU_PAGESIZE), 0,
		    (uint_t)dma_attr->dma_attr_seg + 1,
		    (void *)(uintptr_t)(ioaddr_t)dma_attr->dma_attr_addr_lo,
		    (void *)(uintptr_t)
		    ((ioaddr_t)dma_attr->dma_attr_addr_hi + 1),
		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
	}

	if (ioaddr == 0) {
		/*
		 * NOTE(review): a sleeping allocation that still fails is
		 * reported as NOMAPPING (constraints unsatisfiable), a
		 * non-sleeping one as NORESOURCES (retryable).
		 */
		if (dmareq->dmar_fp == DDI_DMA_SLEEP)
			rval = DDI_DMA_NOMAPPING;
		else
			rval = DDI_DMA_NORESOURCES;
		goto bad;
	}

	mp->dmai_mapping = ioaddr + offset;
	ASSERT(mp->dmai_mapping >= softsp->iommu_dvma_base);

	/*
	 * At this point we have a range of virtual address allocated
	 * with which we now have to map to the requested object.
	 */
	if (addr) {
		rval = iommu_create_vaddr_mappings(mp,
		    addr & ~IOMMU_PAGEOFFSET);
		if (rval == DDI_DMA_NOMAPPING)
			goto bad_nomap;
	} else {
		rval = iommu_create_pp_mappings(mp, pp, pplist);
		if (rval == DDI_DMA_NOMAPPING)
			goto bad_nomap;
	}

	if (cp) {
		cp->dmac_notused = 0;
		cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
		cp->dmac_size = mp->dmai_size;
		cp->dmac_type = 0;
		*ccountp = 1;
	}
	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
		/* Window count = ceil(object size / usable window size). */
		size = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
		mp->dmai_nwin =
		    (dmareq->dmar_object.dmao_size + (size - 1)) / size;
		return (DDI_DMA_PARTIAL_MAP);
	} else {
		mp->dmai_nwin = 0;
		return (DDI_DMA_MAPPED);
	}

bad_nomap:
	/*
	 * Could not create mmu mappings.
	 */
	if (mp->dmai_rflags & DMP_NOLIMIT) {
		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
		    iommu_ptob(npages));
	} else {
		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
		    iommu_ptob(npages));
	}

bad:
	/*
	 * On a retryable failure, queue the caller's wakeup callback.
	 * (softsp is only dereferenced when rval == NORESOURCES, which
	 * is set only after softsp has been loaded above.)
	 */
	if (rval == DDI_DMA_NORESOURCES &&
	    dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
		ddi_set_callback(dmareq->dmar_fp,
		    dmareq->dmar_arg, &softsp->dvma_call_list_id);
	}
	mp->dmai_inuse = 0;
	return (rval);
}
11220Sstevel@tonic-gate 
11230Sstevel@tonic-gate /* ARGSUSED */
11240Sstevel@tonic-gate int
iommu_dma_unbindhdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle)11250Sstevel@tonic-gate iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
11260Sstevel@tonic-gate     ddi_dma_handle_t handle)
11270Sstevel@tonic-gate {
11280Sstevel@tonic-gate 	ioaddr_t addr;
11290Sstevel@tonic-gate 	uint_t npages;
11300Sstevel@tonic-gate 	size_t size;
11310Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
11320Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
11330Sstevel@tonic-gate 	struct sbus_soft_state *softsp = mppriv->softsp;
11340Sstevel@tonic-gate 	ASSERT(softsp != NULL);
11350Sstevel@tonic-gate 
11360Sstevel@tonic-gate 	addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
11370Sstevel@tonic-gate 	npages = mp->dmai_ndvmapages;
11380Sstevel@tonic-gate 	size = iommu_ptob(npages);
11390Sstevel@tonic-gate 
11400Sstevel@tonic-gate 	DPRINTF(IOMMU_DMA_UNBINDHDL_DEBUG, ("iommu_dma_unbindhdl: "
11410Sstevel@tonic-gate 	    "unbinding addr %x for %x pages\n", addr, mp->dmai_ndvmapages));
11420Sstevel@tonic-gate 
11430Sstevel@tonic-gate 	/* sync the entire object */
11440Sstevel@tonic-gate 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
11450Sstevel@tonic-gate 		/* flush stream write buffers */
11460Sstevel@tonic-gate 		sync_stream_buf(softsp, addr, npages, (int *)&mppriv->sync_flag,
11470Sstevel@tonic-gate 		    mppriv->phys_sync_flag);
11480Sstevel@tonic-gate 	}
11490Sstevel@tonic-gate 
11500Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG)
11510Sstevel@tonic-gate 	/*
11520Sstevel@tonic-gate 	 * 'Free' the dma mappings.
11530Sstevel@tonic-gate 	 */
11540Sstevel@tonic-gate 	iommu_remove_mappings(mp);
11550Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */
11560Sstevel@tonic-gate 
11570Sstevel@tonic-gate 	ASSERT(npages > (uint_t)0);
11580Sstevel@tonic-gate 	if (mp->dmai_rflags & DMP_NOLIMIT)
11591035Smike_s 		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
11600Sstevel@tonic-gate 	else
11611035Smike_s 		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
11620Sstevel@tonic-gate 
11630Sstevel@tonic-gate 	mp->dmai_ndvmapages = 0;
11640Sstevel@tonic-gate 	mp->dmai_inuse = 0;
11650Sstevel@tonic-gate 	mp->dmai_minfo = NULL;
11660Sstevel@tonic-gate 
11670Sstevel@tonic-gate 	if (softsp->dvma_call_list_id != 0)
11680Sstevel@tonic-gate 		ddi_run_callback(&softsp->dvma_call_list_id);
11690Sstevel@tonic-gate 
11700Sstevel@tonic-gate 	return (DDI_SUCCESS);
11710Sstevel@tonic-gate }
11720Sstevel@tonic-gate 
11730Sstevel@tonic-gate /*ARGSUSED*/
11740Sstevel@tonic-gate int
iommu_dma_flush(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,off_t off,size_t len,uint_t cache_flags)11750Sstevel@tonic-gate iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip,
11760Sstevel@tonic-gate     ddi_dma_handle_t handle, off_t off, size_t len,
11770Sstevel@tonic-gate     uint_t cache_flags)
11780Sstevel@tonic-gate {
11790Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
11800Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
11810Sstevel@tonic-gate 
11820Sstevel@tonic-gate 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
11830Sstevel@tonic-gate 		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
11840Sstevel@tonic-gate 		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
11850Sstevel@tonic-gate 		    mppriv->phys_sync_flag);
11860Sstevel@tonic-gate 	}
11870Sstevel@tonic-gate 	return (DDI_SUCCESS);
11880Sstevel@tonic-gate }
11890Sstevel@tonic-gate 
11900Sstevel@tonic-gate /*ARGSUSED*/
11910Sstevel@tonic-gate int
iommu_dma_win(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,uint_t win,off_t * offp,size_t * lenp,ddi_dma_cookie_t * cookiep,uint_t * ccountp)11920Sstevel@tonic-gate iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
11930Sstevel@tonic-gate     ddi_dma_handle_t handle, uint_t win, off_t *offp,
11940Sstevel@tonic-gate     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
11950Sstevel@tonic-gate {
11960Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
11970Sstevel@tonic-gate 	off_t offset;
11980Sstevel@tonic-gate 	uint_t winsize;
11990Sstevel@tonic-gate 	uint_t newoff;
12000Sstevel@tonic-gate 	int rval;
12010Sstevel@tonic-gate 
12020Sstevel@tonic-gate 	offset = mp->dmai_mapping & IOMMU_PAGEOFFSET;
12030Sstevel@tonic-gate 	winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
12040Sstevel@tonic-gate 
12050Sstevel@tonic-gate 	DPRINTF(IOMMU_DMA_WIN_DEBUG, ("getwin win %d winsize %x\n", win,
12060Sstevel@tonic-gate 	    winsize));
12070Sstevel@tonic-gate 
12080Sstevel@tonic-gate 	/*
12090Sstevel@tonic-gate 	 * win is in the range [0 .. dmai_nwin-1]
12100Sstevel@tonic-gate 	 */
12110Sstevel@tonic-gate 	if (win >= mp->dmai_nwin)
12120Sstevel@tonic-gate 		return (DDI_FAILURE);
12130Sstevel@tonic-gate 
12140Sstevel@tonic-gate 	newoff = win * winsize;
12150Sstevel@tonic-gate 	if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer)
12160Sstevel@tonic-gate 		return (DDI_FAILURE);
12170Sstevel@tonic-gate 
12180Sstevel@tonic-gate 	ASSERT(cookiep);
12190Sstevel@tonic-gate 	cookiep->dmac_notused = 0;
12200Sstevel@tonic-gate 	cookiep->dmac_type = 0;
12210Sstevel@tonic-gate 	cookiep->dmac_address = (ioaddr_t)mp->dmai_mapping;
12220Sstevel@tonic-gate 	cookiep->dmac_size = mp->dmai_size;
12230Sstevel@tonic-gate 	*ccountp = 1;
12240Sstevel@tonic-gate 	*offp = (off_t)newoff;
12250Sstevel@tonic-gate 	*lenp = (uint_t)winsize;
12260Sstevel@tonic-gate 
12270Sstevel@tonic-gate 	if (newoff == mp->dmai_offset) {
12280Sstevel@tonic-gate 		/*
12290Sstevel@tonic-gate 		 * Nothing to do...
12300Sstevel@tonic-gate 		 */
12310Sstevel@tonic-gate 		return (DDI_SUCCESS);
12320Sstevel@tonic-gate 	}
12330Sstevel@tonic-gate 
12340Sstevel@tonic-gate 	if ((rval = iommu_map_window(mp, newoff, winsize)) != DDI_SUCCESS)
12350Sstevel@tonic-gate 		return (rval);
12360Sstevel@tonic-gate 
12370Sstevel@tonic-gate 	/*
12380Sstevel@tonic-gate 	 * Set this again in case iommu_map_window() has changed it
12390Sstevel@tonic-gate 	 */
12400Sstevel@tonic-gate 	cookiep->dmac_size = mp->dmai_size;
12410Sstevel@tonic-gate 
12420Sstevel@tonic-gate 	return (DDI_SUCCESS);
12430Sstevel@tonic-gate }
12440Sstevel@tonic-gate 
12450Sstevel@tonic-gate static int
iommu_map_window(ddi_dma_impl_t * mp,off_t newoff,size_t winsize)12460Sstevel@tonic-gate iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize)
12470Sstevel@tonic-gate {
12480Sstevel@tonic-gate 	uintptr_t addr = 0;
12490Sstevel@tonic-gate 	page_t *pp;
12500Sstevel@tonic-gate 	uint_t flags;
12510Sstevel@tonic-gate 	struct page **pplist = NULL;
12520Sstevel@tonic-gate 
12530Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG)
12540Sstevel@tonic-gate 	/* Free mappings for current window */
12550Sstevel@tonic-gate 	iommu_remove_mappings(mp);
12560Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */
12570Sstevel@tonic-gate 
12580Sstevel@tonic-gate 	mp->dmai_offset = newoff;
12590Sstevel@tonic-gate 	mp->dmai_size = mp->dmai_object.dmao_size - newoff;
12600Sstevel@tonic-gate 	mp->dmai_size = MIN(mp->dmai_size, winsize);
12610Sstevel@tonic-gate 
12620Sstevel@tonic-gate 	if (mp->dmai_object.dmao_type == DMA_OTYP_VADDR ||
12630Sstevel@tonic-gate 	    mp->dmai_object.dmao_type == DMA_OTYP_BUFVADDR) {
12640Sstevel@tonic-gate 		if (mp->dmai_rflags & DMP_SHADOW) {
12650Sstevel@tonic-gate 			pplist = (struct page **)mp->dmai_minfo;
12660Sstevel@tonic-gate 			ASSERT(pplist != NULL);
12670Sstevel@tonic-gate 			pplist = pplist + (newoff >> MMU_PAGESHIFT);
12680Sstevel@tonic-gate 		} else {
12690Sstevel@tonic-gate 			addr = (uintptr_t)
12700Sstevel@tonic-gate 			    mp->dmai_object.dmao_obj.virt_obj.v_addr;
12710Sstevel@tonic-gate 			addr = (addr + newoff) & ~IOMMU_PAGEOFFSET;
12720Sstevel@tonic-gate 		}
12730Sstevel@tonic-gate 		pp = NULL;
12740Sstevel@tonic-gate 	} else {
12750Sstevel@tonic-gate 		pp = mp->dmai_object.dmao_obj.pp_obj.pp_pp;
12760Sstevel@tonic-gate 		flags = 0;
12770Sstevel@tonic-gate 		while (flags < newoff) {
12780Sstevel@tonic-gate 			pp = pp->p_next;
12790Sstevel@tonic-gate 			flags += MMU_PAGESIZE;
12800Sstevel@tonic-gate 		}
12810Sstevel@tonic-gate 	}
12820Sstevel@tonic-gate 
12830Sstevel@tonic-gate 	/* Set up mappings for next window */
12840Sstevel@tonic-gate 	if (addr) {
12850Sstevel@tonic-gate 		if (iommu_create_vaddr_mappings(mp, addr) < 0)
12860Sstevel@tonic-gate 			return (DDI_FAILURE);
12870Sstevel@tonic-gate 	} else {
12880Sstevel@tonic-gate 		if (iommu_create_pp_mappings(mp, pp, pplist) < 0)
12890Sstevel@tonic-gate 			return (DDI_FAILURE);
12900Sstevel@tonic-gate 	}
12910Sstevel@tonic-gate 
12920Sstevel@tonic-gate 	/*
12930Sstevel@tonic-gate 	 * also invalidate read stream buffer
12940Sstevel@tonic-gate 	 */
12950Sstevel@tonic-gate 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
12960Sstevel@tonic-gate 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
12970Sstevel@tonic-gate 
12980Sstevel@tonic-gate 		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
12990Sstevel@tonic-gate 		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
13000Sstevel@tonic-gate 		    mppriv->phys_sync_flag);
13010Sstevel@tonic-gate 	}
13020Sstevel@tonic-gate 
13030Sstevel@tonic-gate 	return (DDI_SUCCESS);
13040Sstevel@tonic-gate 
13050Sstevel@tonic-gate }
13060Sstevel@tonic-gate 
13070Sstevel@tonic-gate int
iommu_dma_map(dev_info_t * dip,dev_info_t * rdip,struct ddi_dma_req * dmareq,ddi_dma_handle_t * handlep)13080Sstevel@tonic-gate iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
13090Sstevel@tonic-gate     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
13100Sstevel@tonic-gate {
13110Sstevel@tonic-gate 	ddi_dma_lim_t *dma_lim = dmareq->dmar_limits;
13120Sstevel@tonic-gate 	ddi_dma_impl_t *mp;
13130Sstevel@tonic-gate 	ddi_dma_attr_t *dma_attr;
13140Sstevel@tonic-gate 	struct dma_impl_priv *mppriv;
13150Sstevel@tonic-gate 	ioaddr_t addrlow, addrhigh;
13160Sstevel@tonic-gate 	ioaddr_t segalign;
13170Sstevel@tonic-gate 	int rval;
13180Sstevel@tonic-gate 	struct sbus_soft_state *softsp =
1319*7632SNick.Todd@Sun.COM 	    (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
1320*7632SNick.Todd@Sun.COM 	    ddi_get_instance(dip));
13210Sstevel@tonic-gate 
13220Sstevel@tonic-gate 	addrlow = dma_lim->dlim_addr_lo;
13230Sstevel@tonic-gate 	addrhigh = dma_lim->dlim_addr_hi;
13240Sstevel@tonic-gate 	if ((addrhigh <= addrlow) ||
13250Sstevel@tonic-gate 	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
13260Sstevel@tonic-gate 		return (DDI_DMA_NOMAPPING);
13270Sstevel@tonic-gate 	}
13280Sstevel@tonic-gate 
13290Sstevel@tonic-gate 	/*
13300Sstevel@tonic-gate 	 * Setup DMA burstsizes and min-xfer counts.
13310Sstevel@tonic-gate 	 */
13320Sstevel@tonic-gate 	(void) iommu_dma_lim_setup(dip, rdip, softsp, &dma_lim->dlim_burstsizes,
1333*7632SNick.Todd@Sun.COM 	    (uint_t)dma_lim->dlim_burstsizes, &dma_lim->dlim_minxfer,
1334*7632SNick.Todd@Sun.COM 	    dmareq->dmar_flags);
13350Sstevel@tonic-gate 
13360Sstevel@tonic-gate 	if (dma_lim->dlim_burstsizes == 0)
13370Sstevel@tonic-gate 		return (DDI_DMA_NOMAPPING);
13380Sstevel@tonic-gate 	/*
13390Sstevel@tonic-gate 	 * If not an advisory call, get a DMA handle
13400Sstevel@tonic-gate 	 */
13410Sstevel@tonic-gate 	if (!handlep) {
13420Sstevel@tonic-gate 		return (DDI_DMA_MAPOK);
13430Sstevel@tonic-gate 	}
13440Sstevel@tonic-gate 
13450Sstevel@tonic-gate 	mppriv = kmem_zalloc(sizeof (*mppriv),
13460Sstevel@tonic-gate 	    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
13470Sstevel@tonic-gate 	if (mppriv == NULL) {
13480Sstevel@tonic-gate 		if (dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
13490Sstevel@tonic-gate 			ddi_set_callback(dmareq->dmar_fp,
13500Sstevel@tonic-gate 			    dmareq->dmar_arg, &softsp->dvma_call_list_id);
13510Sstevel@tonic-gate 		}
13520Sstevel@tonic-gate 		return (DDI_DMA_NORESOURCES);
13530Sstevel@tonic-gate 	}
13540Sstevel@tonic-gate 	mp = (ddi_dma_impl_t *)mppriv;
13550Sstevel@tonic-gate 	mp->dmai_rdip = rdip;
13560Sstevel@tonic-gate 	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
13570Sstevel@tonic-gate 	mp->dmai_minxfer = dma_lim->dlim_minxfer;
13580Sstevel@tonic-gate 	mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
13590Sstevel@tonic-gate 	mp->dmai_offset = 0;
13600Sstevel@tonic-gate 	mp->dmai_ndvmapages = 0;
13610Sstevel@tonic-gate 	mp->dmai_minfo = 0;
13620Sstevel@tonic-gate 	mp->dmai_inuse = 0;
13630Sstevel@tonic-gate 	segalign = dma_lim->dlim_cntr_max;
13640Sstevel@tonic-gate 	/* See if the DMA engine has any limit restrictions. */
13650Sstevel@tonic-gate 	if (segalign == UINT32_MAX && addrhigh == UINT32_MAX &&
13660Sstevel@tonic-gate 	    addrlow == 0) {
13670Sstevel@tonic-gate 		mp->dmai_rflags |= DMP_NOLIMIT;
13680Sstevel@tonic-gate 	}
13690Sstevel@tonic-gate 	mppriv->softsp = softsp;
13700Sstevel@tonic-gate 	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
13710Sstevel@tonic-gate 	dma_attr = &mp->dmai_attr;
13720Sstevel@tonic-gate 	dma_attr->dma_attr_align = 1;
13730Sstevel@tonic-gate 	dma_attr->dma_attr_addr_lo = addrlow;
13740Sstevel@tonic-gate 	dma_attr->dma_attr_addr_hi = addrhigh;
13750Sstevel@tonic-gate 	dma_attr->dma_attr_seg = segalign;
13760Sstevel@tonic-gate 	dma_attr->dma_attr_burstsizes = dma_lim->dlim_burstsizes;
13770Sstevel@tonic-gate 	rval = iommu_dma_bindhdl(dip, rdip, (ddi_dma_handle_t)mp,
1378*7632SNick.Todd@Sun.COM 	    dmareq, NULL, NULL);
13790Sstevel@tonic-gate 	if (rval && (rval != DDI_DMA_PARTIAL_MAP)) {
13800Sstevel@tonic-gate 		kmem_free(mppriv, sizeof (*mppriv));
13810Sstevel@tonic-gate 	} else {
13820Sstevel@tonic-gate 		*handlep = (ddi_dma_handle_t)mp;
13830Sstevel@tonic-gate 	}
13840Sstevel@tonic-gate 	return (rval);
13850Sstevel@tonic-gate }
13860Sstevel@tonic-gate 
13870Sstevel@tonic-gate /*ARGSUSED*/
13880Sstevel@tonic-gate int
iommu_dma_mctl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,enum ddi_dma_ctlops request,off_t * offp,size_t * lenp,caddr_t * objp,uint_t cache_flags)13890Sstevel@tonic-gate iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
13900Sstevel@tonic-gate     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
13910Sstevel@tonic-gate     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags)
13920Sstevel@tonic-gate {
13930Sstevel@tonic-gate 	ioaddr_t addr;
13940Sstevel@tonic-gate 	uint_t offset;
13950Sstevel@tonic-gate 	pgcnt_t npages;
13960Sstevel@tonic-gate 	size_t size;
13970Sstevel@tonic-gate 	ddi_dma_cookie_t *cp;
13980Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
13990Sstevel@tonic-gate 
1400*7632SNick.Todd@Sun.COM 	DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: handle %p ", (void *)mp));
14010Sstevel@tonic-gate 	switch (request) {
14020Sstevel@tonic-gate 	case DDI_DMA_FREE:
14030Sstevel@tonic-gate 	{
14040Sstevel@tonic-gate 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
14050Sstevel@tonic-gate 		struct sbus_soft_state *softsp = mppriv->softsp;
14060Sstevel@tonic-gate 		ASSERT(softsp != NULL);
14070Sstevel@tonic-gate 
14080Sstevel@tonic-gate 		/*
14090Sstevel@tonic-gate 		 * 'Free' the dma mappings.
14100Sstevel@tonic-gate 		 */
14110Sstevel@tonic-gate 		addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
14120Sstevel@tonic-gate 		npages = mp->dmai_ndvmapages;
14130Sstevel@tonic-gate 		size = iommu_ptob(npages);
14140Sstevel@tonic-gate 
14150Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_DMA_FREE_DEBUG, ("iommu_dma_mctl dmafree:"
14160Sstevel@tonic-gate 		    "freeing vaddr %x for %x pages.\n", addr,
14170Sstevel@tonic-gate 		    mp->dmai_ndvmapages));
14180Sstevel@tonic-gate 		/* sync the entire object */
14190Sstevel@tonic-gate 		if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
14200Sstevel@tonic-gate 			/* flush stream write buffers */
14210Sstevel@tonic-gate 			sync_stream_buf(softsp, addr, npages,
14220Sstevel@tonic-gate 			    (int *)&mppriv->sync_flag, mppriv->phys_sync_flag);
14230Sstevel@tonic-gate 		}
14240Sstevel@tonic-gate 
14250Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG)
14260Sstevel@tonic-gate 		iommu_remove_mappings(mp);
14270Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */
14280Sstevel@tonic-gate 
14290Sstevel@tonic-gate 		ASSERT(npages > (uint_t)0);
14300Sstevel@tonic-gate 		if (mp->dmai_rflags & DMP_NOLIMIT)
14311035Smike_s 			vmem_free(softsp->dvma_arena,
14321035Smike_s 			    (void *)(uintptr_t)addr, size);
14330Sstevel@tonic-gate 		else
14341035Smike_s 			vmem_xfree(softsp->dvma_arena,
14351035Smike_s 			    (void *)(uintptr_t)addr, size);
14360Sstevel@tonic-gate 
14370Sstevel@tonic-gate 		kmem_free(mppriv, sizeof (*mppriv));
14380Sstevel@tonic-gate 
14390Sstevel@tonic-gate 		if (softsp->dvma_call_list_id != 0)
14400Sstevel@tonic-gate 			ddi_run_callback(&softsp->dvma_call_list_id);
14410Sstevel@tonic-gate 
14420Sstevel@tonic-gate 		break;
14430Sstevel@tonic-gate 	}
14440Sstevel@tonic-gate 
14450Sstevel@tonic-gate 	case DDI_DMA_SET_SBUS64:
14460Sstevel@tonic-gate 	{
14470Sstevel@tonic-gate 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
14480Sstevel@tonic-gate 
14490Sstevel@tonic-gate 		return (iommu_dma_lim_setup(dip, rdip, mppriv->softsp,
14500Sstevel@tonic-gate 		    &mp->dmai_burstsizes, (uint_t)*lenp, &mp->dmai_minxfer,
14510Sstevel@tonic-gate 		    DDI_DMA_SBUS_64BIT));
14520Sstevel@tonic-gate 	}
14530Sstevel@tonic-gate 
14540Sstevel@tonic-gate 	case DDI_DMA_HTOC:
14550Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_HTOC_DEBUG, ("htoc off %lx mapping %lx "
14561035Smike_s 		    "size %x\n", *offp, mp->dmai_mapping,
14570Sstevel@tonic-gate 		    mp->dmai_size));
14580Sstevel@tonic-gate 
14590Sstevel@tonic-gate 		if ((uint_t)(*offp) >= mp->dmai_size)
14600Sstevel@tonic-gate 			return (DDI_FAILURE);
14610Sstevel@tonic-gate 
14620Sstevel@tonic-gate 		cp = (ddi_dma_cookie_t *)objp;
14630Sstevel@tonic-gate 		cp->dmac_notused = 0;
14640Sstevel@tonic-gate 		cp->dmac_address = (mp->dmai_mapping + (uint_t)(*offp));
14650Sstevel@tonic-gate 		cp->dmac_size =
14660Sstevel@tonic-gate 		    mp->dmai_mapping + mp->dmai_size - cp->dmac_address;
14670Sstevel@tonic-gate 		cp->dmac_type = 0;
14680Sstevel@tonic-gate 
14690Sstevel@tonic-gate 		break;
14700Sstevel@tonic-gate 
14710Sstevel@tonic-gate 	case DDI_DMA_KVADDR:
14720Sstevel@tonic-gate 		/*
14730Sstevel@tonic-gate 		 * If a physical address mapping has percolated this high,
14740Sstevel@tonic-gate 		 * that is an error (maybe?).
14750Sstevel@tonic-gate 		 */
14760Sstevel@tonic-gate 		if (mp->dmai_rflags & DMP_PHYSADDR) {
14770Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_KVADDR_DEBUG, ("kvaddr of phys "
14780Sstevel@tonic-gate 			    "mapping\n"));
14790Sstevel@tonic-gate 			return (DDI_FAILURE);
14800Sstevel@tonic-gate 		}
14810Sstevel@tonic-gate 
14820Sstevel@tonic-gate 		return (DDI_FAILURE);
14830Sstevel@tonic-gate 
14840Sstevel@tonic-gate 	case DDI_DMA_NEXTWIN:
14850Sstevel@tonic-gate 	{
14860Sstevel@tonic-gate 		ddi_dma_win_t *owin, *nwin;
14870Sstevel@tonic-gate 		uint_t winsize, newoff;
14880Sstevel@tonic-gate 		int rval;
14890Sstevel@tonic-gate 
14900Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin\n"));
14910Sstevel@tonic-gate 
14920Sstevel@tonic-gate 		mp = (ddi_dma_impl_t *)handle;
14930Sstevel@tonic-gate 		owin = (ddi_dma_win_t *)offp;
14940Sstevel@tonic-gate 		nwin = (ddi_dma_win_t *)objp;
14950Sstevel@tonic-gate 		if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
14960Sstevel@tonic-gate 			if (*owin == NULL) {
14970Sstevel@tonic-gate 				DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG,
14980Sstevel@tonic-gate 				    ("nextwin: win == NULL\n"));
14990Sstevel@tonic-gate 				mp->dmai_offset = 0;
15000Sstevel@tonic-gate 				*nwin = (ddi_dma_win_t)mp;
15010Sstevel@tonic-gate 				return (DDI_SUCCESS);
15020Sstevel@tonic-gate 			}
15030Sstevel@tonic-gate 
15040Sstevel@tonic-gate 			offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
15050Sstevel@tonic-gate 			winsize = iommu_ptob(mp->dmai_ndvmapages -
15060Sstevel@tonic-gate 			    iommu_btopr(offset));
15070Sstevel@tonic-gate 
15080Sstevel@tonic-gate 			newoff = (uint_t)(mp->dmai_offset + winsize);
15090Sstevel@tonic-gate 			if (newoff > mp->dmai_object.dmao_size -
15100Sstevel@tonic-gate 			    mp->dmai_minxfer)
15110Sstevel@tonic-gate 				return (DDI_DMA_DONE);
15120Sstevel@tonic-gate 
15130Sstevel@tonic-gate 			if ((rval = iommu_map_window(mp, newoff, winsize))
15140Sstevel@tonic-gate 			    != DDI_SUCCESS)
15150Sstevel@tonic-gate 				return (rval);
15160Sstevel@tonic-gate 		} else {
15170Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin: no "
15180Sstevel@tonic-gate 			    "partial mapping\n"));
15190Sstevel@tonic-gate 			if (*owin != NULL)
15200Sstevel@tonic-gate 				return (DDI_DMA_DONE);
15210Sstevel@tonic-gate 			mp->dmai_offset = 0;
15220Sstevel@tonic-gate 			*nwin = (ddi_dma_win_t)mp;
15230Sstevel@tonic-gate 		}
15240Sstevel@tonic-gate 		break;
15250Sstevel@tonic-gate 	}
15260Sstevel@tonic-gate 
15270Sstevel@tonic-gate 	case DDI_DMA_NEXTSEG:
15280Sstevel@tonic-gate 	{
15290Sstevel@tonic-gate 		ddi_dma_seg_t *oseg, *nseg;
15300Sstevel@tonic-gate 
15310Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_NEXTSEG_DEBUG, ("nextseg:\n"));
15320Sstevel@tonic-gate 
15330Sstevel@tonic-gate 		oseg = (ddi_dma_seg_t *)lenp;
15340Sstevel@tonic-gate 		if (*oseg != NULL)
15350Sstevel@tonic-gate 			return (DDI_DMA_DONE);
15360Sstevel@tonic-gate 		nseg = (ddi_dma_seg_t *)objp;
15370Sstevel@tonic-gate 		*nseg = *((ddi_dma_seg_t *)offp);
15380Sstevel@tonic-gate 		break;
15390Sstevel@tonic-gate 	}
15400Sstevel@tonic-gate 
15410Sstevel@tonic-gate 	case DDI_DMA_SEGTOC:
15420Sstevel@tonic-gate 	{
15430Sstevel@tonic-gate 		ddi_dma_seg_impl_t *seg;
15440Sstevel@tonic-gate 
15450Sstevel@tonic-gate 		seg = (ddi_dma_seg_impl_t *)handle;
15460Sstevel@tonic-gate 		cp = (ddi_dma_cookie_t *)objp;
15470Sstevel@tonic-gate 		cp->dmac_notused = 0;
15480Sstevel@tonic-gate 		cp->dmac_address = (ioaddr_t)seg->dmai_mapping;
15490Sstevel@tonic-gate 		cp->dmac_size = *lenp = seg->dmai_size;
15500Sstevel@tonic-gate 		cp->dmac_type = 0;
15510Sstevel@tonic-gate 		*offp = seg->dmai_offset;
15520Sstevel@tonic-gate 		break;
15530Sstevel@tonic-gate 	}
15540Sstevel@tonic-gate 
15550Sstevel@tonic-gate 	case DDI_DMA_MOVWIN:
15560Sstevel@tonic-gate 	{
15570Sstevel@tonic-gate 		uint_t winsize;
15580Sstevel@tonic-gate 		uint_t newoff;
15590Sstevel@tonic-gate 		int rval;
15600Sstevel@tonic-gate 
15610Sstevel@tonic-gate 		offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
15620Sstevel@tonic-gate 		winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
15630Sstevel@tonic-gate 
15641035Smike_s 		DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("movwin off %lx len %lx "
15650Sstevel@tonic-gate 		    "winsize %x\n", *offp, *lenp, winsize));
15660Sstevel@tonic-gate 
15670Sstevel@tonic-gate 		if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0)
15680Sstevel@tonic-gate 			return (DDI_FAILURE);
15690Sstevel@tonic-gate 
15700Sstevel@tonic-gate 		if (*lenp != (uint_t)-1 && *lenp != winsize) {
15710Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad length\n"));
15720Sstevel@tonic-gate 			return (DDI_FAILURE);
15730Sstevel@tonic-gate 		}
15740Sstevel@tonic-gate 		newoff = (uint_t)*offp;
15750Sstevel@tonic-gate 		if (newoff & (winsize - 1)) {
15760Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad off\n"));
15770Sstevel@tonic-gate 			return (DDI_FAILURE);
15780Sstevel@tonic-gate 		}
15790Sstevel@tonic-gate 
15800Sstevel@tonic-gate 		if (newoff == mp->dmai_offset) {
15810Sstevel@tonic-gate 			/*
15820Sstevel@tonic-gate 			 * Nothing to do...
15830Sstevel@tonic-gate 			 */
15840Sstevel@tonic-gate 			break;
15850Sstevel@tonic-gate 		}
15860Sstevel@tonic-gate 
15870Sstevel@tonic-gate 		/*
15880Sstevel@tonic-gate 		 * Check out new address...
15890Sstevel@tonic-gate 		 */
15900Sstevel@tonic-gate 		if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer) {
15910Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("newoff out of "
15920Sstevel@tonic-gate 			    "range\n"));
15930Sstevel@tonic-gate 			return (DDI_FAILURE);
15940Sstevel@tonic-gate 		}
15950Sstevel@tonic-gate 
15960Sstevel@tonic-gate 		rval = iommu_map_window(mp, newoff, winsize);
15970Sstevel@tonic-gate 		if (rval != DDI_SUCCESS)
15980Sstevel@tonic-gate 			return (rval);
15990Sstevel@tonic-gate 
16000Sstevel@tonic-gate 		if ((cp = (ddi_dma_cookie_t *)objp) != 0) {
16010Sstevel@tonic-gate 			cp->dmac_notused = 0;
16020Sstevel@tonic-gate 			cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
16030Sstevel@tonic-gate 			cp->dmac_size = mp->dmai_size;
16040Sstevel@tonic-gate 			cp->dmac_type = 0;
16050Sstevel@tonic-gate 		}
16060Sstevel@tonic-gate 		*offp = (off_t)newoff;
16070Sstevel@tonic-gate 		*lenp = (uint_t)winsize;
16080Sstevel@tonic-gate 		break;
16090Sstevel@tonic-gate 	}
16100Sstevel@tonic-gate 
16110Sstevel@tonic-gate 	case DDI_DMA_REPWIN:
16120Sstevel@tonic-gate 		if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
16130Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin fail\n"));
16140Sstevel@tonic-gate 			return (DDI_FAILURE);
16150Sstevel@tonic-gate 		}
16160Sstevel@tonic-gate 
16170Sstevel@tonic-gate 		*offp = (off_t)mp->dmai_offset;
16180Sstevel@tonic-gate 
16190Sstevel@tonic-gate 		addr = mp->dmai_ndvmapages -
16200Sstevel@tonic-gate 		    iommu_btopr(mp->dmai_mapping & IOMMU_PAGEOFFSET);
16210Sstevel@tonic-gate 
16220Sstevel@tonic-gate 		*lenp = (uint_t)iommu_ptob(addr);
16230Sstevel@tonic-gate 
16241035Smike_s 		DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin off %lx len %x\n",
16250Sstevel@tonic-gate 		    mp->dmai_offset, mp->dmai_size));
16260Sstevel@tonic-gate 
16270Sstevel@tonic-gate 		break;
16280Sstevel@tonic-gate 
16290Sstevel@tonic-gate 	case DDI_DMA_GETERR:
16300Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_GETERR_DEBUG,
16310Sstevel@tonic-gate 		    ("iommu_dma_mctl: geterr\n"));
16320Sstevel@tonic-gate 
16330Sstevel@tonic-gate 		break;
16340Sstevel@tonic-gate 
16350Sstevel@tonic-gate 	case DDI_DMA_COFF:
16360Sstevel@tonic-gate 		cp = (ddi_dma_cookie_t *)offp;
16370Sstevel@tonic-gate 		addr = cp->dmac_address;
16380Sstevel@tonic-gate 
16390Sstevel@tonic-gate 		if (addr < mp->dmai_mapping ||
16400Sstevel@tonic-gate 		    addr >= mp->dmai_mapping + mp->dmai_size)
16410Sstevel@tonic-gate 			return (DDI_FAILURE);
16420Sstevel@tonic-gate 
16430Sstevel@tonic-gate 		*objp = (caddr_t)(addr - mp->dmai_mapping);
16440Sstevel@tonic-gate 
16451035Smike_s 		DPRINTF(IOMMU_DMAMCTL_COFF_DEBUG, ("coff off %lx mapping %lx "
16460Sstevel@tonic-gate 		    "size %x\n", (ulong_t)*objp, mp->dmai_mapping,
16470Sstevel@tonic-gate 		    mp->dmai_size));
16480Sstevel@tonic-gate 
16490Sstevel@tonic-gate 		break;
16500Sstevel@tonic-gate 
16510Sstevel@tonic-gate 	case DDI_DMA_RESERVE:
16520Sstevel@tonic-gate 	{
16530Sstevel@tonic-gate 		struct ddi_dma_req *dmareq = (struct ddi_dma_req *)offp;
16540Sstevel@tonic-gate 		ddi_dma_lim_t *dma_lim;
16550Sstevel@tonic-gate 		ddi_dma_handle_t *handlep;
16560Sstevel@tonic-gate 		uint_t np;
16570Sstevel@tonic-gate 		ioaddr_t ioaddr;
16580Sstevel@tonic-gate 		int i;
16590Sstevel@tonic-gate 		struct fast_dvma *iommu_fast_dvma;
16600Sstevel@tonic-gate 		struct sbus_soft_state *softsp =
16610Sstevel@tonic-gate 		    (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
16620Sstevel@tonic-gate 		    ddi_get_instance(dip));
16630Sstevel@tonic-gate 
16640Sstevel@tonic-gate 		/* Some simple sanity checks */
16650Sstevel@tonic-gate 		dma_lim = dmareq->dmar_limits;
16660Sstevel@tonic-gate 		if (dma_lim->dlim_burstsizes == 0) {
16670Sstevel@tonic-gate 			DPRINTF(IOMMU_FASTDMA_RESERVE,
16680Sstevel@tonic-gate 			    ("Reserve: bad burstsizes\n"));
16690Sstevel@tonic-gate 			return (DDI_DMA_BADLIMITS);
16700Sstevel@tonic-gate 		}
16710Sstevel@tonic-gate 		if ((AHI <= ALO) || (AHI < softsp->iommu_dvma_base)) {
16720Sstevel@tonic-gate 			DPRINTF(IOMMU_FASTDMA_RESERVE,
16730Sstevel@tonic-gate 			    ("Reserve: bad limits\n"));
16740Sstevel@tonic-gate 			return (DDI_DMA_BADLIMITS);
16750Sstevel@tonic-gate 		}
16760Sstevel@tonic-gate 
16770Sstevel@tonic-gate 		np = dmareq->dmar_object.dmao_size;
16780Sstevel@tonic-gate 		mutex_enter(&softsp->dma_pool_lock);
16790Sstevel@tonic-gate 		if (np > softsp->dma_reserve) {
16800Sstevel@tonic-gate 			mutex_exit(&softsp->dma_pool_lock);
16810Sstevel@tonic-gate 			DPRINTF(IOMMU_FASTDMA_RESERVE,
16820Sstevel@tonic-gate 			    ("Reserve: dma_reserve is exhausted\n"));
16830Sstevel@tonic-gate 			return (DDI_DMA_NORESOURCES);
16840Sstevel@tonic-gate 		}
16850Sstevel@tonic-gate 
16860Sstevel@tonic-gate 		softsp->dma_reserve -= np;
16870Sstevel@tonic-gate 		mutex_exit(&softsp->dma_pool_lock);
16880Sstevel@tonic-gate 		mp = kmem_zalloc(sizeof (*mp), KM_SLEEP);
16890Sstevel@tonic-gate 		mp->dmai_rflags = DMP_BYPASSNEXUS;
16900Sstevel@tonic-gate 		mp->dmai_rdip = rdip;
16910Sstevel@tonic-gate 		mp->dmai_minxfer = dma_lim->dlim_minxfer;
16920Sstevel@tonic-gate 		mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
16930Sstevel@tonic-gate 
16941035Smike_s 		ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
16950Sstevel@tonic-gate 		    iommu_ptob(np), IOMMU_PAGESIZE, 0,
16961035Smike_s 		    dma_lim->dlim_cntr_max + 1,
16971035Smike_s 		    (void *)(uintptr_t)ALO, (void *)(uintptr_t)(AHI + 1),
16980Sstevel@tonic-gate 		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
16990Sstevel@tonic-gate 
17000Sstevel@tonic-gate 		if (ioaddr == 0) {
17010Sstevel@tonic-gate 			mutex_enter(&softsp->dma_pool_lock);
17020Sstevel@tonic-gate 			softsp->dma_reserve += np;
17030Sstevel@tonic-gate 			mutex_exit(&softsp->dma_pool_lock);
17040Sstevel@tonic-gate 			kmem_free(mp, sizeof (*mp));
17050Sstevel@tonic-gate 			DPRINTF(IOMMU_FASTDMA_RESERVE,
17060Sstevel@tonic-gate 			    ("Reserve: No dvma resources available\n"));
17070Sstevel@tonic-gate 			return (DDI_DMA_NOMAPPING);
17080Sstevel@tonic-gate 		}
17090Sstevel@tonic-gate 
17100Sstevel@tonic-gate 		/* create a per request structure */
17110Sstevel@tonic-gate 		iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma),
17120Sstevel@tonic-gate 		    KM_SLEEP);
17130Sstevel@tonic-gate 
17140Sstevel@tonic-gate 		/*
17150Sstevel@tonic-gate 		 * We need to remember the size of the transfer so that
17160Sstevel@tonic-gate 		 * we can figure the virtual pages to sync when the transfer
17170Sstevel@tonic-gate 		 * is complete.
17180Sstevel@tonic-gate 		 */
17190Sstevel@tonic-gate 		iommu_fast_dvma->pagecnt = kmem_zalloc(np *
17200Sstevel@tonic-gate 		    sizeof (uint_t), KM_SLEEP);
17210Sstevel@tonic-gate 
17220Sstevel@tonic-gate 		/* Allocate a streaming cache sync flag for each index */
17230Sstevel@tonic-gate 		iommu_fast_dvma->sync_flag = kmem_zalloc(np *
17240Sstevel@tonic-gate 		    sizeof (int), KM_SLEEP);
17250Sstevel@tonic-gate 
17260Sstevel@tonic-gate 		/* Allocate a physical sync flag for each index */
17270Sstevel@tonic-gate 		iommu_fast_dvma->phys_sync_flag =
17280Sstevel@tonic-gate 		    kmem_zalloc(np * sizeof (uint64_t), KM_SLEEP);
17290Sstevel@tonic-gate 
17300Sstevel@tonic-gate 		for (i = 0; i < np; i++)
17310Sstevel@tonic-gate 			iommu_fast_dvma->phys_sync_flag[i] = va_to_pa((caddr_t)
17320Sstevel@tonic-gate 			    &iommu_fast_dvma->sync_flag[i]);
17330Sstevel@tonic-gate 
17340Sstevel@tonic-gate 		mp->dmai_mapping = ioaddr;
17350Sstevel@tonic-gate 		mp->dmai_ndvmapages = np;
17360Sstevel@tonic-gate 		iommu_fast_dvma->ops = &iommu_dvma_ops;
17370Sstevel@tonic-gate 		iommu_fast_dvma->softsp = (caddr_t)softsp;
17380Sstevel@tonic-gate 		mp->dmai_nexus_private = (caddr_t)iommu_fast_dvma;
17390Sstevel@tonic-gate 		handlep = (ddi_dma_handle_t *)objp;
17400Sstevel@tonic-gate 		*handlep = (ddi_dma_handle_t)mp;
17410Sstevel@tonic-gate 
17420Sstevel@tonic-gate 		DPRINTF(IOMMU_FASTDMA_RESERVE,
17430Sstevel@tonic-gate 		    ("Reserve: mapping object %p base addr %lx size %x\n",
1744*7632SNick.Todd@Sun.COM 		    (void *)mp, mp->dmai_mapping, mp->dmai_ndvmapages));
17450Sstevel@tonic-gate 
17460Sstevel@tonic-gate 		break;
17470Sstevel@tonic-gate 	}
17480Sstevel@tonic-gate 
17490Sstevel@tonic-gate 	case DDI_DMA_RELEASE:
17500Sstevel@tonic-gate 	{
17510Sstevel@tonic-gate 		ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
17520Sstevel@tonic-gate 		uint_t np = npages = mp->dmai_ndvmapages;
17530Sstevel@tonic-gate 		ioaddr_t ioaddr = mp->dmai_mapping;
17540Sstevel@tonic-gate 		volatile uint64_t *iotte_ptr;
17550Sstevel@tonic-gate 		struct fast_dvma *iommu_fast_dvma = (struct fast_dvma *)
17560Sstevel@tonic-gate 		    mp->dmai_nexus_private;
17570Sstevel@tonic-gate 		struct sbus_soft_state *softsp = (struct sbus_soft_state *)
17580Sstevel@tonic-gate 		    iommu_fast_dvma->softsp;
17590Sstevel@tonic-gate 
17600Sstevel@tonic-gate 		ASSERT(softsp != NULL);
17610Sstevel@tonic-gate 
17620Sstevel@tonic-gate 		/* Unload stale mappings and flush stale tlb's */
17630Sstevel@tonic-gate 		iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
17640Sstevel@tonic-gate 
17650Sstevel@tonic-gate 		while (npages > (uint_t)0) {
17660Sstevel@tonic-gate 			*iotte_ptr = (uint64_t)0;	/* unload tte */
17670Sstevel@tonic-gate 			iommu_tlb_flush(softsp, ioaddr, 1);
17680Sstevel@tonic-gate 
17690Sstevel@tonic-gate 			npages--;
17700Sstevel@tonic-gate 			iotte_ptr++;
17710Sstevel@tonic-gate 			ioaddr += IOMMU_PAGESIZE;
17720Sstevel@tonic-gate 		}
17730Sstevel@tonic-gate 
17740Sstevel@tonic-gate 		ioaddr = (ioaddr_t)mp->dmai_mapping;
17750Sstevel@tonic-gate 		mutex_enter(&softsp->dma_pool_lock);
17760Sstevel@tonic-gate 		softsp->dma_reserve += np;
17770Sstevel@tonic-gate 		mutex_exit(&softsp->dma_pool_lock);
17780Sstevel@tonic-gate 
17790Sstevel@tonic-gate 		if (mp->dmai_rflags & DMP_NOLIMIT)
17801035Smike_s 			vmem_free(softsp->dvma_arena,
17811035Smike_s 			    (void *)(uintptr_t)ioaddr, iommu_ptob(np));
17820Sstevel@tonic-gate 		else
17831035Smike_s 			vmem_xfree(softsp->dvma_arena,
17841035Smike_s 			    (void *)(uintptr_t)ioaddr, iommu_ptob(np));
17850Sstevel@tonic-gate 
17860Sstevel@tonic-gate 		kmem_free(mp, sizeof (*mp));
17870Sstevel@tonic-gate 		kmem_free(iommu_fast_dvma->pagecnt, np * sizeof (uint_t));
17880Sstevel@tonic-gate 		kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int));
17890Sstevel@tonic-gate 		kmem_free(iommu_fast_dvma->phys_sync_flag, np *
17900Sstevel@tonic-gate 		    sizeof (uint64_t));
17910Sstevel@tonic-gate 		kmem_free(iommu_fast_dvma, sizeof (struct fast_dvma));
17920Sstevel@tonic-gate 
17930Sstevel@tonic-gate 
17940Sstevel@tonic-gate 		DPRINTF(IOMMU_FASTDMA_RESERVE,
17950Sstevel@tonic-gate 		    ("Release: Base addr %x size %x\n", ioaddr, np));
17960Sstevel@tonic-gate 		/*
17970Sstevel@tonic-gate 		 * Now that we've freed some resource,
17980Sstevel@tonic-gate 		 * if there is anybody waiting for it
17990Sstevel@tonic-gate 		 * try and get them going.
18000Sstevel@tonic-gate 		 */
18010Sstevel@tonic-gate 		if (softsp->dvma_call_list_id != 0)
18020Sstevel@tonic-gate 			ddi_run_callback(&softsp->dvma_call_list_id);
18030Sstevel@tonic-gate 
18040Sstevel@tonic-gate 		break;
18050Sstevel@tonic-gate 	}
18060Sstevel@tonic-gate 
18070Sstevel@tonic-gate 	default:
18080Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_DEBUG, ("iommu_dma_mctl: unknown option "
18090Sstevel@tonic-gate 		    "0%x\n", request));
18100Sstevel@tonic-gate 
18110Sstevel@tonic-gate 		return (DDI_FAILURE);
18120Sstevel@tonic-gate 	}
18130Sstevel@tonic-gate 	return (DDI_SUCCESS);
18140Sstevel@tonic-gate }
18150Sstevel@tonic-gate 
18160Sstevel@tonic-gate /*ARGSUSED*/
18170Sstevel@tonic-gate void
iommu_dvma_kaddr_load(ddi_dma_handle_t h,caddr_t a,uint_t len,uint_t index,ddi_dma_cookie_t * cp)18180Sstevel@tonic-gate iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
18190Sstevel@tonic-gate     ddi_dma_cookie_t *cp)
18200Sstevel@tonic-gate {
18210Sstevel@tonic-gate 	uintptr_t addr;
18220Sstevel@tonic-gate 	ioaddr_t ioaddr;
18230Sstevel@tonic-gate 	uint_t offset;
18240Sstevel@tonic-gate 	pfn_t pfn;
18250Sstevel@tonic-gate 	int npages;
18260Sstevel@tonic-gate 	volatile uint64_t *iotte_ptr;
18270Sstevel@tonic-gate 	uint64_t iotte_flag = 0;
18280Sstevel@tonic-gate 	struct as *as = NULL;
18290Sstevel@tonic-gate 	extern struct as kas;
18300Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
18310Sstevel@tonic-gate 	struct fast_dvma *iommu_fast_dvma =
18320Sstevel@tonic-gate 	    (struct fast_dvma *)mp->dmai_nexus_private;
18330Sstevel@tonic-gate 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
18340Sstevel@tonic-gate 	    iommu_fast_dvma->softsp;
18350Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
18360Sstevel@tonic-gate 	struct io_mem_list *iomemp;
18370Sstevel@tonic-gate 	pfn_t *pfnp;
18380Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
18390Sstevel@tonic-gate 
18400Sstevel@tonic-gate 	ASSERT(softsp != NULL);
18410Sstevel@tonic-gate 
18420Sstevel@tonic-gate 	addr = (uintptr_t)a;
18430Sstevel@tonic-gate 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
18440Sstevel@tonic-gate 	offset = (uint_t)(addr & IOMMU_PAGEOFFSET);
18450Sstevel@tonic-gate 	iommu_fast_dvma->pagecnt[index] = iommu_btopr(len + offset);
18460Sstevel@tonic-gate 	as = &kas;
18470Sstevel@tonic-gate 	addr &= ~IOMMU_PAGEOFFSET;
18480Sstevel@tonic-gate 	npages = iommu_btopr(len + offset);
18490Sstevel@tonic-gate 
18500Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
18510Sstevel@tonic-gate 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
18520Sstevel@tonic-gate 	iomemp->rdip = mp->dmai_rdip;
18530Sstevel@tonic-gate 	iomemp->ioaddr = ioaddr;
18540Sstevel@tonic-gate 	iomemp->addr = addr;
18550Sstevel@tonic-gate 	iomemp->npages = npages;
18560Sstevel@tonic-gate 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
18570Sstevel@tonic-gate 	    KM_SLEEP);
18580Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
18590Sstevel@tonic-gate 
18600Sstevel@tonic-gate 	cp->dmac_address = ioaddr | offset;
18610Sstevel@tonic-gate 	cp->dmac_size = len;
18620Sstevel@tonic-gate 
18630Sstevel@tonic-gate 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
18640Sstevel@tonic-gate 	/* read/write and streaming io on */
18650Sstevel@tonic-gate 	iotte_flag = IOTTE_VALID | IOTTE_WRITE | IOTTE_CACHE;
18660Sstevel@tonic-gate 
18670Sstevel@tonic-gate 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT)
18680Sstevel@tonic-gate 		mp->dmai_rflags |= DMP_NOSYNC;
18690Sstevel@tonic-gate 	else if (!softsp->stream_buf_off)
18700Sstevel@tonic-gate 		iotte_flag |= IOTTE_STREAM;
18710Sstevel@tonic-gate 
18720Sstevel@tonic-gate 	DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: ioaddr %x "
18731035Smike_s 	    "size %x offset %x index %x kaddr %lx\n",
18740Sstevel@tonic-gate 	    ioaddr, len, offset, index, addr));
18750Sstevel@tonic-gate 	ASSERT(npages > 0);
18760Sstevel@tonic-gate 	do {
18770Sstevel@tonic-gate 		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
18780Sstevel@tonic-gate 		if (pfn == PFN_INVALID) {
18790Sstevel@tonic-gate 			DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: invalid pfn "
18800Sstevel@tonic-gate 			    "from hat_getpfnum()\n"));
18810Sstevel@tonic-gate 		}
18820Sstevel@tonic-gate 
18830Sstevel@tonic-gate 		iommu_tlb_flush(softsp, ioaddr, 1);
18840Sstevel@tonic-gate 
18850Sstevel@tonic-gate 		/* load tte */
18860Sstevel@tonic-gate 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
18870Sstevel@tonic-gate 
18880Sstevel@tonic-gate 		npages--;
18890Sstevel@tonic-gate 		iotte_ptr++;
18900Sstevel@tonic-gate 
18910Sstevel@tonic-gate 		addr += IOMMU_PAGESIZE;
18920Sstevel@tonic-gate 		ioaddr += IOMMU_PAGESIZE;
18930Sstevel@tonic-gate 
18940Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
18950Sstevel@tonic-gate 		*pfnp = pfn;
18960Sstevel@tonic-gate 		pfnp++;
18970Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
18980Sstevel@tonic-gate 
18990Sstevel@tonic-gate 	} while (npages > 0);
19000Sstevel@tonic-gate 
19010Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
19020Sstevel@tonic-gate 	mutex_enter(&softsp->iomemlock);
19030Sstevel@tonic-gate 	iomemp->next = softsp->iomem;
19040Sstevel@tonic-gate 	softsp->iomem = iomemp;
19050Sstevel@tonic-gate 	mutex_exit(&softsp->iomemlock);
19060Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
19070Sstevel@tonic-gate }
19080Sstevel@tonic-gate 
19090Sstevel@tonic-gate /*ARGSUSED*/
19100Sstevel@tonic-gate void
iommu_dvma_unload(ddi_dma_handle_t h,uint_t index,uint_t view)19110Sstevel@tonic-gate iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
19120Sstevel@tonic-gate {
19130Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
19140Sstevel@tonic-gate 	ioaddr_t ioaddr;
19150Sstevel@tonic-gate 	pgcnt_t npages;
19160Sstevel@tonic-gate 	struct fast_dvma *iommu_fast_dvma =
19170Sstevel@tonic-gate 	    (struct fast_dvma *)mp->dmai_nexus_private;
19180Sstevel@tonic-gate 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
19190Sstevel@tonic-gate 	    iommu_fast_dvma->softsp;
19200Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
19210Sstevel@tonic-gate 	struct io_mem_list **prevp, *walk;
19220Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
19230Sstevel@tonic-gate 
19240Sstevel@tonic-gate 	ASSERT(softsp != NULL);
19250Sstevel@tonic-gate 
19260Sstevel@tonic-gate 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
19270Sstevel@tonic-gate 	npages = iommu_fast_dvma->pagecnt[index];
19280Sstevel@tonic-gate 
19290Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
19300Sstevel@tonic-gate 	mutex_enter(&softsp->iomemlock);
19310Sstevel@tonic-gate 	prevp = &softsp->iomem;
19320Sstevel@tonic-gate 	walk = softsp->iomem;
19330Sstevel@tonic-gate 
19340Sstevel@tonic-gate 	while (walk != NULL) {
19350Sstevel@tonic-gate 		if (walk->ioaddr == ioaddr) {
19360Sstevel@tonic-gate 			*prevp = walk->next;
19370Sstevel@tonic-gate 			break;
19380Sstevel@tonic-gate 		}
19390Sstevel@tonic-gate 		prevp = &walk->next;
19400Sstevel@tonic-gate 		walk = walk->next;
19410Sstevel@tonic-gate 	}
19420Sstevel@tonic-gate 	mutex_exit(&softsp->iomemlock);
19430Sstevel@tonic-gate 
19440Sstevel@tonic-gate 	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
19450Sstevel@tonic-gate 	kmem_free(walk, sizeof (struct io_mem_list));
19460Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
19470Sstevel@tonic-gate 
19480Sstevel@tonic-gate 	DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_unload: handle %p sync flag "
1949*7632SNick.Todd@Sun.COM 	    "addr %p sync flag pfn %llx index %x page count %lx\n", (void *)mp,
1950*7632SNick.Todd@Sun.COM 	    (void *)&iommu_fast_dvma->sync_flag[index],
19510Sstevel@tonic-gate 	    iommu_fast_dvma->phys_sync_flag[index],
19520Sstevel@tonic-gate 	    index, npages));
19530Sstevel@tonic-gate 
19540Sstevel@tonic-gate 	if ((mp->dmai_rflags & DMP_NOSYNC) != DMP_NOSYNC) {
19550Sstevel@tonic-gate 		sync_stream_buf(softsp, ioaddr, npages,
1956*7632SNick.Todd@Sun.COM 		    (int *)&iommu_fast_dvma->sync_flag[index],
1957*7632SNick.Todd@Sun.COM 		    iommu_fast_dvma->phys_sync_flag[index]);
19580Sstevel@tonic-gate 	}
19590Sstevel@tonic-gate }
19600Sstevel@tonic-gate 
19610Sstevel@tonic-gate /*ARGSUSED*/
19620Sstevel@tonic-gate void
iommu_dvma_sync(ddi_dma_handle_t h,uint_t index,uint_t view)19630Sstevel@tonic-gate iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
19640Sstevel@tonic-gate {
19650Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
19660Sstevel@tonic-gate 	ioaddr_t ioaddr;
19670Sstevel@tonic-gate 	uint_t npages;
19680Sstevel@tonic-gate 	struct fast_dvma *iommu_fast_dvma =
19690Sstevel@tonic-gate 	    (struct fast_dvma *)mp->dmai_nexus_private;
19700Sstevel@tonic-gate 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
19710Sstevel@tonic-gate 	    iommu_fast_dvma->softsp;
19720Sstevel@tonic-gate 
19730Sstevel@tonic-gate 	if ((mp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
19740Sstevel@tonic-gate 		return;
19750Sstevel@tonic-gate 
19760Sstevel@tonic-gate 	ASSERT(softsp != NULL);
19770Sstevel@tonic-gate 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
19780Sstevel@tonic-gate 	npages = iommu_fast_dvma->pagecnt[index];
19790Sstevel@tonic-gate 
19800Sstevel@tonic-gate 	DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_sync: handle %p, "
1981*7632SNick.Todd@Sun.COM 	    "sync flag addr %p, sync flag pfn %llx\n", (void *)mp,
1982*7632SNick.Todd@Sun.COM 	    (void *)&iommu_fast_dvma->sync_flag[index],
19830Sstevel@tonic-gate 	    iommu_fast_dvma->phys_sync_flag[index]));
19840Sstevel@tonic-gate 
19850Sstevel@tonic-gate 	sync_stream_buf(softsp, ioaddr, npages,
19860Sstevel@tonic-gate 	    (int *)&iommu_fast_dvma->sync_flag[index],
19870Sstevel@tonic-gate 	    iommu_fast_dvma->phys_sync_flag[index]);
19880Sstevel@tonic-gate }
1989