/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PCI nexus DVMA relocation routines.
 *
 * These routines handle the interactions with the HAT layer to
 * implement page relocation for pages which have active DMA handle
 * bindings when DVMA is being used for those handles.
 *
 * The current modus operandi is as follows:
 *
 *   Object binding: register the appropriate callback for each page
 *     of the kernel object while obtaining the PFN for the DVMA page.
 *     (A hedged sketch of this bind-time registration follows the
 *     includes below.)
 *
 *   Object unbinding: unregister the callback for each page of the
 *     kernel object.
 *
 *   Relocation request:
 *     1) Suspend the bus and sync the caches.
 *     2) Remap the DVMA object using the newly provided PFN.
 *     3) Unsuspend the bus.
 *
 *  The relocation code runs with CPUs captured (idling in xc_loop()),
 *  so we can only acquire spinlocks at PIL >= 13 for synchronization
 *  within those codepaths.
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/machsystm.h>
#include <sys/ddi_impldefs.h>
#include <sys/dvma.h>
#include <vm/hat.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/
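
/*
 * Editor's note -- illustrative sketch only, not part of the original
 * driver.  The bind-time counterpart of pci_dvma_unregister_callbacks()
 * below lives in the DVMA bind path (outside this file); the sketch
 * illustrates the "Object binding" step from the block comment above.
 * It assumes the sun4u hat_add_callback(id, vaddr, len, flags, pvt,
 * rpfn, cookiep) interface and a hypothetical MP_HAT_CB_COOKIE_PTR()
 * accessor for saving each per-page cookie; it is kept under #if 0 so
 * it is never compiled.
 */
#if 0
static int
pci_dvma_register_callbacks_sketch(ddi_dma_impl_t *mp, caddr_t vaddr,
	pfn_t *pfn_p)
{
	uint32_t offset = mp->dmai_roffset;
	int i;

	/* First (possibly partial) IOMMU page of the kernel object. */
	if (hat_add_callback(pci_dvma_cbid, vaddr, IOMMU_PAGE_SIZE - offset,
	    HAC_PAGELOCK, mp, &pfn_p[0],
	    MP_HAT_CB_COOKIE_PTR(mp, 0)) != 0)	/* hypothetical accessor */
		return (DDI_DMA_NORESOURCES);

	/* Subsequent pages are registered on IOMMU page boundaries. */
	vaddr = (caddr_t)(((uintptr_t)vaddr + IOMMU_PAGE_SIZE) &
	    IOMMU_PAGE_MASK);
	for (i = 1; i < mp->dmai_ndvmapages; i++, vaddr += IOMMU_PAGE_SIZE) {
		if (hat_add_callback(pci_dvma_cbid, vaddr, IOMMU_PAGE_SIZE,
		    HAC_PAGELOCK, mp, &pfn_p[i],
		    MP_HAT_CB_COOKIE_PTR(mp, i)) != 0)
			return (DDI_DMA_NORESOURCES);
	}

	mp->dmai_flags |= DMAI_FLAGS_RELOC;
	return (DDI_SUCCESS);
}
#endif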

void
pci_dvma_unregister_callbacks(pci_t *pci_p, ddi_dma_impl_t *mp)
{
	ddi_dma_obj_t *dobj_p = &mp->dmai_object;
	struct as *as_p = dobj_p->dmao_obj.virt_obj.v_as;
	page_t **pplist = dobj_p->dmao_obj.virt_obj.v_priv;
	caddr_t vaddr = dobj_p->dmao_obj.virt_obj.v_addr;
	struct hat *hat_p;
	uint32_t offset;
	int i;

	if (!PCI_DMA_CANRELOC(mp))
		return;

	hat_p = (as_p == NULL) ? kas.a_hat : as_p->a_hat;
	ASSERT(hat_p == kas.a_hat);
	ASSERT(pplist == NULL);

	offset = mp->dmai_roffset;
	hat_delete_callback(vaddr, IOMMU_PAGE_SIZE - offset, mp, HAC_PAGELOCK,
	    MP_HAT_CB_COOKIE(mp, 0));
	vaddr = (caddr_t)(((uintptr_t)vaddr + IOMMU_PAGE_SIZE) &
	    IOMMU_PAGE_MASK);
	for (i = 1; i < mp->dmai_ndvmapages; i++) {
		hat_delete_callback(vaddr, IOMMU_PAGE_SIZE, mp, HAC_PAGELOCK,
		    MP_HAT_CB_COOKIE(mp, i));
		vaddr += IOMMU_PAGE_SIZE;
	}
	mp->dmai_flags &= ~DMAI_FLAGS_RELOC;
}

static int
pci_dvma_postrelocator(caddr_t va, uint_t len, uint_t flags, void *mpvoid,
	pfn_t newpfn)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)mpvoid;
	dev_info_t *rdip = mp->dmai_rdip;
	ddi_dma_obj_t *dobj_p = &mp->dmai_object;
	page_t **pplist = dobj_p->dmao_obj.virt_obj.v_priv;
	caddr_t baseva = dobj_p->dmao_obj.virt_obj.v_addr;
	int index;
	size_t length = IOMMU_PTOB(1);
	off_t offset;

	DEBUG0(DBG_RELOC, rdip, "postrelocator called\n");

	if (flags == HAT_POSTUNSUSPEND) {
		mutex_enter(&pci_reloc_mutex);
		ASSERT(pci_reloc_thread == curthread);
		ASSERT(pci_reloc_presuspend > 0);
		if (--pci_reloc_presuspend == 0) {
			pci_reloc_thread = NULL;
			cv_broadcast(&pci_reloc_cv);
		}
		mutex_exit(&pci_reloc_mutex);
		return (0);
	}

	ASSERT(flags == HAT_UNSUSPEND);
	ASSERT(pci_reloc_suspend > 0);
	pci_reloc_suspend--;

	ASSERT(len <= length);
	ASSERT(pplist == NULL);	/* addr bind handle only */
	ASSERT(dobj_p->dmao_obj.virt_obj.v_as == &kas ||
	    dobj_p->dmao_obj.virt_obj.v_as == NULL);
	ASSERT(PCI_DMA_ISDVMA(mp));
	ASSERT(pci_reloc_thread == curthread);

	offset = va - baseva;
	index = IOMMU_BTOPR(offset);
	ASSERT(index < mp->dmai_ndvmapages);

	DEBUG3(DBG_RELOC, rdip, "index 0x%x, vaddr 0x%llx, baseva 0x%llx\n",
	    index, (int64_t)va, (int64_t)baseva);

	if (mp->dmai_ndvmapages == 1) {
		DEBUG2(DBG_RELOC, rdip, "pfn remap (1) 0x%x -> 0x%x\n",
		    mp->dmai_pfnlst, newpfn);
		mp->dmai_pfnlst = (void *)newpfn;
	} else {
		DEBUG3(DBG_RELOC, rdip, "pfn remap (%d) 0x%x -> 0x%x\n",
		    index, ((iopfn_t *)mp->dmai_pfnlst)[index], newpfn);
		((iopfn_t *)mp->dmai_pfnlst)[index] = (iopfn_t)newpfn;
	}

	if (ddi_dma_mctl(rdip, rdip, (ddi_dma_handle_t)mp, DDI_DMA_REMAP,
	    &offset, &length, NULL, 0) != DDI_SUCCESS)
		return (EIO);
	if (ddi_ctlops(rdip, rdip, DDI_CTLOPS_UNQUIESCE, NULL, NULL) !=
	    DDI_SUCCESS)
		return (EIO);

	return (0);
}

/*
 * Log a warning message if a callback is still registered on
 * a page which is being freed.  This is indicative of a driver
 * bug -- DMA handles are bound, and the memory is being freed by
 * the VM subsystem without an unbind call on the handle first.
 */
static int
pci_dma_relocerr(caddr_t va, uint_t len, uint_t errorcode, void *mpvoid)
{
	int errlevel = pci_dma_panic_on_leak ? CE_PANIC : CE_WARN;
	if (errorcode == HAT_CB_ERR_LEAKED) {
		cmn_err(errlevel, "object 0x%p has a bound DMA handle 0x%p\n",
		    va, mpvoid);
		return (0);
	}

	/* unknown error code, unhandled so panic */
	return (EINVAL);
}

/*
 * pci DVMA remap entry points
 *
 * Called in response to a DDI_DMA_REMAP DMA ctlops command.
 * Remaps the specified region in the underlying IOMMU.  It is safe
 * to assume that the bus was quiesced and ddi_dma_sync() was
 * invoked by the caller before we got to this point.
 */
int
pci_dvma_remap(dev_info_t *dip, dev_info_t *rdip, ddi_dma_impl_t *mp,
	off_t offset, size_t length)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	dvma_addr_t dvma_pg;
	size_t npgs;
	int idx;

	dvma_pg = IOMMU_BTOP(mp->dmai_mapping);
	idx = IOMMU_BTOPR(offset);
	dvma_pg += idx;
	npgs = IOMMU_BTOPR(length);

	DEBUG3(DBG_RELOC, mp->dmai_rdip,
	    "pci_dvma_remap: dvma_pg 0x%llx len 0x%llx idx 0x%x\n",
	    dvma_pg, length, idx);

	ASSERT(pci_p->pci_pbm_p->pbm_quiesce_count > 0);
	iommu_remap_pages(iommu_p, mp, dvma_pg, npgs, idx);

	return (DDI_SUCCESS);
}

void
pci_fdvma_remap(ddi_dma_impl_t *mp, caddr_t kvaddr, dvma_addr_t dvma_pg,
	size_t npages, size_t index, pfn_t newpfn)
{
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	dev_info_t *dip = pci_p->pci_dip;
	iopfn_t pfn = (iopfn_t)newpfn;
	dvma_addr_t pg_index = dvma_pg - iommu_p->dvma_base_pg;
	int i;
	uint64_t tte;

	/* make sure we don't exceed the reserved boundary */
	DEBUG3(DBG_FAST_DVMA, dip, "fast remap index=%x: %p, npgs=%x", index,
	    kvaddr, npages);
	if (index + npages > mp->dmai_ndvmapages) {
		cmn_err(pci_panic_on_fatal_errors ? CE_PANIC : CE_WARN,
		    "%s%d: fdvma remap index(%lx)+pgs(%lx) exceeds limit\n",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    index, npages);
		return;
	}

	for (i = 0; i < npages; i++, kvaddr += IOMMU_PAGE_SIZE) {
		DEBUG3(DBG_FAST_DVMA, dip, "remap dvma_pg %x -> pfn %x,"
		    " old tte 0x%llx\n", dvma_pg + i, pfn,
		    iommu_p->iommu_tsb_vaddr[pg_index + i]);

		if (pfn == PFN_INVALID)
			goto bad_pfn;

		if (i == 0)
			tte = MAKE_TTE_TEMPLATE(pfn, mp);

		/* XXX assumes the IOMMU and the MMU have the same page size */
		iommu_p->iommu_tsb_vaddr[pg_index + i] = tte | IOMMU_PTOB(pfn);
		IOMMU_PAGE_FLUSH(iommu_p, (dvma_pg + i));
	}
	return;
bad_pfn:
	cmn_err(CE_WARN, "%s%d: fdvma remap can't get page frame for vaddr %p",
	    ddi_driver_name(dip), ddi_get_instance(dip), kvaddr);
}

static int
pci_fdvma_prerelocator(caddr_t va, uint_t len, uint_t flags, void *mpvoid)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)mpvoid;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	caddr_t baseva, endva;
	int i;

	/*
	 * It isn't safe to do relocation if all of the IOMMU
	 * mappings haven't yet been established at this index.
	 */
	for (i = 0; i < mp->dmai_ndvmapages; i++) {
		baseva = fdvma_p->kvbase[i];
		endva = baseva + IOMMU_PTOB(fdvma_p->pagecnt[i]);
		if (va >= baseva && va < endva)
			return (0);	/* found a valid index */
	}
	return (EAGAIN);
}

static int
pci_fdvma_postrelocator(caddr_t va, uint_t len, uint_t flags, void *mpvoid,
	pfn_t pfn)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)mpvoid;
	dev_info_t *rdip = mp->dmai_rdip;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	caddr_t baseva;
	dvma_addr_t dvma_pg;
	size_t length = PAGESIZE;
	int i;

	DEBUG0(DBG_RELOC, rdip, "fdvma postrelocator called\n");

	if (flags == HAT_POSTUNSUSPEND) {
		mutex_enter(&pci_reloc_mutex);
		ASSERT(pci_reloc_thread == curthread);
		if (--pci_reloc_presuspend == 0) {
			pci_reloc_thread = NULL;
			cv_broadcast(&pci_reloc_cv);
		}
		mutex_exit(&pci_reloc_mutex);
		return (0);
	}

	pci_reloc_suspend--;

	ASSERT(flags == HAT_UNSUSPEND);
	ASSERT(len <= length);
	ASSERT((mp->dmai_rflags & DMP_BYPASSNEXUS) != 0);

	/*
	 * This virtual page can have multiple cookies that refer
	 * to it within the same handle. We must walk the whole
	 * table for this DMA handle, finding all the cookies, and
	 * update all of them. Sigh.
	 */
	for (i = 0; i < mp->dmai_ndvmapages; i++) {
		caddr_t endva;
		int index;

		baseva = fdvma_p->kvbase[i];
		endva = baseva + IOMMU_PTOB(fdvma_p->pagecnt[i]);

		if (va >= baseva && va < endva) {
			index = i + IOMMU_BTOP(va - baseva);
			ASSERT(index < mp->dmai_ndvmapages);

			DEBUG4(DBG_RELOC, rdip, "mp %p: index 0x%x, "
			    " vaddr 0x%llx, baseva 0x%llx\n", mp, index,
			    (int64_t)va, (int64_t)baseva);

			dvma_pg = IOMMU_BTOP(mp->dmai_mapping) + index;
			pci_fdvma_remap(mp, va, dvma_pg, IOMMU_BTOP(length),
			    index, pfn);
		}
	}

	if (ddi_ctlops(rdip, rdip, DDI_CTLOPS_UNQUIESCE, NULL, NULL) !=
	    DDI_SUCCESS)
		return (EIO);

	return (0);
}

void
pci_fdvma_unregister_callbacks(pci_t *pci_p, fdvma_t *fdvma_p,
	ddi_dma_impl_t *mp, uint_t index)
{
	size_t npgs = fdvma_p->pagecnt[index];
	caddr_t kva = fdvma_p->kvbase[index];
	int i;

	ASSERT(index + npgs <= mp->dmai_ndvmapages);
	ASSERT(kva != NULL);

	for (i = 0; i < npgs && pci_dvma_remap_enabled;
	    i++, kva += IOMMU_PAGE_SIZE)
		hat_delete_callback(kva, IOMMU_PAGE_SIZE, mp, HAC_PAGELOCK,
		    fdvma_p->cbcookie[index + i]);
}

static int
pci_common_prerelocator(caddr_t va, uint_t len, uint_t flags, void *mpvoid)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)mpvoid;
	ddi_dma_handle_t h = (ddi_dma_handle_t)mpvoid;
	dev_info_t *rdip = mp->dmai_rdip;
	int ret;

	DEBUG0(DBG_RELOC, rdip, "prerelocator called\n");

	if (flags == HAT_PRESUSPEND) {
		if (!ddi_prop_exists(DDI_DEV_T_ANY, rdip, DDI_PROP_NOTPROM,
		    "dvma-remap-supported"))
			return (ENOTSUP);
		if (!PCI_DMA_ISMAPPED(mp))
			return (EAGAIN);

		if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
			ret = pci_fdvma_prerelocator(va, len, flags, mpvoid);
			if (ret != 0)
				return (ret);
		} else if (!PCI_DMA_ISDVMA(mp))
			return (EINVAL);

		/*
		 * Acquire the exclusive right to relocate a PCI DMA page,
		 * since we later have to pause CPUs which could otherwise
		 * lead to all sorts of synchronization headaches.
		 */
		mutex_enter(&pci_reloc_mutex);
		if (pci_reloc_thread != curthread) {
			while (pci_reloc_thread != NULL) {
				cv_wait(&pci_reloc_cv, &pci_reloc_mutex);
			}
			pci_reloc_thread = curthread;
			ASSERT(pci_reloc_suspend == 0);
		}
		mutex_exit(&pci_reloc_mutex);

		ASSERT(pci_reloc_thread == curthread);
		pci_reloc_presuspend++;

		return (0);
	}

	ASSERT(flags == HAT_SUSPEND);
	ASSERT(PCI_DMA_CANRELOC(mp));
	ASSERT(pci_reloc_thread == curthread);
	pci_reloc_suspend++;

	if (ddi_ctlops(rdip, rdip, DDI_CTLOPS_QUIESCE, NULL, NULL) !=
	    DDI_SUCCESS)
		return (EIO);
	if (ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
		return (EIO);

	return (0);
}

/*
 * Register two callback types: one for normal DVMA and the
 * other for fast DVMA, since each method has a different way
 * of tracking the PFNs behind a handle.
 */
void
pci_reloc_init(void)
{
	int key = pci_reloc_getkey();

	mutex_init(&pci_reloc_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pci_reloc_cv, NULL, CV_DEFAULT, NULL);
	pci_dvma_cbid = hat_register_callback(
	    key + ('D'<<24 | 'V'<<16 | 'M'<<8 | 'A'),
	    pci_common_prerelocator, pci_dvma_postrelocator,
	    pci_dma_relocerr, 1);
	pci_fast_dvma_cbid = hat_register_callback(
	    key + ('F'<<24 | 'D'<<16 | 'M'<<8 | 'A'),
	    pci_common_prerelocator,
	    pci_fdvma_postrelocator, pci_dma_relocerr, 1);
}

void
pci_reloc_fini(void)
{
	cv_destroy(&pci_reloc_cv);
	mutex_destroy(&pci_reloc_mutex);
}
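
/*
 * Editor's note -- illustrative sketch only, not part of the original
 * driver.  pci_common_prerelocator() above refuses to relocate pages
 * unless the requesting leaf driver advertises the
 * "dvma-remap-supported" property.  One plausible way for a leaf driver
 * to opt in is to create the property from its attach(9E) routine with
 * ddi_prop_update_int(9F); a driver.conf entry would work as well.  The
 * function name below is hypothetical, and the sketch is kept under
 * #if 0 so it is never compiled.
 */
#if 0
static int
xxdriver_advertise_dvma_remap(dev_info_t *dip)	/* hypothetical helper */
{
	/* Create a software property the nexus prerelocator can find. */
	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "dvma-remap-supported", 1) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}
#endif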