xref: /onnv-gate/usr/src/uts/sun4u/io/pci/pci_reloc.c (revision 0:68f95e015346)
1*0Sstevel@tonic-gate /*
2*0Sstevel@tonic-gate  * CDDL HEADER START
3*0Sstevel@tonic-gate  *
4*0Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*0Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*0Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*0Sstevel@tonic-gate  * with the License.
8*0Sstevel@tonic-gate  *
9*0Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*0Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*0Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*0Sstevel@tonic-gate  * and limitations under the License.
13*0Sstevel@tonic-gate  *
14*0Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*0Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*0Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*0Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*0Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*0Sstevel@tonic-gate  *
20*0Sstevel@tonic-gate  * CDDL HEADER END
21*0Sstevel@tonic-gate  */
22*0Sstevel@tonic-gate /*
23*0Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*0Sstevel@tonic-gate  * Use is subject to license terms.
25*0Sstevel@tonic-gate  */
26*0Sstevel@tonic-gate 
27*0Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*0Sstevel@tonic-gate 
29*0Sstevel@tonic-gate /*
30*0Sstevel@tonic-gate  * PCI nexus DVMA relocation routines.
31*0Sstevel@tonic-gate  *
32*0Sstevel@tonic-gate  * These routines handle the interactions with the HAT layer to
33*0Sstevel@tonic-gate  * implement page relocation for page(s) which have active DMA handle
34*0Sstevel@tonic-gate  * bindings when DVMA is being used for those handles.
35*0Sstevel@tonic-gate  *
36*0Sstevel@tonic-gate  * The current modus operandi is as follows:
37*0Sstevel@tonic-gate  *
38*0Sstevel@tonic-gate  *   Object binding: register the appropriate callback for each page
39*0Sstevel@tonic-gate  *     of the kernel object while obtaining the PFN for the DVMA page.
40*0Sstevel@tonic-gate  *
41*0Sstevel@tonic-gate  *   Object unbinding: unregister the callback for each page of the
42*0Sstevel@tonic-gate  *     kernel object.
43*0Sstevel@tonic-gate  *
44*0Sstevel@tonic-gate  *   Relocation request:
45*0Sstevel@tonic-gate  *     1) Suspend the bus and sync the caches.
46*0Sstevel@tonic-gate  *     2) Remap the DVMA object using the new provided PFN.
47*0Sstevel@tonic-gate  *     3) Unsuspend the bus.
48*0Sstevel@tonic-gate  *
49*0Sstevel@tonic-gate  *  The relocation code runs with CPUs captured (idling in xc_loop())
50*0Sstevel@tonic-gate  *  so we can only acquire spinlocks at PIL >= 13 for synchronization
51*0Sstevel@tonic-gate  *  within those codepaths.
52*0Sstevel@tonic-gate  */
53*0Sstevel@tonic-gate #include <sys/types.h>
54*0Sstevel@tonic-gate #include <sys/kmem.h>
55*0Sstevel@tonic-gate #include <sys/async.h>
56*0Sstevel@tonic-gate #include <sys/sysmacros.h>
57*0Sstevel@tonic-gate #include <sys/sunddi.h>
58*0Sstevel@tonic-gate #include <sys/machsystm.h>
59*0Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
60*0Sstevel@tonic-gate #include <sys/dvma.h>
61*0Sstevel@tonic-gate #include <vm/hat.h>
62*0Sstevel@tonic-gate #include <sys/pci/pci_obj.h>
63*0Sstevel@tonic-gate 
64*0Sstevel@tonic-gate /*LINTLIBRARY*/
65*0Sstevel@tonic-gate 
66*0Sstevel@tonic-gate void
67*0Sstevel@tonic-gate pci_dvma_unregister_callbacks(pci_t *pci_p, ddi_dma_impl_t *mp)
68*0Sstevel@tonic-gate {
69*0Sstevel@tonic-gate 	ddi_dma_obj_t *dobj_p = &mp->dmai_object;
70*0Sstevel@tonic-gate 	struct as *as_p = dobj_p->dmao_obj.virt_obj.v_as;
71*0Sstevel@tonic-gate 	page_t **pplist = dobj_p->dmao_obj.virt_obj.v_priv;
72*0Sstevel@tonic-gate 	caddr_t vaddr = dobj_p->dmao_obj.virt_obj.v_addr;
73*0Sstevel@tonic-gate 	struct hat *hat_p;
74*0Sstevel@tonic-gate 	uint32_t offset;
75*0Sstevel@tonic-gate 	int i;
76*0Sstevel@tonic-gate 
77*0Sstevel@tonic-gate 	if (!PCI_DMA_CANRELOC(mp))
78*0Sstevel@tonic-gate 		return;
79*0Sstevel@tonic-gate 
80*0Sstevel@tonic-gate 	hat_p = (as_p == NULL)? kas.a_hat : as_p->a_hat;
81*0Sstevel@tonic-gate 	ASSERT(hat_p == kas.a_hat);
82*0Sstevel@tonic-gate 	ASSERT(pplist == NULL);
83*0Sstevel@tonic-gate 
84*0Sstevel@tonic-gate 	offset = mp->dmai_roffset;
85*0Sstevel@tonic-gate 	hat_delete_callback(vaddr, IOMMU_PAGE_SIZE - offset, mp, HAC_PAGELOCK);
86*0Sstevel@tonic-gate 	vaddr = (caddr_t)(((uintptr_t)vaddr + IOMMU_PAGE_SIZE) &
87*0Sstevel@tonic-gate 	    IOMMU_PAGE_MASK);
88*0Sstevel@tonic-gate 	for (i = 1; i < mp->dmai_ndvmapages; i++) {
89*0Sstevel@tonic-gate 		hat_delete_callback(vaddr, IOMMU_PAGE_SIZE, mp, HAC_PAGELOCK);
90*0Sstevel@tonic-gate 		vaddr += IOMMU_PAGE_SIZE;
91*0Sstevel@tonic-gate 	}
92*0Sstevel@tonic-gate 	mp->dmai_flags &= ~DMAI_FLAGS_RELOC;
93*0Sstevel@tonic-gate }
94*0Sstevel@tonic-gate 
95*0Sstevel@tonic-gate static int
96*0Sstevel@tonic-gate pci_dvma_postrelocator(caddr_t va, uint_t len, uint_t flags, void *mpvoid,
97*0Sstevel@tonic-gate 	pfn_t newpfn)
98*0Sstevel@tonic-gate {
99*0Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)mpvoid;
100*0Sstevel@tonic-gate 	dev_info_t *rdip = mp->dmai_rdip;
101*0Sstevel@tonic-gate 	ddi_dma_obj_t *dobj_p = &mp->dmai_object;
102*0Sstevel@tonic-gate 	page_t **pplist = dobj_p->dmao_obj.virt_obj.v_priv;
103*0Sstevel@tonic-gate 	caddr_t baseva = dobj_p->dmao_obj.virt_obj.v_addr;
104*0Sstevel@tonic-gate 	int index;
105*0Sstevel@tonic-gate 	size_t length = IOMMU_PTOB(1);
106*0Sstevel@tonic-gate 	off_t offset;
107*0Sstevel@tonic-gate 
108*0Sstevel@tonic-gate 	DEBUG0(DBG_RELOC, rdip, "postrelocator called\n");
109*0Sstevel@tonic-gate 
110*0Sstevel@tonic-gate 	if (flags == HAT_POSTUNSUSPEND) {
111*0Sstevel@tonic-gate 		mutex_enter(&pci_reloc_mutex);
112*0Sstevel@tonic-gate 		ASSERT(pci_reloc_thread == curthread);
113*0Sstevel@tonic-gate 		ASSERT(pci_reloc_presuspend > 0);
114*0Sstevel@tonic-gate 		if (--pci_reloc_presuspend == 0) {
115*0Sstevel@tonic-gate 			pci_reloc_thread = NULL;
116*0Sstevel@tonic-gate 			cv_broadcast(&pci_reloc_cv);
117*0Sstevel@tonic-gate 		}
118*0Sstevel@tonic-gate 		mutex_exit(&pci_reloc_mutex);
119*0Sstevel@tonic-gate 		return (0);
120*0Sstevel@tonic-gate 	}
121*0Sstevel@tonic-gate 
122*0Sstevel@tonic-gate 	ASSERT(flags == HAT_UNSUSPEND);
123*0Sstevel@tonic-gate 	ASSERT(pci_reloc_suspend > 0);
124*0Sstevel@tonic-gate 	pci_reloc_suspend--;
125*0Sstevel@tonic-gate 
126*0Sstevel@tonic-gate 	ASSERT(len <= length);
127*0Sstevel@tonic-gate 	ASSERT(pplist == NULL);	/* addr bind handle only */
128*0Sstevel@tonic-gate 	ASSERT(dobj_p->dmao_obj.virt_obj.v_as == &kas ||
129*0Sstevel@tonic-gate 	    dobj_p->dmao_obj.virt_obj.v_as == NULL);
130*0Sstevel@tonic-gate 	ASSERT(PCI_DMA_ISDVMA(mp));
131*0Sstevel@tonic-gate 	ASSERT(pci_reloc_thread == curthread);
132*0Sstevel@tonic-gate 
133*0Sstevel@tonic-gate 	offset = va - baseva;
134*0Sstevel@tonic-gate 	index = IOMMU_BTOPR(offset);
135*0Sstevel@tonic-gate 	ASSERT(index < mp->dmai_ndvmapages);
136*0Sstevel@tonic-gate 
137*0Sstevel@tonic-gate 	DEBUG3(DBG_RELOC, rdip, "index 0x%x, vaddr 0x%llx, baseva 0x%llx\n",
138*0Sstevel@tonic-gate 	    index, (int64_t)va, (int64_t)baseva);
139*0Sstevel@tonic-gate 
140*0Sstevel@tonic-gate 	if ((mp)->dmai_ndvmapages == 1) {
141*0Sstevel@tonic-gate 		DEBUG2(DBG_RELOC, rdip, "pfn remap (1) 0x%x -> 0x%x\n",
142*0Sstevel@tonic-gate 		    mp->dmai_pfnlst, newpfn);
143*0Sstevel@tonic-gate 		    mp->dmai_pfnlst = (void *)newpfn;
144*0Sstevel@tonic-gate 	} else {
145*0Sstevel@tonic-gate 		DEBUG3(DBG_RELOC, rdip, "pfn remap (%d) 0x%x -> 0x%x\n",
146*0Sstevel@tonic-gate 		    index, ((iopfn_t *)mp->dmai_pfnlst)[index], newpfn);
147*0Sstevel@tonic-gate 		((iopfn_t *)mp->dmai_pfnlst)[index] = (iopfn_t)newpfn;
148*0Sstevel@tonic-gate 	}
149*0Sstevel@tonic-gate 
150*0Sstevel@tonic-gate 	if (ddi_dma_mctl(rdip, rdip, (ddi_dma_handle_t)mp, DDI_DMA_REMAP,
151*0Sstevel@tonic-gate 	    &offset, &length, NULL, 0) != DDI_SUCCESS)
152*0Sstevel@tonic-gate 		return (EIO);
153*0Sstevel@tonic-gate 	if (ddi_ctlops(rdip, rdip, DDI_CTLOPS_UNQUIESCE, NULL, NULL) !=
154*0Sstevel@tonic-gate 	    DDI_SUCCESS)
155*0Sstevel@tonic-gate 		return (EIO);
156*0Sstevel@tonic-gate 
157*0Sstevel@tonic-gate 	return (0);
158*0Sstevel@tonic-gate }
159*0Sstevel@tonic-gate 
160*0Sstevel@tonic-gate /*
161*0Sstevel@tonic-gate  * Log a warning message if a callback is still registered on
162*0Sstevel@tonic-gate  * a page which is being freed.  This is indicative of a driver
163*0Sstevel@tonic-gate  * bug -- DMA handles are bound, and the memory is being freed by
164*0Sstevel@tonic-gate  * the VM subsystem without an unbind call on the handle first.
165*0Sstevel@tonic-gate  */
166*0Sstevel@tonic-gate static int
167*0Sstevel@tonic-gate pci_dma_relocerr(caddr_t va, uint_t len, uint_t errorcode, void *mpvoid)
168*0Sstevel@tonic-gate {
169*0Sstevel@tonic-gate 	int errlevel = pci_dma_panic_on_leak? CE_PANIC : CE_WARN;
170*0Sstevel@tonic-gate 	if (errorcode == HAT_CB_ERR_LEAKED) {
171*0Sstevel@tonic-gate 		cmn_err(errlevel, "object 0x%p has a bound DMA handle 0x%p\n",
172*0Sstevel@tonic-gate 			va, mpvoid);
173*0Sstevel@tonic-gate 		return (0);
174*0Sstevel@tonic-gate 	}
175*0Sstevel@tonic-gate 
176*0Sstevel@tonic-gate 	/* unknown error code, unhandled so panic */
177*0Sstevel@tonic-gate 	return (EINVAL);
178*0Sstevel@tonic-gate }
179*0Sstevel@tonic-gate 
180*0Sstevel@tonic-gate /*
181*0Sstevel@tonic-gate  * pci DVMA remap entry points
182*0Sstevel@tonic-gate  *
183*0Sstevel@tonic-gate  * Called in response to a DDI_DMA_REMAP DMA ctlops command.
184*0Sstevel@tonic-gate  * Remaps the region specified in the underlying IOMMU. Safe
185*0Sstevel@tonic-gate  * to assume that the bus was quiesced and ddi_dma_sync() was
186*0Sstevel@tonic-gate  * invoked by the caller before we got to this point.
187*0Sstevel@tonic-gate  */
188*0Sstevel@tonic-gate int
189*0Sstevel@tonic-gate pci_dvma_remap(dev_info_t *dip, dev_info_t *rdip, ddi_dma_impl_t *mp,
190*0Sstevel@tonic-gate 	off_t offset, size_t length)
191*0Sstevel@tonic-gate {
192*0Sstevel@tonic-gate 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
193*0Sstevel@tonic-gate 	iommu_t *iommu_p = pci_p->pci_iommu_p;
194*0Sstevel@tonic-gate 	dvma_addr_t dvma_pg;
195*0Sstevel@tonic-gate 	size_t npgs;
196*0Sstevel@tonic-gate 	int idx;
197*0Sstevel@tonic-gate 
198*0Sstevel@tonic-gate 	dvma_pg = IOMMU_BTOP(mp->dmai_mapping);
199*0Sstevel@tonic-gate 	idx = IOMMU_BTOPR(offset);
200*0Sstevel@tonic-gate 	dvma_pg += idx;
201*0Sstevel@tonic-gate 	npgs = IOMMU_BTOPR(length);
202*0Sstevel@tonic-gate 
203*0Sstevel@tonic-gate 	DEBUG3(DBG_RELOC, mp->dmai_rdip,
204*0Sstevel@tonic-gate 	    "pci_dvma_remap: dvma_pg 0x%llx len 0x%llx idx 0x%x\n",
205*0Sstevel@tonic-gate 	    dvma_pg, length, idx);
206*0Sstevel@tonic-gate 
207*0Sstevel@tonic-gate 	ASSERT(pci_p->pci_pbm_p->pbm_quiesce_count > 0);
208*0Sstevel@tonic-gate 	iommu_remap_pages(iommu_p, mp, dvma_pg, npgs, idx);
209*0Sstevel@tonic-gate 
210*0Sstevel@tonic-gate 	return (DDI_SUCCESS);
211*0Sstevel@tonic-gate }
212*0Sstevel@tonic-gate 
213*0Sstevel@tonic-gate void
214*0Sstevel@tonic-gate pci_fdvma_remap(ddi_dma_impl_t *mp, caddr_t kvaddr, dvma_addr_t dvma_pg,
215*0Sstevel@tonic-gate 	size_t npages, size_t index, pfn_t newpfn)
216*0Sstevel@tonic-gate {
217*0Sstevel@tonic-gate 	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
218*0Sstevel@tonic-gate 	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
219*0Sstevel@tonic-gate 	iommu_t *iommu_p = pci_p->pci_iommu_p;
220*0Sstevel@tonic-gate 	dev_info_t *dip = pci_p->pci_dip;
221*0Sstevel@tonic-gate 	iopfn_t pfn = (iopfn_t)newpfn;
222*0Sstevel@tonic-gate 	dvma_addr_t pg_index = dvma_pg - iommu_p->dvma_base_pg;
223*0Sstevel@tonic-gate 	int i;
224*0Sstevel@tonic-gate 	uint64_t tte;
225*0Sstevel@tonic-gate 
226*0Sstevel@tonic-gate 	/* make sure we don't exceed reserved boundary */
227*0Sstevel@tonic-gate 	DEBUG3(DBG_FAST_DVMA, dip, "fast remap index=%x: %p, npgs=%x", index,
228*0Sstevel@tonic-gate 	    kvaddr, npages);
229*0Sstevel@tonic-gate 	if (index + npages > mp->dmai_ndvmapages) {
230*0Sstevel@tonic-gate 		cmn_err(pci_panic_on_fatal_errors ? CE_PANIC : CE_WARN,
231*0Sstevel@tonic-gate 			"%s%d: fdvma remap index(%x)+pgs(%x) exceeds limit\n",
232*0Sstevel@tonic-gate 			ddi_driver_name(dip), ddi_get_instance(dip),
233*0Sstevel@tonic-gate 			index, npages);
234*0Sstevel@tonic-gate 		return;
235*0Sstevel@tonic-gate 	}
236*0Sstevel@tonic-gate 
237*0Sstevel@tonic-gate 	for (i = 0; i < npages; i++, kvaddr += IOMMU_PAGE_SIZE) {
238*0Sstevel@tonic-gate 		DEBUG3(DBG_FAST_DVMA, dip, "remap dvma_pg %x -> pfn %x,"
239*0Sstevel@tonic-gate 		    " old tte 0x%llx\n", dvma_pg + i, pfn,
240*0Sstevel@tonic-gate 		    iommu_p->iommu_tsb_vaddr[pg_index + i]);
241*0Sstevel@tonic-gate 
242*0Sstevel@tonic-gate 		if (pfn == PFN_INVALID)
243*0Sstevel@tonic-gate 			goto bad_pfn;
244*0Sstevel@tonic-gate 
245*0Sstevel@tonic-gate 		if (i == 0)
246*0Sstevel@tonic-gate 			tte = MAKE_TTE_TEMPLATE(pfn, mp);
247*0Sstevel@tonic-gate 
248*0Sstevel@tonic-gate 		/* XXX assumes iommu and mmu has same page size */
249*0Sstevel@tonic-gate 		iommu_p->iommu_tsb_vaddr[pg_index + i] = tte | IOMMU_PTOB(pfn);
250*0Sstevel@tonic-gate 		IOMMU_PAGE_FLUSH(iommu_p, (dvma_pg + i));
251*0Sstevel@tonic-gate 	}
252*0Sstevel@tonic-gate 	return;
253*0Sstevel@tonic-gate bad_pfn:
254*0Sstevel@tonic-gate 	cmn_err(CE_WARN, "%s%d: fdvma remap can't get page frame for vaddr %p",
255*0Sstevel@tonic-gate 		ddi_driver_name(dip), ddi_get_instance(dip), kvaddr);
256*0Sstevel@tonic-gate }
257*0Sstevel@tonic-gate 
258*0Sstevel@tonic-gate static int
259*0Sstevel@tonic-gate pci_fdvma_prerelocator(caddr_t va, uint_t len, uint_t flags, void *mpvoid)
260*0Sstevel@tonic-gate {
261*0Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)mpvoid;
262*0Sstevel@tonic-gate 	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
263*0Sstevel@tonic-gate 	caddr_t baseva, endva;
264*0Sstevel@tonic-gate 	int i;
265*0Sstevel@tonic-gate 
266*0Sstevel@tonic-gate 	/*
267*0Sstevel@tonic-gate 	 * It isn't safe to do relocation if all of the IOMMU
268*0Sstevel@tonic-gate 	 * mappings haven't yet been established at this index.
269*0Sstevel@tonic-gate 	 */
270*0Sstevel@tonic-gate 	for (i = 0; i < mp->dmai_ndvmapages; i++) {
271*0Sstevel@tonic-gate 		baseva = fdvma_p->kvbase[i];
272*0Sstevel@tonic-gate 		endva = baseva + IOMMU_PTOB(fdvma_p->pagecnt[i]);
273*0Sstevel@tonic-gate 		if (va >= baseva && va < endva)
274*0Sstevel@tonic-gate 			return (0);	/* found a valid index */
275*0Sstevel@tonic-gate 	}
276*0Sstevel@tonic-gate 	return (EAGAIN);
277*0Sstevel@tonic-gate }
278*0Sstevel@tonic-gate 
279*0Sstevel@tonic-gate static int
280*0Sstevel@tonic-gate pci_fdvma_postrelocator(caddr_t va, uint_t len, uint_t flags, void *mpvoid,
281*0Sstevel@tonic-gate 	pfn_t pfn)
282*0Sstevel@tonic-gate {
283*0Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)mpvoid;
284*0Sstevel@tonic-gate 	dev_info_t *rdip = mp->dmai_rdip;
285*0Sstevel@tonic-gate 	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
286*0Sstevel@tonic-gate 	caddr_t baseva;
287*0Sstevel@tonic-gate 	dvma_addr_t dvma_pg;
288*0Sstevel@tonic-gate 	size_t length = PAGESIZE;
289*0Sstevel@tonic-gate 	int i;
290*0Sstevel@tonic-gate 
291*0Sstevel@tonic-gate 	DEBUG0(DBG_RELOC, rdip, "fdvma postrelocator called\n");
292*0Sstevel@tonic-gate 
293*0Sstevel@tonic-gate 	if (flags == HAT_POSTUNSUSPEND) {
294*0Sstevel@tonic-gate 		mutex_enter(&pci_reloc_mutex);
295*0Sstevel@tonic-gate 		ASSERT(pci_reloc_thread == curthread);
296*0Sstevel@tonic-gate 		if (--pci_reloc_presuspend == 0) {
297*0Sstevel@tonic-gate 			pci_reloc_thread = NULL;
298*0Sstevel@tonic-gate 			cv_broadcast(&pci_reloc_cv);
299*0Sstevel@tonic-gate 		}
300*0Sstevel@tonic-gate 		mutex_exit(&pci_reloc_mutex);
301*0Sstevel@tonic-gate 		return (0);
302*0Sstevel@tonic-gate 	}
303*0Sstevel@tonic-gate 
304*0Sstevel@tonic-gate 	pci_reloc_suspend--;
305*0Sstevel@tonic-gate 
306*0Sstevel@tonic-gate 	ASSERT(flags == HAT_UNSUSPEND);
307*0Sstevel@tonic-gate 	ASSERT(len <= length);
308*0Sstevel@tonic-gate 	ASSERT((mp->dmai_rflags & DMP_BYPASSNEXUS) != 0);
309*0Sstevel@tonic-gate 
310*0Sstevel@tonic-gate 	/*
311*0Sstevel@tonic-gate 	 * This virtual page can have multiple cookies that refer
312*0Sstevel@tonic-gate 	 * to it within the same handle. We must walk the whole
313*0Sstevel@tonic-gate 	 * table for this DMA handle finding all the cookies, and
314*0Sstevel@tonic-gate 	 * update all of them. Sigh.
315*0Sstevel@tonic-gate 	 */
316*0Sstevel@tonic-gate 	for (i = 0; i < mp->dmai_ndvmapages; i++) {
317*0Sstevel@tonic-gate 		caddr_t endva;
318*0Sstevel@tonic-gate 		int index;
319*0Sstevel@tonic-gate 
320*0Sstevel@tonic-gate 		baseva = fdvma_p->kvbase[i];
321*0Sstevel@tonic-gate 		endva = baseva + IOMMU_PTOB(fdvma_p->pagecnt[i]);
322*0Sstevel@tonic-gate 
323*0Sstevel@tonic-gate 		if (va >= baseva && va < endva) {
324*0Sstevel@tonic-gate 			index = i + IOMMU_BTOP(va - baseva);
325*0Sstevel@tonic-gate 			ASSERT(index < mp->dmai_ndvmapages);
326*0Sstevel@tonic-gate 
327*0Sstevel@tonic-gate 			DEBUG4(DBG_RELOC, rdip, "mp %p: index 0x%x, "
328*0Sstevel@tonic-gate 			    " vaddr 0x%llx, baseva 0x%llx\n", mp, index,
329*0Sstevel@tonic-gate 			    (int64_t)va, (int64_t)baseva);
330*0Sstevel@tonic-gate 
331*0Sstevel@tonic-gate 			dvma_pg = IOMMU_BTOP(mp->dmai_mapping) + index;
332*0Sstevel@tonic-gate 			pci_fdvma_remap(mp, va, dvma_pg, IOMMU_BTOP(length),
333*0Sstevel@tonic-gate 			    index, pfn);
334*0Sstevel@tonic-gate 		}
335*0Sstevel@tonic-gate 	}
336*0Sstevel@tonic-gate 
337*0Sstevel@tonic-gate 	if (ddi_ctlops(rdip, rdip, DDI_CTLOPS_UNQUIESCE, NULL, NULL) !=
338*0Sstevel@tonic-gate 	    DDI_SUCCESS)
339*0Sstevel@tonic-gate 		return (EIO);
340*0Sstevel@tonic-gate 
341*0Sstevel@tonic-gate 	return (0);
342*0Sstevel@tonic-gate }
343*0Sstevel@tonic-gate 
344*0Sstevel@tonic-gate void
345*0Sstevel@tonic-gate pci_fdvma_unregister_callbacks(pci_t *pci_p, fdvma_t *fdvma_p,
346*0Sstevel@tonic-gate 	ddi_dma_impl_t *mp, uint_t index)
347*0Sstevel@tonic-gate {
348*0Sstevel@tonic-gate 	size_t npgs = fdvma_p->pagecnt[index];
349*0Sstevel@tonic-gate 	caddr_t kva = fdvma_p->kvbase[index];
350*0Sstevel@tonic-gate 	int i;
351*0Sstevel@tonic-gate 
352*0Sstevel@tonic-gate 	ASSERT(index + npgs <= mp->dmai_ndvmapages);
353*0Sstevel@tonic-gate 	ASSERT(kva != NULL);
354*0Sstevel@tonic-gate 
355*0Sstevel@tonic-gate 	for (i = 0; i < npgs && pci_dvma_remap_enabled;
356*0Sstevel@tonic-gate 	    i++, kva += IOMMU_PAGE_SIZE)
357*0Sstevel@tonic-gate 		hat_delete_callback(kva, IOMMU_PAGE_SIZE, mp, HAC_PAGELOCK);
358*0Sstevel@tonic-gate }
359*0Sstevel@tonic-gate 
360*0Sstevel@tonic-gate static int
361*0Sstevel@tonic-gate pci_common_prerelocator(caddr_t va, uint_t len, uint_t flags, void *mpvoid)
362*0Sstevel@tonic-gate {
363*0Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)mpvoid;
364*0Sstevel@tonic-gate 	ddi_dma_handle_t h = (ddi_dma_handle_t)mpvoid;
365*0Sstevel@tonic-gate 	dev_info_t *rdip = mp->dmai_rdip;
366*0Sstevel@tonic-gate 	int ret;
367*0Sstevel@tonic-gate 
368*0Sstevel@tonic-gate 	DEBUG0(DBG_RELOC, rdip, "prerelocator called\n");
369*0Sstevel@tonic-gate 
370*0Sstevel@tonic-gate 	if (flags == HAT_PRESUSPEND) {
371*0Sstevel@tonic-gate 		if (!ddi_prop_exists(DDI_DEV_T_ANY, rdip, DDI_PROP_NOTPROM,
372*0Sstevel@tonic-gate 		    "dvma-remap-supported"))
373*0Sstevel@tonic-gate 			return (ENOTSUP);
374*0Sstevel@tonic-gate 		if (!PCI_DMA_ISMAPPED(mp))
375*0Sstevel@tonic-gate 			return (EAGAIN);
376*0Sstevel@tonic-gate 
377*0Sstevel@tonic-gate 		if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
378*0Sstevel@tonic-gate 			ret = pci_fdvma_prerelocator(va, len, flags, mpvoid);
379*0Sstevel@tonic-gate 			if (ret != 0)
380*0Sstevel@tonic-gate 				return (ret);
381*0Sstevel@tonic-gate 		} else if (!PCI_DMA_ISDVMA(mp))
382*0Sstevel@tonic-gate 			return (EINVAL);
383*0Sstevel@tonic-gate 
384*0Sstevel@tonic-gate 		/*
385*0Sstevel@tonic-gate 		 * Acquire the exclusive right to relocate a PCI DMA page,
386*0Sstevel@tonic-gate 		 * since we later have to pause CPUs which could otherwise
387*0Sstevel@tonic-gate 		 * lead to all sorts of synchronization headaches.
388*0Sstevel@tonic-gate 		 */
389*0Sstevel@tonic-gate 		mutex_enter(&pci_reloc_mutex);
390*0Sstevel@tonic-gate 		if (pci_reloc_thread != curthread) {
391*0Sstevel@tonic-gate 			while (pci_reloc_thread != NULL) {
392*0Sstevel@tonic-gate 				cv_wait(&pci_reloc_cv, &pci_reloc_mutex);
393*0Sstevel@tonic-gate 			}
394*0Sstevel@tonic-gate 			pci_reloc_thread = curthread;
395*0Sstevel@tonic-gate 			ASSERT(pci_reloc_suspend == 0);
396*0Sstevel@tonic-gate 		}
397*0Sstevel@tonic-gate 		mutex_exit(&pci_reloc_mutex);
398*0Sstevel@tonic-gate 
399*0Sstevel@tonic-gate 		ASSERT(pci_reloc_thread == curthread);
400*0Sstevel@tonic-gate 		pci_reloc_presuspend++;
401*0Sstevel@tonic-gate 
402*0Sstevel@tonic-gate 		return (0);
403*0Sstevel@tonic-gate 	}
404*0Sstevel@tonic-gate 
405*0Sstevel@tonic-gate 	ASSERT(flags == HAT_SUSPEND);
406*0Sstevel@tonic-gate 	ASSERT(PCI_DMA_CANRELOC(mp));
407*0Sstevel@tonic-gate 	ASSERT(pci_reloc_thread == curthread);
408*0Sstevel@tonic-gate 	pci_reloc_suspend++;
409*0Sstevel@tonic-gate 
410*0Sstevel@tonic-gate 	if (ddi_ctlops(rdip, rdip, DDI_CTLOPS_QUIESCE, NULL, NULL) !=
411*0Sstevel@tonic-gate 	    DDI_SUCCESS)
412*0Sstevel@tonic-gate 		return (EIO);
413*0Sstevel@tonic-gate 	if (ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
414*0Sstevel@tonic-gate 		return (EIO);
415*0Sstevel@tonic-gate 
416*0Sstevel@tonic-gate 	return (0);
417*0Sstevel@tonic-gate }
418*0Sstevel@tonic-gate 
/*
 * Register two callback types: one for normal DVMA and the
 * other for fast DVMA, since each method has a different way
 * of tracking the PFNs behind a handle.  Both share the same
 * pre-relocator and error handler; only the post-relocator
 * differs.  Also sets up the mutex/cv pair that serializes
 * relocation ownership across threads.
 */
void
pci_reloc_init(void)
{
	mutex_init(&pci_reloc_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pci_reloc_cv, NULL, CV_DEFAULT, NULL);
	/* normal DVMA: PFNs live in the handle's dmai_pfnlst */
	pci_dvma_cbid = hat_register_callback(pci_common_prerelocator,
		pci_dvma_postrelocator, pci_dma_relocerr, 1);
	/* fast DVMA: PFNs tracked via the fdvma_t kvbase/pagecnt arrays */
	pci_fast_dvma_cbid = hat_register_callback(pci_common_prerelocator,
		pci_fdvma_postrelocator, pci_dma_relocerr, 1);
}
434*0Sstevel@tonic-gate 
/*
 * Tear down the relocation synchronization objects created by
 * pci_reloc_init().  The cv is destroyed before the mutex that
 * guards it.  (The callback ids registered with the HAT are not
 * unregistered here.)
 */
void
pci_reloc_fini(void)
{
	cv_destroy(&pci_reloc_cv);
	mutex_destroy(&pci_reloc_mutex);
}
441