xref: /onnv-gate/usr/src/uts/sun4u/io/pci/pci_iommu.c (revision 0:68f95e015346)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PCI iommu initialization and configuration
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/

static void iommu_tlb_flushall(iommu_t *iommu_p);
static void iommu_preserve_tsb(iommu_t *iommu_p);

void
iommu_create(pci_t *pci_p)
{
	dev_info_t *dip = pci_p->pci_dip;
	iommu_t *iommu_p;
	uintptr_t a;
	size_t cache_size;
	uint32_t tsb_entries;

	char map_name[32];
	extern uint64_t va_to_pa(void *);

	pci_dvma_range_prop_t	pci_dvma_range;

	/*
	 * Allocate iommu state structure and link it to the
	 * pci state structure.
	 */
	iommu_p = (iommu_t *)kmem_zalloc(sizeof (iommu_t), KM_SLEEP);
	pci_p->pci_iommu_p = iommu_p;
	iommu_p->iommu_pci_p = pci_p;
	iommu_p->iommu_inst = ddi_get_instance(dip);

	/*
	 * chip specific dvma_end, tsb_size & context support
	 */
	iommu_p->iommu_dvma_end = pci_iommu_dvma_end;
	a = pci_iommu_setup(iommu_p);

	/*
	 * Determine the virtual address of iommu registers.
	 */
	iommu_p->iommu_ctrl_reg =
		(uint64_t *)(a + COMMON_IOMMU_CTRL_REG_OFFSET);
	iommu_p->iommu_tsb_base_addr_reg =
		(uint64_t *)(a + COMMON_IOMMU_TSB_BASE_ADDR_REG_OFFSET);
	iommu_p->iommu_flush_page_reg =
		(uint64_t *)(a + COMMON_IOMMU_FLUSH_PAGE_REG_OFFSET);

	/*
	 * Configure the rest of the iommu parameters according to:
	 * tsb_size and dvma_end
	 */
	iommu_p->iommu_tsb_vaddr = /* retrieve TSB VA reserved by system */
		iommu_tsb_cookie_to_va(pci_p->pci_tsb_cookie);
	iommu_p->iommu_tsb_entries = tsb_entries =
		IOMMU_TSBSIZE_TO_TSBENTRIES(iommu_p->iommu_tsb_size);
	iommu_p->iommu_tsb_paddr = va_to_pa((caddr_t)iommu_p->iommu_tsb_vaddr);
	iommu_p->iommu_dvma_cache_locks =
		kmem_zalloc(pci_dvma_page_cache_entries, KM_SLEEP);

	iommu_p->iommu_dvma_base = iommu_p->iommu_dvma_end + 1
		- (tsb_entries * IOMMU_PAGE_SIZE);
	iommu_p->dvma_base_pg = IOMMU_BTOP(iommu_p->iommu_dvma_base);
	iommu_p->iommu_dvma_reserve = tsb_entries >> 1;
	iommu_p->dvma_end_pg = IOMMU_BTOP(iommu_p->iommu_dvma_end);
	iommu_p->iommu_dma_bypass_base = COMMON_IOMMU_BYPASS_BASE;
	iommu_p->iommu_dma_bypass_end = COMMON_IOMMU_BYPASS_END;

	/*
	 * export "virtual-dma" software property to support
	 * child devices needing to know DVMA range
	 */
	pci_dvma_range.dvma_base = (uint32_t)iommu_p->iommu_dvma_base;
	pci_dvma_range.dvma_len = (uint32_t)
		iommu_p->iommu_dvma_end - iommu_p->iommu_dvma_base + 1;
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		"virtual-dma", (caddr_t)&pci_dvma_range,
		sizeof (pci_dvma_range));

	DEBUG2(DBG_ATTACH, dip, "iommu_create: ctrl=%p, tsb=%p\n",
		iommu_p->iommu_ctrl_reg, iommu_p->iommu_tsb_base_addr_reg);
	DEBUG2(DBG_ATTACH, dip, "iommu_create: page_flush=%p, ctx_flush=%p\n",
		iommu_p->iommu_flush_page_reg, iommu_p->iommu_flush_ctx_reg);
	DEBUG2(DBG_ATTACH, dip, "iommu_create: tsb vaddr=%p tsb_paddr=%p\n",
		iommu_p->iommu_tsb_vaddr, iommu_p->iommu_tsb_paddr);
	DEBUG1(DBG_ATTACH, dip, "iommu_create: allocated size=%x\n",
		iommu_tsb_cookie_to_size(pci_p->pci_tsb_cookie));
	DEBUG2(DBG_ATTACH, dip, "iommu_create: fast tsb tte addr: %x + %x\n",
		iommu_p->iommu_tsb_vaddr,
		pci_dvma_page_cache_entries * pci_dvma_page_cache_clustsz);
	DEBUG3(DBG_ATTACH, dip,
		"iommu_create: tsb size=%x, tsb entries=%x, dvma base=%x\n",
		iommu_p->iommu_tsb_size, iommu_p->iommu_tsb_entries,
		iommu_p->iommu_dvma_base);
	DEBUG2(DBG_ATTACH, dip,
		"iommu_create: dvma_cache_locks=%x cache_entries=%x\n",
		iommu_p->iommu_dvma_cache_locks, pci_dvma_page_cache_entries);

	/*
	 * zero out the area to be used for iommu tsb
	 */
	bzero(iommu_p->iommu_tsb_vaddr, tsb_entries << 3);

	/*
	 * Create a virtual memory map for dvma address space.
	 * Reserve 'cache_size' bytes of low dvma space for the
	 * fast track cache.
	 */
	(void) snprintf(map_name, sizeof (map_name), "%s%d_dvma",
		ddi_driver_name(dip), ddi_get_instance(dip));

	cache_size = IOMMU_PTOB(pci_dvma_page_cache_entries *
		pci_dvma_page_cache_clustsz);
	iommu_p->iommu_dvma_fast_end = iommu_p->iommu_dvma_base +
		cache_size - 1;
	iommu_p->iommu_dvma_map = vmem_create(map_name,
		(void *)(iommu_p->iommu_dvma_fast_end + 1),
		IOMMU_PTOB(tsb_entries) - cache_size, IOMMU_PAGE_SIZE,
		NULL, NULL, NULL, IOMMU_PAGE_SIZE, VM_SLEEP);

	mutex_init(&iommu_p->dvma_debug_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * On detach, the TSB Base Address Register gets set to zero,
	 * so if it's zero here, there is no need to preserve TTEs.
	 */
	if (pci_preserve_iommu_tsb && *iommu_p->iommu_tsb_base_addr_reg)
		iommu_preserve_tsb(iommu_p);

	iommu_configure(iommu_p);
}
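
#if 0
/*
 * Reference only (not compiled): a sketch of how a child driver could
 * consume the "virtual-dma" property exported by iommu_create() above to
 * learn its parent's DVMA range.  The function and variable names here
 * are illustrative assumptions, not interfaces defined by this file.
 */
static int
child_get_dvma_range(dev_info_t *cdip, pci_dvma_range_prop_t *range_p)
{
	pci_dvma_range_prop_t *prop_p;
	int prop_len;

	/* no DDI_PROP_DONTPASS: the property lives on the pci nexus node */
	if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, 0, "virtual-dma",
	    (caddr_t)&prop_p, &prop_len) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	*range_p = *prop_p;		/* dvma_base and dvma_len */
	kmem_free(prop_p, prop_len);
	return (DDI_SUCCESS);
}
#endif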

void
iommu_destroy(pci_t *pci_p)
{
#ifdef DEBUG
	dev_info_t *dip = pci_p->pci_dip;
#endif
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	volatile uint64_t ctl_val = *iommu_p->iommu_ctrl_reg;

	DEBUG0(DBG_DETACH, dip, "iommu_destroy:\n");

	/*
	 * Disable the IOMMU by setting the TSB Base Address to zero
	 * and the TSB Table size to the smallest possible.
	 */
	ctl_val = ctl_val & ~(7 << COMMON_IOMMU_CTRL_TSB_SZ_SHIFT);

	*iommu_p->iommu_ctrl_reg = ctl_val;
	*iommu_p->iommu_tsb_base_addr_reg = 0;

	/*
	 * Return the boot time allocated tsb.
	 */
	iommu_tsb_free(pci_p->pci_tsb_cookie);

	/*
	 * Tear down any implementation-specific structures set up in
	 * pci_iommu_setup.
	 */
	pci_iommu_teardown(iommu_p);

	if (DVMA_DBG_ON(iommu_p))
		pci_dvma_debug_fini(iommu_p);
	mutex_destroy(&iommu_p->dvma_debug_lock);

	/*
	 * Free the dvma resource map.
	 */
	vmem_destroy(iommu_p->iommu_dvma_map);

	kmem_free(iommu_p->iommu_dvma_cache_locks,
	    pci_dvma_page_cache_entries);

	/*
	 * Free the iommu state structure.
	 */
	kmem_free(iommu_p, sizeof (iommu_t));
	pci_p->pci_iommu_p = NULL;
}

/*
 * re-program iommu on the fly while preserving on-going dma
 * transactions on the PCI bus.
 */
void
iommu_configure(iommu_t *iommu_p)
{
	pci_t *pci_p = iommu_p->iommu_pci_p;
	uint64_t cfgpa = pci_get_cfg_pabase(pci_p);
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
	dev_info_t *cdip = NULL;
	volatile uint64_t ctl_val = (uint64_t)
		((iommu_p->iommu_tsb_size << COMMON_IOMMU_CTRL_TSB_SZ_SHIFT) |
			(0 /* 8k page */ << COMMON_IOMMU_CTRL_TBW_SZ_SHIFT) |
			COMMON_IOMMU_CTRL_ENABLE |
			COMMON_IOMMU_CTRL_DIAG_ENABLE |
			(pci_lock_tlb ? COMMON_IOMMU_CTRL_LCK_ENABLE : 0));

	DEBUG2(DBG_ATTACH, dip, "iommu_configure: iommu_ctl=%08x.%08x\n",
		HI32(ctl_val), LO32(ctl_val));
	if (!pci_preserve_iommu_tsb || !(*iommu_p->iommu_tsb_base_addr_reg)) {
		*iommu_p->iommu_ctrl_reg = COMMON_IOMMU_CTRL_DIAG_ENABLE;
		iommu_tlb_flushall(iommu_p);
		goto config;
	}
	cdip = ddi_get_child(dip);
	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {
		uint32_t *reg_p;
		int reg_len;
		if (ddi_getlongprop(DDI_DEV_T_NONE, cdip, DDI_PROP_DONTPASS,
			"reg", (caddr_t)&reg_p, &reg_len) != DDI_PROP_SUCCESS)
			continue;
		cfgpa += (*reg_p) & (PCI_CONF_ADDR_MASK ^ PCI_REG_REG_M);
		kmem_free(reg_p, reg_len);
		break;
	}

config:
	pci_iommu_config(iommu_p, ctl_val, cdip ? cfgpa : 0);
}

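/*
 * iommu_map_pages - load iommu TTEs for a run of dvma pages
 *
 * Descriptive header added for reference: one TSB entry is written per
 * page, taking the page frame from the mapping's pfn list starting at
 * pfn_index and the attribute bits from the mapping's template TTE.
 *
 * used by (within this file): iommu_map_window(), iommu_remap_pages()
 *
 * return value: none
 */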
void
iommu_map_pages(iommu_t *iommu_p, ddi_dma_impl_t *mp,
		dvma_addr_t dvma_pg, size_t npages, size_t pfn_index)
{
	int i;
	dvma_addr_t pg_index = dvma_pg - iommu_p->dvma_base_pg;
	uint64_t *tte_addr = iommu_p->iommu_tsb_vaddr + pg_index;
	size_t pfn_last = pfn_index + npages;
	uint64_t tte = PCI_GET_MP_TTE(mp->dmai_tte);
#ifdef DEBUG
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
#endif

	ASSERT(pfn_last <= mp->dmai_ndvmapages);

	DEBUG5(DBG_MAP_WIN, dip,
		"iommu_map_pages:%x+%x=%x npages=0x%x pfn_index=0x%x\n",
		(uint_t)iommu_p->dvma_base_pg, (uint_t)pg_index, dvma_pg,
		(uint_t)npages, (uint_t)pfn_index);

	for (i = pfn_index; i < pfn_last; i++, pg_index++, tte_addr++) {
		iopfn_t pfn = PCI_GET_MP_PFN(mp, i);
		volatile uint64_t cur_tte = IOMMU_PTOB(pfn) | tte;

		DEBUG3(DBG_MAP_WIN, dip, "iommu_map_pages: mp=%p pg[%x]=%x\n",
			mp, i, (uint_t)pfn);
		DEBUG3(DBG_MAP_WIN, dip,
			"iommu_map_pages: pg_index=%x tte=%08x.%08x\n",
			pg_index, HI32(cur_tte), LO32(cur_tte));
		ASSERT(TTE_IS_INVALID(*tte_addr));
		*tte_addr = cur_tte;
#ifdef DEBUG
		if (pfn == 0 && pci_warn_pp0)
			cmn_err(CE_WARN, "%s%d <%p> doing DMA to pp0\n",
				ddi_driver_name(mp->dmai_rdip),
				ddi_get_instance(mp->dmai_rdip), mp);
#endif
	}
	ASSERT(tte_addr == iommu_p->iommu_tsb_vaddr + pg_index);
#ifdef DEBUG
	if (HAS_REDZONE(mp)) {
		DEBUG1(DBG_MAP_WIN, dip, "iommu_map_pages: redzone pg=%x\n",
			pg_index);
		ASSERT(TTE_IS_INVALID(iommu_p->iommu_tsb_vaddr[pg_index]));
	}
#endif
	if (DVMA_DBG_ON(iommu_p))
		pci_dvma_alloc_debug(iommu_p, (char *)mp->dmai_mapping,
			mp->dmai_size, mp);
}
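
/*
 * For reference: each TSB entry written by iommu_map_pages() above is
 * simply the page frame converted to a physical page address OR'ed with
 * the attribute bits of the mapping's template TTE, i.e.
 *
 *	tsb[pg_index] = IOMMU_PTOB(pfn) | PCI_GET_MP_TTE(mp->dmai_tte);
 *
 * which is why iommu_remap_pages() below can simply unload and rewrite
 * the affected entries.
 */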

/*
 * iommu_map_window - map a dvma window into the iommu
 *
 * used by: pci_dma_win(), pci_dma_ctlops() - DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
 *
 * return value: none
 */
/*ARGSUSED*/
void
iommu_map_window(iommu_t *iommu_p, ddi_dma_impl_t *mp, window_t win_no)
{
	uint32_t obj_pg0_off = mp->dmai_roffset;
	uint32_t win_pg0_off = win_no ? 0 : obj_pg0_off;
	size_t win_size = mp->dmai_winsize;
	size_t pfn_index = win_size * win_no;			/* temp value */
	size_t obj_off = win_no ? pfn_index - obj_pg0_off : 0;	/* xferred sz */
	dvma_addr_t dvma_pg = IOMMU_BTOP(mp->dmai_mapping);
	size_t res_size = mp->dmai_object.dmao_size - obj_off + win_pg0_off;

	ASSERT(!(win_size & IOMMU_PAGE_OFFSET));
	if (win_no >= mp->dmai_nwin)
		return;
	if (res_size < win_size)		/* last window */
		win_size = res_size;		/* mp->dmai_winsize unchanged */

	mp->dmai_mapping = IOMMU_PTOB(dvma_pg) | win_pg0_off;
	mp->dmai_size = win_size - win_pg0_off;	/* cur win xferrable size */
	mp->dmai_offset = obj_off;		/* win offset into object */
	pfn_index = IOMMU_BTOP(pfn_index);	/* index into pfnlist */
	iommu_map_pages(iommu_p, mp, dvma_pg, IOMMU_BTOPR(win_size), pfn_index);
}
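
/*
 * Worked example for the window arithmetic above (illustrative numbers,
 * assuming the usual 8K IOMMU page size): for a 0x10000-byte object with
 * roffset 0x200, winsize 0x6000 (3 pages) and win_no 2,
 *
 *	pfn_index = 0x6000 * 2		= 0xC000 bytes,
 *	obj_off	  = 0xC000 - 0x200	= 0xBE00 (already transferred),
 *	res_size  = 0x10000 - 0xBE00	= 0x4200 < winsize, i.e. this is
 *					  the last window,
 *
 * so the window maps IOMMU_BTOPR(0x4200) = 3 pages starting at pfn list
 * index IOMMU_BTOP(0xC000) = 6.
 */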

void
iommu_unmap_pages(iommu_t *iommu_p, dvma_addr_t dvma_pg, uint_t npages)
{
	dvma_addr_t pg_index = IOMMU_PAGE_INDEX(iommu_p, dvma_pg);

	for (; npages; npages--, dvma_pg++, pg_index++) {
		DEBUG1(DBG_UNMAP_WIN|DBG_CONT, 0, " %x", dvma_pg);
		IOMMU_UNLOAD_TTE(iommu_p, pg_index);

		if (!tm_mtlb_gc)
			IOMMU_PAGE_FLUSH(iommu_p, dvma_pg);
	}
}

void
iommu_remap_pages(iommu_t *iommu_p, ddi_dma_impl_t *mp, dvma_addr_t dvma_pg,
	size_t npages, size_t pfn_index)
{
	iommu_unmap_pages(iommu_p, dvma_pg, npages);
	iommu_map_pages(iommu_p, mp, dvma_pg, npages, pfn_index);
}

/*
 * iommu_unmap_window
 *
 * This routine is called to break down the iommu mappings to a dvma window.
 * Non-partial mappings are viewed as a single-window mapping.
 *
 * used by: pci_dma_unbindhdl(), pci_dma_window(),
 *	and pci_dma_ctlops() - DDI_DMA_FREE, DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
 *
 * return value: none
 */
/*ARGSUSED*/
void
iommu_unmap_window(iommu_t *iommu_p, ddi_dma_impl_t *mp)
{
	dvma_addr_t dvma_pg = IOMMU_BTOP(mp->dmai_mapping);
	dvma_addr_t pg_index = IOMMU_PAGE_INDEX(iommu_p, dvma_pg);
	uint_t npages = IOMMU_BTOP(mp->dmai_winsize);
#ifdef DEBUG
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
#endif
	/*
	 * Invalidate each page of the mapping in the tsb and flush
	 * it from the tlb.
	 */
	DEBUG2(DBG_UNMAP_WIN, dip, "mp=%p %x pfns:", mp, npages);
	if (mp->dmai_flags & DMAI_FLAGS_CONTEXT) {
		dvma_context_t ctx = MP2CTX(mp);
		for (; npages; npages--, pg_index++) {
			DEBUG1(DBG_UNMAP_WIN|DBG_CONT, dip, " %x", pg_index);
			IOMMU_UNLOAD_TTE(iommu_p, pg_index);
		}
		DEBUG1(DBG_UNMAP_WIN|DBG_CONT, dip, " (context %x)", ctx);
		*iommu_p->iommu_flush_ctx_reg = ctx;
	} else
		iommu_unmap_pages(iommu_p, dvma_pg, npages);

	DEBUG0(DBG_UNMAP_WIN|DBG_CONT, dip, "\n");

	if (DVMA_DBG_ON(iommu_p))
		pci_dvma_free_debug(iommu_p, (char *)mp->dmai_mapping,
			mp->dmai_size, mp);
}
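
/*
 * Note on the context case above: when the mapping was bound with a DVMA
 * context, the TTEs are only unloaded from the TSB and a single write to
 * the context-flush register is then used to purge the TLB entries tagged
 * with that context, rather than the per-page flushes done by
 * iommu_unmap_pages().
 */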

int
pci_alloc_tsb(pci_t *pci_p)
{
	uint16_t tsbc;

	if ((tsbc = iommu_tsb_alloc(pci_p->pci_id)) == IOMMU_TSB_COOKIE_NONE) {
		cmn_err(CE_WARN, "%s%d: Unable to allocate IOMMU TSB.",
		    ddi_driver_name(pci_p->pci_dip),
		    ddi_get_instance(pci_p->pci_dip));
		return (DDI_FAILURE);
	}
	pci_p->pci_tsb_cookie = tsbc;
	return (DDI_SUCCESS);
}

void
pci_free_tsb(pci_t *pci_p)
{
	iommu_tsb_free(pci_p->pci_tsb_cookie);
}

#if 0
/*
 * The following data structure is used to map a tsb size
 * to a tsb size configuration parameter in the iommu
 * control register.
 * This is a hardware table. It is here for reference only.
 */
static int pci_iommu_tsb_sizes[] = {
	0x2000,		/* 0 - 8 mb */
	0x4000,		/* 1 - 16 mb */
	0x8000,		/* 2 - 32 mb */
	0x10000,	/* 3 - 64 mb */
	0x20000,	/* 4 - 128 mb */
	0x40000,	/* 5 - 256 mb */
	0x80000,	/* 6 - 512 mb */
	0x100000	/* 7 - 1 gb */
};
#endif

uint_t
iommu_tsb_size_encode(uint_t tsb_bytes)
{
	uint_t i;

	for (i = 7; i && (tsb_bytes < (0x2000 << i)); i--)
		/* empty */;
	return (i);
}
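
/*
 * For reference: iommu_tsb_size_encode() returns the largest code i
 * (0..7) such that tsb_bytes >= (0x2000 << i), i.e. it rounds odd sizes
 * down to the next supported TSB size.  For example, a 0x10000-byte
 * (8192-entry) TSB encodes to 3, matching the reference table above;
 * anything smaller than 0x2000 bytes encodes to 0.
 */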

/*
 * invalidate IOMMU TLB entries through diagnostic registers.
 */
static void
iommu_tlb_flushall(iommu_t *iommu_p)
{
	int i;
	uint64_t base = (uint64_t)(iommu_p->iommu_ctrl_reg) -
		COMMON_IOMMU_CTRL_REG_OFFSET;
	volatile uint64_t *tlb_tag = (volatile uint64_t *)
			(base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
	volatile uint64_t *tlb_data = (volatile uint64_t *)
			(base + COMMON_IOMMU_TLB_DATA_DIAG_ACC_OFFSET);
	for (i = 0; i < IOMMU_TLB_ENTRIES; i++)
		tlb_tag[i] = tlb_data[i] = 0ull;
}

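/*
 * iommu_preserve_tsb - carry firmware-established DVMA mappings forward
 *
 * Descriptive header added for reference: valid TTEs are copied out of
 * the TSB the firmware left programmed (located via the TSB base address
 * register) into the tail of the kernel's TSB, and the corresponding
 * DVMA pages are permanently reserved in the resource map so they are
 * never handed out again.  This keeps DMA set up before this driver
 * attached (e.g. by OBP) translating the same way across the handoff.
 *
 * used by: iommu_create()
 */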
static void
iommu_preserve_tsb(iommu_t *iommu_p)
{
#ifdef DEBUG
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
#endif
	uint_t i, obp_tsb_entries, obp_tsb_size, base_pg_index;
	uint64_t ctl = *iommu_p->iommu_ctrl_reg;
	uint64_t obp_tsb_pa = *iommu_p->iommu_tsb_base_addr_reg;
	uint64_t *base_tte_addr;

	DEBUG3(DBG_ATTACH, dip,
		"iommu_tsb_base_addr_reg=0x%08x (0x%08x.0x%08x)\n",
		iommu_p->iommu_tsb_base_addr_reg,
		(uint32_t)(*iommu_p->iommu_tsb_base_addr_reg >> 32),
		(uint32_t)(*iommu_p->iommu_tsb_base_addr_reg & 0xffffffff));

	obp_tsb_size = IOMMU_CTL_TO_TSBSIZE(ctl);
	obp_tsb_entries = IOMMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);
	base_pg_index = iommu_p->dvma_end_pg - obp_tsb_entries + 1;
	base_tte_addr = iommu_p->iommu_tsb_vaddr +
		(iommu_p->iommu_tsb_entries - obp_tsb_entries);

	/*
	 * old darwin prom does not set tsb size correctly, bail out.
	 */
	if ((obp_tsb_size == IOMMU_DARWIN_BOGUS_TSBSIZE) &&
		(CHIP_TYPE(iommu_p->iommu_pci_p) == PCI_CHIP_SABRE))
			return;

	DEBUG3(DBG_ATTACH, dip, "iommu_preserve_tsb: kernel info\n"
		"iommu_tsb_vaddr=%08x copy to base_tte_addr=%08x "
		"base_pg_index=%x\n", iommu_p->iommu_tsb_vaddr,
			base_tte_addr, base_pg_index);

	DEBUG3(DBG_ATTACH | DBG_CONT, dip, "iommu_preserve_tsb: obp info "
		"obp_tsb_entries=0x%x obp_tsb_pa=%08x.%08x\n", obp_tsb_entries,
			(uint32_t)(obp_tsb_pa >> 32), (uint32_t)obp_tsb_pa);

	for (i = 0; i < obp_tsb_entries; i++) {
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);
		caddr_t va;

		if (TTE_IS_INVALID(tte)) {
			DEBUG0(DBG_ATTACH | DBG_CONT, dip, ".");
			continue;
		}

		base_tte_addr[i] = tte;
		DEBUG3(DBG_ATTACH | DBG_CONT, dip,
			"\npreserve_tsb: (%x)=%08x.%08x\n", base_tte_addr + i,
			(uint_t)(tte >> 32), (uint_t)(tte & 0xffffffff));

		/*
		 * permanently reserve this page in the dvma address space
		 * resource map
		 */

		va = (caddr_t)(IOMMU_PTOB(base_pg_index + i));
		(void) vmem_xalloc(iommu_p->iommu_dvma_map, IOMMU_PAGE_SIZE,
			IOMMU_PAGE_SIZE, 0, 0, va, va + IOMMU_PAGE_SIZE,
			VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}
}