/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/sunddi.h>
#include <sys/iommulib.h>
#include <sys/amd_iommu.h>
#include <sys/pci_cap.h>
#include <sys/bootconf.h>
#include <sys/ddidmareq.h>

#include "amd_iommu_impl.h"
#include "amd_iommu_acpi.h"
#include "amd_iommu_page_tables.h"

static int amd_iommu_fini(amd_iommu_t *iommu);
static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
static void amd_iommu_stop(amd_iommu_t *iommu);

static int amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip);
static int amd_iommu_allochdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep);
static int amd_iommu_freehdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
static int amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp);
static int amd_iommu_unbindhdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
static int amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
    size_t len, uint_t cache_flags);
static int amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
    off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp);
static int amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, struct ddi_dma_req *dmareq,
    ddi_dma_handle_t *dma_handle);
static int amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
    caddr_t *objpp, uint_t cache_flags);

static int unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
    ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked);

extern void *device_arena_alloc(size_t size, int vm_flag);
extern void device_arena_free(void * vaddr, size_t size);

ddi_dma_attr_t amd_iommu_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)4096,			/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	64,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	64,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
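
/*
 * Note on the attributes above: dma_attr_align of 4096 is the critical
 * field. The device table, command buffer and event log base registers
 * hold only 4K page numbers (see the ">> 12" shifts in
 * amd_iommu_setup_tables_and_buffers()), so the backing memory must be
 * page aligned.
 */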

ddi_device_acc_attr_t amd_iommu_devacc = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

struct iommulib_ops amd_iommulib_ops = {
	IOMMU_OPS_VERSION,
	AMD_IOMMU,
	"AMD IOMMU Vers. 1",
	NULL,
	amd_iommu_probe,
	amd_iommu_allochdl,
	amd_iommu_freehdl,
	amd_iommu_bindhdl,
	amd_iommu_unbindhdl,
	amd_iommu_sync,
	amd_iommu_win,
	amd_iommu_map,
	amd_iommu_mctl
};
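
/*
 * Rough sketch of how this ops vector is used (assuming the usual
 * iommulib dispatch model): once amd_iommu_register() hands it to
 * iommulib_iommu_register(), DDI DMA requests from devices routed to
 * this IOMMU are redirected to the amd_iommu_*() entry points above.
 * Most of them wrap the corresponding iommulib_iommu_dma_*()
 * pass-through routine and add the page-table remapping performed by
 * map_current_window()/unmap_current_window().
 */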

static kmutex_t amd_iommu_pgtable_lock;

static int
amd_iommu_register(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	const char *driver = ddi_driver_name(dip);
	int instance = ddi_get_instance(dip);
	iommulib_ops_t *iommulib_ops;
	iommulib_handle_t handle;
	const char *f = "amd_iommu_register";

	iommulib_ops = kmem_zalloc(sizeof (iommulib_ops_t), KM_SLEEP);

	*iommulib_ops = amd_iommulib_ops;

	iommulib_ops->ilops_data = (void *)iommu;
	iommu->aiomt_iommulib_ops = iommulib_ops;

	if (iommulib_iommu_register(dip, iommulib_ops, &handle)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Register with iommulib "
		    "failed idx=%d", f, driver, instance, iommu->aiomt_idx);
		kmem_free(iommulib_ops, sizeof (iommulib_ops_t));
		return (DDI_FAILURE);
	}

	iommu->aiomt_iommulib_handle = handle;

	return (DDI_SUCCESS);
}

static int
amd_iommu_unregister(amd_iommu_t *iommu)
{
	if (iommu->aiomt_iommulib_handle == NULL) {
		/* we never registered */
		return (DDI_SUCCESS);
	}

	if (iommulib_iommu_unregister(iommu->aiomt_iommulib_handle)
	    != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	kmem_free(iommu->aiomt_iommulib_ops, sizeof (iommulib_ops_t));
	iommu->aiomt_iommulib_ops = NULL;
	iommu->aiomt_iommulib_handle = NULL;

	return (DDI_SUCCESS);
}

static int
amd_iommu_setup_passthru(amd_iommu_t *iommu)
{
	gfx_entry_t *gfxp;
	dev_info_t *dip;

	/*
	 * Setup passthru mapping for "special" devices
	 */
	amd_iommu_set_passthru(iommu, NULL);

	for (gfxp = gfx_devinfo_list; gfxp; gfxp = gfxp->g_next) {
		gfxp->g_ref++;
		dip = gfxp->g_dip;
		if (dip) {
			amd_iommu_set_passthru(iommu, dip);
		}
		gfxp->g_ref--;
	}

	return (DDI_SUCCESS);
}

static int
amd_iommu_start(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	amd_iommu_acpi_ivhd_t *hinfop;
	const char *f = "amd_iommu_start";

	hinfop = amd_iommu_lookup_all_ivhd();

	/*
	 * Disable HT tunnel translation.
	 * XXX use ACPI
	 */
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_HT_TUN_ENABLE, 0);

	if (hinfop) {
		if (amd_iommu_debug) {
			cmn_err(CE_NOTE,
			    "amd_iommu: using ACPI for CTRL registers");
		}
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
		    AMD_IOMMU_ISOC, hinfop->ach_Isoc);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
		    AMD_IOMMU_RESPASSPW, hinfop->ach_ResPassPW);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
		    AMD_IOMMU_PASSPW, hinfop->ach_PassPW);
	}

	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_INVTO, 5);


	/*
	 * Device table entry bit 0 (V) controls whether the entry is
	 * valid for address translation, and bit 128 (IV) controls
	 * whether its interrupt remapping information is valid. With
	 * both bits zero a device is passed through untranslated, so a
	 * table that was simply zeroed on allocation would leave every
	 * device in pass-thru once the IOMMU is enabled. (The table
	 * setup code in amd_iommu_setup_tables_and_buffers() therefore
	 * sets V=1 with TV=0 so that stray DMA faults instead.)
	 */

	/* Finally enable the IOMMU ... */
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_ENABLE, 1);

	if (amd_iommu_debug) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "Successfully started AMD IOMMU", f, driver, instance,
		    iommu->aiomt_idx);
	}
	cmn_err(CE_NOTE, "AMD IOMMU (%d,%d) enabled",
	    instance, iommu->aiomt_idx);

	return (DDI_SUCCESS);
}

static void
amd_iommu_stop(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	const char *f = "amd_iommu_stop";

	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_ENABLE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_EVENTINT_ENABLE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_EVENTLOG_ENABLE, 0);

	/*
	 * Disable translation on HT tunnel traffic
	 */
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_HT_TUN_ENABLE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_CMDBUF_ENABLE, 0);

	cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
	    "Successfully stopped AMD IOMMU", f, driver, instance,
	    iommu->aiomt_idx);
}

static int
amd_iommu_setup_tables_and_buffers(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	uint32_t dma_bufsz;
	caddr_t addr;
	uint32_t sz;
	uint32_t p2sz;
	int i;
	uint64_t *dentry;
	int err;
	const char *f = "amd_iommu_setup_tables_and_buffers";

	/*
	 * We will put the Device Table, Command Buffer and
	 * Event Log in contiguous memory, allocating the maximum
	 * size allowed for each structure:
	 * Device Table:   256 bits * 64K entries = 32B * 64K = 2MB
	 * Command Buffer: 128 bits * 32K entries = 16B * 32K = 512KB
	 * Event Log:      128 bits * 32K entries = 16B * 32K = 512KB
	 */
	iommu->aiomt_devtbl_sz = (1<<AMD_IOMMU_DEVTBL_SZ) * AMD_IOMMU_DEVENT_SZ;
	iommu->aiomt_cmdbuf_sz = (1<<AMD_IOMMU_CMDBUF_SZ) * AMD_IOMMU_CMD_SZ;
	iommu->aiomt_eventlog_sz =
	    (1<<AMD_IOMMU_EVENTLOG_SZ) * AMD_IOMMU_EVENT_SZ;

	dma_bufsz = iommu->aiomt_devtbl_sz + iommu->aiomt_cmdbuf_sz
	    + iommu->aiomt_eventlog_sz;

	/*
	 * Alloc a DMA handle.
	 */
	err = ddi_dma_alloc_handle(dip, &amd_iommu_dma_attr,
	    DDI_DMA_SLEEP, NULL, &iommu->aiomt_dmahdl);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc DMA handle for "
		    "AMD IOMMU tables and buffers", f, driver, instance);
		return (DDI_FAILURE);
	}

	/*
	 * Alloc memory for tables and buffers
	 * XXX remove cast to size_t
	 */
	err = ddi_dma_mem_alloc(iommu->aiomt_dmahdl, dma_bufsz,
	    &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
	    DDI_DMA_SLEEP,  NULL, (caddr_t *)&iommu->aiomt_dma_bufva,
	    (size_t *)&iommu->aiomt_dma_mem_realsz, &iommu->aiomt_dma_mem_hdl);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc memory for DMA "
		    "to AMD IOMMU tables and buffers", f, driver, instance);
		iommu->aiomt_dma_bufva = NULL;
		iommu->aiomt_dma_mem_realsz = 0;
		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
		iommu->aiomt_dmahdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * The VA must be 4K aligned and the allocation at least
	 * the table size
	 */
	ASSERT(((uintptr_t)iommu->aiomt_dma_bufva &
	    AMD_IOMMU_TABLE_ALIGN) == 0);
	ASSERT(iommu->aiomt_dma_mem_realsz >= dma_bufsz);

	/*
	 * Now bind the handle
	 */
	err = ddi_dma_addr_bind_handle(iommu->aiomt_dmahdl, NULL,
	    iommu->aiomt_dma_bufva, iommu->aiomt_dma_mem_realsz,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &iommu->aiomt_buf_dma_cookie, &iommu->aiomt_buf_dma_ncookie);
	if (err != DDI_DMA_MAPPED) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot bind memory for DMA "
		    "to AMD IOMMU tables and buffers. bufrealsz=%p",
		    f, driver, instance,
		    (void *)(uintptr_t)iommu->aiomt_dma_mem_realsz);
		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
		iommu->aiomt_buf_dma_ncookie = 0;
		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
		iommu->aiomt_dma_mem_hdl = NULL;
		iommu->aiomt_dma_bufva = NULL;
		iommu->aiomt_dma_mem_realsz = 0;
		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
		iommu->aiomt_dmahdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * We assume the DMA engine on the IOMMU is capable of handling the
	 * whole table buffer in a single cookie. If not and multiple cookies
	 * are needed we fail.
	 */
	if (iommu->aiomt_buf_dma_ncookie != 1) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot handle multiple "
		    "cookies for DMA to AMD IOMMU tables and buffers. "
		    "#cookies=%u", f, driver, instance,
		    iommu->aiomt_buf_dma_ncookie);
		(void) ddi_dma_unbind_handle(iommu->aiomt_dmahdl);
		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
		iommu->aiomt_buf_dma_ncookie = 0;
		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
		iommu->aiomt_dma_mem_hdl = NULL;
		iommu->aiomt_dma_bufva = NULL;
		iommu->aiomt_dma_mem_realsz = 0;
		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
		iommu->aiomt_dmahdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * The address in the cookie must be 4K aligned and the cookie
	 * must cover at least the table size
	 */
	ASSERT((iommu->aiomt_buf_dma_cookie.dmac_cookie_addr
	    & AMD_IOMMU_TABLE_ALIGN) == 0);
	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size
	    <= iommu->aiomt_dma_mem_realsz);
	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size >= dma_bufsz);

	/*
	 * Setup the device table pointers in the iommu struct as
	 * well as the IOMMU device table register
	 */
	iommu->aiomt_devtbl = iommu->aiomt_dma_bufva;
	bzero(iommu->aiomt_devtbl, iommu->aiomt_devtbl_sz);

	/*
	 * Set V=1 and TV=0, so any inadvertent pass-thrus cause
	 * page faults. Also set the SE bit so we aren't swamped with
	 * page fault messages
	 */
	for (i = 0; i <= AMD_IOMMU_MAX_DEVICEID; i++) {
		/*LINTED*/
		dentry = (uint64_t *)&iommu->aiomt_devtbl
		    [i * AMD_IOMMU_DEVTBL_ENTRY_SZ];
		AMD_IOMMU_REG_SET64(dentry, AMD_IOMMU_DEVTBL_V, 1);
		AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SE, 1);
	}

	addr = (caddr_t)(uintptr_t)iommu->aiomt_buf_dma_cookie.dmac_cookie_addr;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
	    AMD_IOMMU_DEVTABBASE, ((uint64_t)(uintptr_t)addr) >> 12);
	sz = (iommu->aiomt_devtbl_sz >> 12) - 1;
	ASSERT(sz <= ((1 << 9) - 1));
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
	    AMD_IOMMU_DEVTABSIZE, sz);

	/*
	 * Setup the command buffer pointers
	 */
	iommu->aiomt_cmdbuf = iommu->aiomt_devtbl +
	    iommu->aiomt_devtbl_sz;
	bzero(iommu->aiomt_cmdbuf, iommu->aiomt_cmdbuf_sz);
	addr += iommu->aiomt_devtbl_sz;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
	    AMD_IOMMU_COMBASE, ((uint64_t)(uintptr_t)addr) >> 12);

	p2sz = AMD_IOMMU_CMDBUF_SZ;
	ASSERT(p2sz >= AMD_IOMMU_CMDBUF_MINSZ &&
	    p2sz <= AMD_IOMMU_CMDBUF_MAXSZ);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
	    AMD_IOMMU_COMLEN, p2sz);
	/*LINTED*/
	iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
	    AMD_IOMMU_CMDHEADPTR, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
	    AMD_IOMMU_CMDTAILPTR, 0);

	/*
	 * Setup the event log pointers
	 */
	iommu->aiomt_eventlog = iommu->aiomt_cmdbuf +
	    iommu->aiomt_cmdbuf_sz;
	bzero(iommu->aiomt_eventlog, iommu->aiomt_eventlog_sz);
	addr += iommu->aiomt_cmdbuf_sz;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
	    AMD_IOMMU_EVENTBASE, ((uint64_t)(uintptr_t)addr) >> 12);
	p2sz = AMD_IOMMU_EVENTLOG_SZ;
	ASSERT(p2sz >= AMD_IOMMU_EVENTLOG_MINSZ &&
	    p2sz <= AMD_IOMMU_EVENTLOG_MAXSZ);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
	    AMD_IOMMU_EVENTLEN, p2sz);
	/*LINTED*/
	iommu->aiomt_event_head = (uint32_t *)iommu->aiomt_eventlog;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
	    AMD_IOMMU_EVENTHEADPTR, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
	    AMD_IOMMU_EVENTTAILPTR, 0);

	/* dma sync so device sees this init */
	SYNC_FORDEV(iommu->aiomt_dmahdl);

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_TABLES) {
		cmn_err(CE_NOTE, "%s: %s%d: successfully setup AMD IOMMU "
		    "tables, idx=%d", f, driver, instance, iommu->aiomt_idx);
	}

	return (DDI_SUCCESS);
}
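
/*
 * Illustrative layout of the single allocation set up above, assuming
 * the maximum sizes given in the comment at the top of the function
 * (the actual values depend on the AMD_IOMMU_*_SZ macros):
 *
 *	offset 0		Device Table	(2MB)
 *	offset 2MB		Command Buffer	(512KB)
 *	offset 2MB + 512KB	Event Log	(512KB)
 *
 * The same offsets are applied to the VA (aiomt_devtbl and friends)
 * and to the cookie PA programmed into the base registers; both are
 * shifted right by 12 because the registers hold 4K page numbers.
 */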

static void
amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	const char *f = "amd_iommu_teardown_tables_and_buffers";

	iommu->aiomt_eventlog = NULL;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
	    AMD_IOMMU_EVENTBASE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
	    AMD_IOMMU_EVENTLEN, 0);

	iommu->aiomt_cmdbuf = NULL;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
	    AMD_IOMMU_COMBASE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
	    AMD_IOMMU_COMLEN, 0);

	iommu->aiomt_devtbl = NULL;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
	    AMD_IOMMU_DEVTABBASE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
	    AMD_IOMMU_DEVTABSIZE, 0);

	if (iommu->aiomt_dmahdl == NULL)
		return;

	/* Unbind the handle */
	if (ddi_dma_unbind_handle(iommu->aiomt_dmahdl) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed to unbind handle: "
		    "%p for IOMMU idx=%d", f, driver, instance,
		    (void *)iommu->aiomt_dmahdl, iommu->aiomt_idx);
	}
	iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
	iommu->aiomt_buf_dma_cookie.dmac_size = 0;
	iommu->aiomt_buf_dma_cookie.dmac_type = 0;
	iommu->aiomt_buf_dma_ncookie = 0;

	/* Free the table memory allocated for DMA */
	ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
	iommu->aiomt_dma_mem_hdl = NULL;
	iommu->aiomt_dma_bufva = NULL;
	iommu->aiomt_dma_mem_realsz = 0;

	/* Free the DMA handle */
	ddi_dma_free_handle(&iommu->aiomt_dmahdl);
	iommu->aiomt_dmahdl = NULL;
}

static void
amd_iommu_enable_interrupts(amd_iommu_t *iommu)
{
	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
	    AMD_IOMMU_CMDBUF_RUN) == 0);
	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
	    AMD_IOMMU_EVENT_LOG_RUN) == 0);

	/* The command buffer must be enabled before event logging below */
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_CMDBUF_ENABLE, 1);
	/* No interrupts for completion wait - too heavyweight; use polling */
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_EVENTLOG_ENABLE, 1);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_EVENTINT_ENABLE, 1);
}

static int
amd_iommu_setup_exclusion(amd_iommu_t *iommu)
{
	amd_iommu_acpi_ivmd_t *minfop;

	minfop = amd_iommu_lookup_all_ivmd();

	if (minfop && minfop->acm_ExclRange == 1) {
		cmn_err(CE_NOTE, "Programming exclusion range");
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_ADDR,
		    minfop->acm_ivmd_phys_start >> 12);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_EXEN, 1);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
		    AMD_IOMMU_EXCL_LIM, (minfop->acm_ivmd_phys_start +
		    minfop->acm_ivmd_phys_len) >> 12);
	} else {
		if (amd_iommu_debug) {
			cmn_err(CE_NOTE, "Skipping exclusion range");
		}
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_ADDR, 0);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_EXEN, 0);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
		    AMD_IOMMU_EXCL_LIM, 0);
	}

	return (DDI_SUCCESS);
}
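
/*
 * Worked example with hypothetical IVMD values: for an exclusion
 * region with acm_ivmd_phys_start of 0x100000 and acm_ivmd_phys_len of
 * 0x200000, the code above programs EXCL_BASE_ADDR with 0x100 and
 * EXCL_LIM with 0x300; both registers hold 4K page numbers, and as
 * programmed here the limit is the first page past the excluded
 * region. Device accesses falling in the range are excluded from
 * translation.
 */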

static void
amd_iommu_teardown_exclusion(amd_iommu_t *iommu)
{
	(void) amd_iommu_setup_exclusion(iommu);
}

static uint_t
amd_iommu_intr_handler(caddr_t arg1, caddr_t arg2)
{
	/*LINTED*/
	amd_iommu_t *iommu = (amd_iommu_t *)arg1;
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	const char *f = "amd_iommu_intr_handler";

	ASSERT(arg1);
	ASSERT(arg2 == NULL);

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d. In INTR handler",
		    f, driver, instance, iommu->aiomt_idx);
	}

	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
	    AMD_IOMMU_EVENT_LOG_INT) == 1) {
		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
			cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d "
			    "Event Log Interrupt", f, driver, instance,
			    iommu->aiomt_idx);
		}
		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISPLAY);
		WAIT_SEC(1);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
		    AMD_IOMMU_EVENT_LOG_INT, 1);
		return (DDI_INTR_CLAIMED);
	}

	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
	    AMD_IOMMU_EVENT_OVERFLOW_INT) == 1) {
		cmn_err(CE_NOTE, "!%s: %s%d: IOMMU unit idx=%d "
		    "Event Overflow Interrupt", f, driver, instance,
		    iommu->aiomt_idx);
		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISCARD);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
		    AMD_IOMMU_EVENT_LOG_INT, 1);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
		    AMD_IOMMU_EVENT_OVERFLOW_INT, 1);
		return (DDI_INTR_CLAIMED);
	}

	return (DDI_INTR_UNCLAIMED);
}


static int
amd_iommu_setup_interrupts(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	int intrcap0;
	int intrcapN;
	int type;
	int err;
	int req;
	int avail;
	int p2req;
	int actual;
	int i;
	int j;
	const char *f = "amd_iommu_setup_interrupts";

	if (ddi_intr_get_supported_types(dip, &type) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: ddi_intr_get_supported_types "
		    "failed: idx=%d", f, driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "Interrupt types supported = 0x%x", f, driver, instance,
		    iommu->aiomt_idx, type);
	}

	/*
	 * for now we only support MSI
	 */
	if ((type & DDI_INTR_TYPE_MSI) == 0) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
		    "MSI interrupts not supported. Failing init.",
		    f, driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. MSI supported",
		    f, driver, instance, iommu->aiomt_idx);
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &req);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
		    "ddi_intr_get_nintrs failed err = %d",
		    f, driver, instance, iommu->aiomt_idx, err);
		return (DDI_FAILURE);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "MSI number of interrupts requested: %d",
		    f, driver, instance, iommu->aiomt_idx, req);
	}

	if (req == 0) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
		    "interrupts requested. Failing init", f,
		    driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	err = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &avail);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d "
		    "ddi_intr_get_navail failed err = %d", f,
		    driver, instance, iommu->aiomt_idx, err);
		return (DDI_FAILURE);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "MSI number of interrupts available: %d",
		    f, driver, instance, iommu->aiomt_idx, avail);
	}

	if (avail == 0) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
		    "interrupts available. Failing init", f,
		    driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	if (avail < req) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: MSI "
		    "interrupts: requested (%d) > available (%d). "
		    "Failing init", f, driver, instance, iommu->aiomt_idx,
		    req, avail);
		return (DDI_FAILURE);
	}

	/* Allocate memory for DDI interrupt handles */
	iommu->aiomt_intr_htable_sz = req * sizeof (ddi_intr_handle_t);
	iommu->aiomt_intr_htable = kmem_zalloc(iommu->aiomt_intr_htable_sz,
	    KM_SLEEP);

	iommu->aiomt_intr_state = AMD_IOMMU_INTR_TABLE;

	/* Convert req to a power of two as required by ddi_intr_alloc */
	p2req = 0;
	while (1<<p2req <= req)
		p2req++;
	p2req--;
	req = 1<<p2req;
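	/*
	 * For example, req = 3 exits the loop with p2req = 2, the
	 * decrement yields p2req = 1, and req becomes 2 - the largest
	 * power of two not exceeding the original request. An exact
	 * power of two (e.g. req = 4) is preserved unchanged.
	 */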

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "MSI power of 2 number of interrupts: %d,%d",
		    f, driver, instance, iommu->aiomt_idx, p2req, req);
	}

	err = ddi_intr_alloc(iommu->aiomt_dip, iommu->aiomt_intr_htable,
	    DDI_INTR_TYPE_MSI, 0, req, &actual, DDI_INTR_ALLOC_STRICT);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
		    "ddi_intr_alloc failed: err = %d",
		    f, driver, instance, iommu->aiomt_idx, err);
		amd_iommu_teardown_interrupts(iommu);
		return (DDI_FAILURE);
	}

	iommu->aiomt_actual_intrs = actual;
	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ALLOCED;

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "number of interrupts actually allocated %d",
		    f, driver, instance, iommu->aiomt_idx, actual);
	}

	if (iommu->aiomt_actual_intrs < req) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
		    "ddi_intr_alloc failed: actual (%d) < req (%d)",
		    f, driver, instance, iommu->aiomt_idx,
		    iommu->aiomt_actual_intrs, req);
		amd_iommu_teardown_interrupts(iommu);
		return (DDI_FAILURE);
	}

	for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
		if (ddi_intr_add_handler(iommu->aiomt_intr_htable[i],
		    amd_iommu_intr_handler, (void *)iommu, NULL)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
			    "ddi_intr_add_handler failed: intr = %d, err = %d",
			    f, driver, instance, iommu->aiomt_idx, i, err);
			for (j = 0; j < i; j++) {
				(void) ddi_intr_remove_handler(
				    iommu->aiomt_intr_htable[j]);
			}
			amd_iommu_teardown_interrupts(iommu);
			return (DDI_FAILURE);
		}
	}
	iommu->aiomt_intr_state = AMD_IOMMU_INTR_HANDLER;

	intrcap0 = intrcapN = -1;
	if (ddi_intr_get_cap(iommu->aiomt_intr_htable[0], &intrcap0)
	    != DDI_SUCCESS ||
	    ddi_intr_get_cap(
	    iommu->aiomt_intr_htable[iommu->aiomt_actual_intrs - 1], &intrcapN)
	    != DDI_SUCCESS || intrcap0 != intrcapN) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
		    "ddi_intr_get_cap failed or inconsistent cap among "
		    "interrupts: intrcap0 (%d) != intrcapN (%d)",
		    f, driver, instance, iommu->aiomt_idx, intrcap0, intrcapN);
		amd_iommu_teardown_interrupts(iommu);
		return (DDI_FAILURE);
	}
	iommu->aiomt_intr_cap = intrcap0;

	if (intrcap0 & DDI_INTR_FLAG_BLOCK) {
		/* Need to call block enable */
		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
			    "Need to call block enable",
			    f, driver, instance, iommu->aiomt_idx);
		}
		if (ddi_intr_block_enable(iommu->aiomt_intr_htable,
		    iommu->aiomt_actual_intrs) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
			    "ddi_intr_block_enable failed", f, driver,
			    instance, iommu->aiomt_idx);
			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
			    iommu->aiomt_actual_intrs);
			amd_iommu_teardown_interrupts(iommu);
			return (DDI_FAILURE);
		}
	} else {
		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
			    "Need to call individual enable",
			    f, driver, instance, iommu->aiomt_idx);
		}
		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
			if (ddi_intr_enable(iommu->aiomt_intr_htable[i])
			    != DDI_SUCCESS) {
				cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
				    "ddi_intr_enable failed: intr = %d", f,
				    driver, instance, iommu->aiomt_idx, i);
				for (j = 0; j < i; j++) {
					(void) ddi_intr_disable(
					    iommu->aiomt_intr_htable[j]);
				}
				amd_iommu_teardown_interrupts(iommu);
				return (DDI_FAILURE);
			}
		}
	}
	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ENABLED;

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
		    "Interrupts successfully %s enabled. # of interrupts = %d",
		    f, driver, instance, iommu->aiomt_idx,
		    (intrcap0 & DDI_INTR_FLAG_BLOCK) ? "(block)" :
		    "(individually)", iommu->aiomt_actual_intrs);
	}

	return (DDI_SUCCESS);
}

static void
amd_iommu_teardown_interrupts(amd_iommu_t *iommu)
{
	int i;

	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ENABLED) {
		if (iommu->aiomt_intr_cap & DDI_INTR_FLAG_BLOCK) {
			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
			    iommu->aiomt_actual_intrs);
		} else {
			for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
				(void) ddi_intr_disable(
				    iommu->aiomt_intr_htable[i]);
			}
		}
	}

	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_HANDLER) {
		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
			(void) ddi_intr_remove_handler(
			    iommu->aiomt_intr_htable[i]);
		}
	}

	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ALLOCED) {
		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
			(void) ddi_intr_free(iommu->aiomt_intr_htable[i]);
		}
	}
	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_TABLE) {
		kmem_free(iommu->aiomt_intr_htable,
		    iommu->aiomt_intr_htable_sz);
	}
	iommu->aiomt_intr_htable = NULL;
	iommu->aiomt_intr_htable_sz = 0;
	iommu->aiomt_intr_state = AMD_IOMMU_INTR_INVALID;
}

static amd_iommu_t *
amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
    uint16_t cap_base)
{
	amd_iommu_t *iommu;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	uint32_t caphdr;
	uint32_t low_addr32;
	uint32_t hi_addr32;
	uint32_t range;
	uint32_t misc;
	uint64_t pgoffset;
	amd_iommu_acpi_global_t *global;
	amd_iommu_acpi_ivhd_t *hinfop;
	const char *f = "amd_iommu_init";

	global = amd_iommu_lookup_acpi_global();
	hinfop = amd_iommu_lookup_any_ivhd();

	low_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
	    AMD_IOMMU_CAP_ADDR_LOW_OFF);
	if (!(low_addr32 & AMD_IOMMU_REG_ADDR_LOCKED)) {
		cmn_err(CE_WARN, "%s: %s%d: capability registers not locked. "
		    "Unable to use IOMMU unit idx=%d - skipping ...", f, driver,
		    instance, idx);
		return (NULL);
	}

	iommu = kmem_zalloc(sizeof (amd_iommu_t), KM_SLEEP);
	mutex_init(&iommu->aiomt_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_enter(&iommu->aiomt_mutex);

	mutex_init(&iommu->aiomt_cmdlock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&iommu->aiomt_eventlock, NULL, MUTEX_DRIVER, NULL);

	iommu->aiomt_dip = dip;
	iommu->aiomt_idx = idx;

	/*
	 * Since everything in the capability block is locked and RO at this
	 * point, copy everything into the IOMMU struct
	 */

	/* Get cap header */
	caphdr = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_HDR_OFF);
	iommu->aiomt_cap_hdr = caphdr;
	iommu->aiomt_npcache = AMD_IOMMU_REG_GET32(&caphdr,
	    AMD_IOMMU_CAP_NPCACHE);
	iommu->aiomt_httun = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_HTTUN);

	if (hinfop)
		iommu->aiomt_iotlb = hinfop->ach_IotlbSup;
	else
		iommu->aiomt_iotlb =
		    AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_IOTLB);

	iommu->aiomt_captype = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
	iommu->aiomt_capid = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);

	/*
	 * Get address of IOMMU control registers
	 */
	hi_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
	    AMD_IOMMU_CAP_ADDR_HI_OFF);
	iommu->aiomt_low_addr32 = low_addr32;
	iommu->aiomt_hi_addr32 = hi_addr32;
	low_addr32 &= ~AMD_IOMMU_REG_ADDR_LOCKED;

	if (hinfop) {
		iommu->aiomt_reg_pa =  hinfop->ach_IOMMU_reg_base;
		ASSERT(hinfop->ach_IOMMU_pci_seg == 0);
	} else {
		iommu->aiomt_reg_pa =  ((uint64_t)hi_addr32 << 32 | low_addr32);
	}

	/*
	 * Get cap range reg
	 */
	range = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_RANGE_OFF);
	iommu->aiomt_range = range;
	iommu->aiomt_rng_valid = AMD_IOMMU_REG_GET32(&range,
	    AMD_IOMMU_RNG_VALID);
	if (iommu->aiomt_rng_valid) {
		iommu->aiomt_rng_bus = AMD_IOMMU_REG_GET32(&range,
		    AMD_IOMMU_RNG_BUS);
		iommu->aiomt_first_devfn = AMD_IOMMU_REG_GET32(&range,
		    AMD_IOMMU_FIRST_DEVFN);
		iommu->aiomt_last_devfn = AMD_IOMMU_REG_GET32(&range,
		    AMD_IOMMU_LAST_DEVFN);
	} else {
		iommu->aiomt_rng_bus = 0;
		iommu->aiomt_first_devfn = 0;
		iommu->aiomt_last_devfn = 0;
	}

	if (hinfop)
		iommu->aiomt_ht_unitid = hinfop->ach_IOMMU_UnitID;
	else
		iommu->aiomt_ht_unitid = AMD_IOMMU_REG_GET32(&range,
		    AMD_IOMMU_HT_UNITID);

	/*
	 * Get cap misc reg
	 */
	misc = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_MISC_OFF);
	iommu->aiomt_misc = misc;

	if (global) {
		iommu->aiomt_htatsresv = global->acg_HtAtsResv;
		iommu->aiomt_vasize = global->acg_VAsize;
		iommu->aiomt_pasize = global->acg_PAsize;
	} else {
		iommu->aiomt_htatsresv = AMD_IOMMU_REG_GET32(&misc,
		    AMD_IOMMU_HT_ATSRSV);
		iommu->aiomt_vasize = AMD_IOMMU_REG_GET32(&misc,
		    AMD_IOMMU_VA_SIZE);
		iommu->aiomt_pasize = AMD_IOMMU_REG_GET32(&misc,
		    AMD_IOMMU_PA_SIZE);
	}

	if (hinfop) {
		iommu->aiomt_msinum = hinfop->ach_IOMMU_MSInum;
	} else {
		iommu->aiomt_msinum =
		    AMD_IOMMU_REG_GET32(&misc, AMD_IOMMU_MSINUM);
	}

	/*
	 * Set up mapping between control registers PA and VA
	 */
	pgoffset = iommu->aiomt_reg_pa & MMU_PAGEOFFSET;
	ASSERT(pgoffset == 0);
	iommu->aiomt_reg_pages = mmu_btopr(AMD_IOMMU_REG_SIZE + pgoffset);
	iommu->aiomt_reg_size = mmu_ptob(iommu->aiomt_reg_pages);

	iommu->aiomt_va = (uintptr_t)device_arena_alloc(
	    ptob(iommu->aiomt_reg_pages), VM_SLEEP);
	if (iommu->aiomt_va == 0) {
		cmn_err(CE_WARN, "%s: %s%d: Failed to alloc VA for IOMMU "
		    "control regs. Skipping IOMMU idx=%d", f, driver,
		    instance, idx);
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu);
		return (NULL);
	}

	hat_devload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
	    iommu->aiomt_reg_size,
	    mmu_btop(iommu->aiomt_reg_pa), PROT_READ | PROT_WRITE
	    | HAT_STRICTORDER, HAT_LOAD_LOCK);

	iommu->aiomt_reg_va = iommu->aiomt_va + pgoffset;

	/*
	 * Set up the various control registers' VAs
	 */
	iommu->aiomt_reg_devtbl_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_DEVTBL_REG_OFF;
	iommu->aiomt_reg_cmdbuf_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_CMDBUF_REG_OFF;
	iommu->aiomt_reg_eventlog_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_EVENTLOG_REG_OFF;
	iommu->aiomt_reg_ctrl_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_CTRL_REG_OFF;
	iommu->aiomt_reg_excl_base_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_EXCL_BASE_REG_OFF;
	iommu->aiomt_reg_excl_lim_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_EXCL_LIM_REG_OFF;
	iommu->aiomt_reg_cmdbuf_head_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_CMDBUF_HEAD_REG_OFF;
	iommu->aiomt_reg_cmdbuf_tail_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_CMDBUF_TAIL_REG_OFF;
	iommu->aiomt_reg_eventlog_head_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_EVENTLOG_HEAD_REG_OFF;
	iommu->aiomt_reg_eventlog_tail_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_EVENTLOG_TAIL_REG_OFF;
	iommu->aiomt_reg_status_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_STATUS_REG_OFF;


	/*
	 * Setup the DEVICE table, CMD buffer, and LOG buffer in
	 * memory and setup DMA access to this memory location
	 */
	if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu);
		return (NULL);
	}

	if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu);
		return (NULL);
	}

	amd_iommu_enable_interrupts(iommu);

	if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu);
		return (NULL);
	}

	/*
	 * Need to set up the domain table before the gfx bypass below
	 */
	amd_iommu_init_page_tables(iommu);

	/*
	 * Set pass-thru for special devices like IOAPIC and HPET
	 *
	 * Also, gfx devices don't use DDI for DMA. No need to register
	 * before setting up gfx passthru
	 */
	if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu);
		return (NULL);
	}

	if (amd_iommu_start(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu);
		return (NULL);
	}

	/* xxx register/start race  */
	if (amd_iommu_register(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu);
		return (NULL);
	}

	if (amd_iommu_debug) {
		cmn_err(CE_NOTE, "%s: %s%d: IOMMU idx=%d inited.", f, driver,
		    instance, idx);
	}

	return (iommu);
}

static int
amd_iommu_fini(amd_iommu_t *iommu)
{
	int idx = iommu->aiomt_idx;
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	const char *f = "amd_iommu_fini";

	mutex_enter(&iommu->aiomt_mutex);
	if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
		    "idx = %d", f, driver, instance, idx);
		return (DDI_FAILURE);
	}
	amd_iommu_stop(iommu);
	amd_iommu_fini_page_tables(iommu);
	amd_iommu_teardown_interrupts(iommu);
	amd_iommu_teardown_exclusion(iommu);
	amd_iommu_teardown_tables_and_buffers(iommu);
	if (iommu->aiomt_va != NULL) {
		hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
		    iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
		device_arena_free((void *)(uintptr_t)iommu->aiomt_va,
		    ptob(iommu->aiomt_reg_pages));
		iommu->aiomt_va = NULL;
		iommu->aiomt_reg_va = NULL;
	}
	mutex_destroy(&iommu->aiomt_eventlock);
	mutex_destroy(&iommu->aiomt_cmdlock);
	mutex_exit(&iommu->aiomt_mutex);
	mutex_destroy(&iommu->aiomt_mutex);
	kmem_free(iommu, sizeof (amd_iommu_t));

	cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit complete. idx = %d",
	    f, driver, instance, idx);

	return (DDI_SUCCESS);
}

int
amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
{
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	ddi_acc_handle_t handle;
	uint8_t base_class;
	uint8_t sub_class;
	uint8_t prog_class;
	int idx;
	uint32_t id;
	uint16_t cap_base;
	uint32_t caphdr;
	uint8_t cap_type;
	uint8_t cap_id;
	amd_iommu_t *iommu;
	const char *f = "amd_iommu_setup";

	ASSERT(instance >= 0);
	ASSERT(driver);

	/* First setup PCI access to config space */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: PCI config setup failed: %s%d",
		    f, driver, instance);
		return (DDI_FAILURE);
	}

	/*
	 * The AMD IOMMU is part of an independent PCI function. There may be
	 * more than one IOMMU in that PCI function
	 */
	base_class = pci_config_get8(handle, PCI_CONF_BASCLASS);
	sub_class = pci_config_get8(handle, PCI_CONF_SUBCLASS);
	prog_class = pci_config_get8(handle, PCI_CONF_PROGCLASS);

	if (base_class != PCI_CLASS_PERIPH || sub_class != PCI_PERIPH_IOMMU ||
	    prog_class != AMD_IOMMU_PCI_PROG_IF) {
		cmn_err(CE_WARN, "%s: %s%d: invalid PCI class(0x%x)/"
		    "subclass(0x%x)/programming interface(0x%x)", f, driver,
		    instance, base_class, sub_class, prog_class);
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	/*
	 * Find and initialize all IOMMU units in this function
	 */
	for (idx = 0; ; idx++) {
		if (pci_cap_probe(handle, idx, &id, &cap_base) != DDI_SUCCESS)
			break;

		/* check if cap ID is secure device cap id */
		if (id != PCI_CAP_ID_SECURE_DEV) {
			if (amd_iommu_debug) {
				cmn_err(CE_WARN,
				    "%s: %s%d: skipping IOMMU: idx(0x%x) "
				    "cap ID (0x%x) != secure dev capid (0x%x)",
				    f, driver, instance, idx, id,
				    PCI_CAP_ID_SECURE_DEV);
			}
			continue;
		}

		/* check if cap type is IOMMU cap type */
		caphdr = PCI_CAP_GET32(handle, 0, cap_base,
		    AMD_IOMMU_CAP_HDR_OFF);
		cap_type = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
		cap_id = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);

		if (cap_type != AMD_IOMMU_CAP) {
			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
			    "cap type (0x%x) != AMD IOMMU CAP (0x%x)", f,
			    driver, instance, idx, cap_type, AMD_IOMMU_CAP);
			continue;
		}
		ASSERT(cap_id == PCI_CAP_ID_SECURE_DEV);
		ASSERT(cap_id == id);

		iommu = amd_iommu_init(dip, handle, idx, cap_base);
		if (iommu == NULL) {
			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
			    "failed to init IOMMU", f,
			    driver, instance, idx);
			continue;
		}

		if (statep->aioms_iommu_start == NULL) {
			statep->aioms_iommu_start = iommu;
		} else {
			statep->aioms_iommu_end->aiomt_next = iommu;
		}
		statep->aioms_iommu_end = iommu;

		statep->aioms_nunits++;
	}

	pci_config_teardown(&handle);

	if (amd_iommu_debug) {
		cmn_err(CE_NOTE, "%s: %s%d: state=%p: setup %d IOMMU units",
		    f, driver, instance, (void *)statep, statep->aioms_nunits);
	}

	return (DDI_SUCCESS);
}

int
amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep)
{
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	amd_iommu_t *iommu;
	int teardown;
	int error = DDI_SUCCESS;
	const char *f = "amd_iommu_teardown";

	teardown = 0;
	for (iommu = statep->aioms_iommu_start; iommu;
	    iommu = iommu->aiomt_next) {
		ASSERT(statep->aioms_nunits > 0);
		if (amd_iommu_fini(iommu) != DDI_SUCCESS) {
			error = DDI_FAILURE;
			continue;
		}
		statep->aioms_nunits--;
		teardown++;
	}

	cmn_err(CE_NOTE, "%s: %s%d: state=%p: tore down %d units. "
	    "%d units left", f, driver, instance, (void *)statep,
	    teardown, statep->aioms_nunits);

	return (error);
}

/* Interface with IOMMULIB */
/*ARGSUSED*/
static int
amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip)
{
	const char *driver = ddi_driver_name(rdip);
	char *s;
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);

	if (amd_iommu_disable_list) {
		s = strstr(amd_iommu_disable_list, driver);
		if (s == NULL)
			return (DDI_SUCCESS);
		if (s == amd_iommu_disable_list || *(s - 1) == ':') {
			s += strlen(driver);
			if (*s == '\0' || *s == ':') {
				amd_iommu_set_passthru(iommu, rdip);
				return (DDI_FAILURE);
			}
		}
	}

	return (DDI_SUCCESS);
}
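
/*
 * Example of the disable-list matching above, with hypothetical driver
 * names: given amd_iommu_disable_list = "ahci:nvidia", a probe for
 * "ahci" matches at the start of the list and "nvidia" matches right
 * after the ':' separator, so both devices are placed in pass-thru
 * mode and DDI_FAILURE tells iommulib not to use this IOMMU for them.
 * A probe for "hci" fails the boundary checks (the character before
 * the match is neither the list start nor ':'), so translation stays
 * in effect for it.
 */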

/*ARGSUSED*/
static int
amd_iommu_allochdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep)
{
	return (iommulib_iommu_dma_allochdl(dip, rdip, attr, waitfp,
	    arg, dma_handlep));
}

/*ARGSUSED*/
static int
amd_iommu_freehdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
{
	return (iommulib_iommu_dma_freehdl(dip, rdip, dma_handle));
}

/*ARGSUSED*/
static int
map_current_window(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookie_array, uint_t ccount,
    int km_flags)
{
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	int idx = iommu->aiomt_idx;
	int i;
	uint64_t start_va;
	char *path;
	int error = DDI_FAILURE;
	const char *f = "map_current_window";

	path = kmem_alloc(MAXPATHLEN, km_flags);
	if (path == NULL) {
		return (DDI_DMA_NORESOURCES);
	}

	(void) ddi_pathname(rdip, path);
	mutex_enter(&amd_iommu_pgtable_lock);

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d Attempting to get cookies "
		    "from handle for device %s",
		    f, driver, instance, idx, path);
	}

	start_va = 0;
	for (i = 0; i < ccount; i++) {
		if ((error = amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq,
		    cookie_array[i].dmac_cookie_addr,
		    cookie_array[i].dmac_size,
		    AMD_IOMMU_VMEM_MAP, &start_va, km_flags)) != DDI_SUCCESS) {
			break;
		}
		cookie_array[i].dmac_cookie_addr = (uintptr_t)start_va;
		cookie_array[i].dmac_type = 0;
	}

	if (i != ccount) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot map cookie# %d "
		    "for device %s", f, driver, instance, idx, i, path);
		(void) unmap_current_window(iommu, rdip, cookie_array,
		    ccount, i, 1);
		goto out;
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
		cmn_err(CE_NOTE, "%s: return SUCCESS", f);
	}

	error = DDI_DMA_MAPPED;
out:
	mutex_exit(&amd_iommu_pgtable_lock);
	kmem_free(path, MAXPATHLEN);
	return (error);
}

/*ARGSUSED*/
static int
unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
    ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked)
{
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	int idx = iommu->aiomt_idx;
	int i;
	int error = DDI_FAILURE;
	char *path;
	int pathfree;
	const char *f = "unmap_current_window";

	if (!locked)
		mutex_enter(&amd_iommu_pgtable_lock);

	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
	if (path) {
		(void) ddi_pathname(rdip, path);
		pathfree = 1;
	} else {
		path = "<path-mem-alloc-failed>";
		pathfree = 0;
	}

	if (ncookies == -1)
		ncookies = ccount;

	for (i = 0; i < ncookies; i++) {
		if (amd_iommu_unmap_va(iommu, rdip,
		    cookie_array[i].dmac_cookie_addr,
		    cookie_array[i].dmac_size,
		    AMD_IOMMU_VMEM_MAP) != DDI_SUCCESS) {
			break;
		}
	}

	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT, NULL, 0, 0)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: AMD IOMMU completion wait failed for: %s",
		    f, path);
	}

	if (i != ncookies) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot unmap cookie# %d "
		    "for device %s", f, driver, instance, idx, i, path);
		error = DDI_FAILURE;
		goto out;
	}

	error = DDI_SUCCESS;

out:
	if (pathfree)
		kmem_free(path, MAXPATHLEN);
	if (!locked)
		mutex_exit(&amd_iommu_pgtable_lock);
	return (error);
}

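/*
 * Sketch of the bind path below: the root-nexus bind is performed
 * first through iommulib's pass-through, the resulting cookies (device
 * physical addresses) are fetched from the handle, rewritten in place
 * by map_current_window() so that each dmac_cookie_addr becomes an
 * IOMMU virtual address backed by this unit's page tables, and the
 * rewritten cookies are stored back into the handle before the first
 * one is returned to the caller.
 */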
/*ARGSUSED*/
static int
amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
	int dma_error = DDI_DMA_NOMAPPING;
	int error;
	char *path;
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	ddi_dma_impl_t *hp;
	ddi_dma_attr_t *attrp;
	int km_flags;
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "amd_iommu_bindhdl";

	dma_error = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
	    dmareq, cookiep, ccountp);

	if (dma_error != DDI_DMA_MAPPED && dma_error != DDI_DMA_PARTIAL_MAP)
		return (dma_error);

	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);

	path = kmem_alloc(MAXPATHLEN, km_flags);
	if (path) {
		(void) ddi_pathname(rdip, path);
	} else {
		dma_error = DDI_DMA_NORESOURCES;
		goto unbind;
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
		cmn_err(CE_NOTE, "%s: %s got cookie (%p), #cookies: %d",
		    f, path,
		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
		    *ccountp);
	}

	cookie_array = NULL;
	ccount = 0;
	if ((error = iommulib_iommu_dma_get_cookies(dip, dma_handle,
	    &cookie_array, &ccount)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %s", f, driver, instance, path);
		dma_error = error;
		goto unbind;
	}

	hp = (ddi_dma_impl_t *)dma_handle;
	attrp = &hp->dmai_attr;

	error = map_current_window(iommu, rdip, attrp, dmareq,
	    cookie_array, ccount, km_flags);
	if (error != DDI_SUCCESS) {
		dma_error = error;
		goto unbind;
	}

	if ((error = iommulib_iommu_dma_set_cookies(dip, dma_handle,
	    cookie_array, ccount)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
		    "for device %s", f, driver, instance, path);
		dma_error = error;
		goto unbind;
	}

	*cookiep = cookie_array[0];

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
		cmn_err(CE_NOTE, "%s: %s remapped cookie (%p), #cookies: %d",
		    f, path,
		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
		    *ccountp);
	}

	kmem_free(path, MAXPATHLEN);
	ASSERT(dma_error == DDI_DMA_MAPPED || dma_error == DDI_DMA_PARTIAL_MAP);
	return (dma_error);
unbind:
	kmem_free(path, MAXPATHLEN);
	(void) iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle);
	return (dma_error);
}

/*ARGSUSED*/
static int
amd_iommu_unbindhdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
{
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	int error = DDI_FAILURE;
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "amd_iommu_unbindhdl";

	cookie_array = NULL;
	ccount = 0;
	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed to unbindhdl for dip=%p",
		    f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed to unmap current window "
		    "for dip=%p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
	} else {
		error = DDI_SUCCESS;
	}
out:
	if (cookie_array)
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
	return (error);
}

/*ARGSUSED*/
static int
amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
    size_t len, uint_t cache_flags)
{
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	int error;
	const char *f = "amd_iommu_sync";

	cookie_array = NULL;
	ccount = 0;
	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		ASSERT(cookie_array == NULL);
		cmn_err(CE_WARN, "%s: Cannot get cookies "
		    "for device %p", f, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Cannot clear cookies "
		    "for device %p", f, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	error = iommulib_iommu_dma_sync(dip, rdip, dma_handle, off,
	    len, cache_flags);

	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
	    ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Cannot set cookies "
		    "for device %p", f, (void *)rdip);
		error = DDI_FAILURE;
	} else {
		cookie_array = NULL;
		ccount = 0;
	}

out:
	if (cookie_array)
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
	return (error);
}

/*ARGSUSED*/
static int
amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
    off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
	int error = DDI_FAILURE;
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	int km_flags;
	ddi_dma_impl_t *hp;
	ddi_dma_attr_t *attrp;
	struct ddi_dma_req sdmareq = {0};
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "amd_iommu_win";

	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);

	cookie_array = NULL;
	ccount = 0;
	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_win(dip, rdip, dma_handle, win,
	    offp, lenp, cookiep, ccountp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed to switch windows "
		    "for dip=%p",
		    f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	(void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);

	if (cookie_array) {
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
		cookie_array = NULL;
		ccount = 0;
	}

	cookie_array = NULL;
	ccount = 0;
	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	hp = (ddi_dma_impl_t *)dma_handle;
	attrp = &hp->dmai_attr;

	sdmareq.dmar_flags = DDI_DMA_RDWR;
	error = map_current_window(iommu, rdip, attrp, &sdmareq,
	    cookie_array, ccount, km_flags);

	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
	    ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	*cookiep = cookie_array[0];

	return (error == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
out:
	if (cookie_array)
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);

	return (error);
}

/* Obsoleted DMA routines */

/*ARGSUSED*/
static int
amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, struct ddi_dma_req *dmareq,
    ddi_dma_handle_t *dma_handle)
{
	ASSERT(0);
	return (iommulib_iommu_dma_map(dip, rdip, dmareq, dma_handle));
}

/*ARGSUSED*/
static int
amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
    caddr_t *objpp, uint_t cache_flags)
{
	ASSERT(0);
	return (iommulib_iommu_dma_mctl(dip, rdip, dma_handle,
	    request, offp, lenp, objpp, cache_flags));
}

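/*
 * 64-bit register accessors that read the register 32 bits at a time,
 * compose the halves in a local split_t (a union of one uint64_t and
 * two uint32_t words), apply the bit-field get/set on the composed
 * value, and, for the set case, store the full 64 bits back.
 * Presumably this works around hardware that mishandles 64-bit MMIO
 * loads from the IOMMU register space; that rationale is an
 * assumption, as it is not documented here.
 */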
uint64_t
amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits)
{
	split_t s;
	uint32_t *ptr32 = (uint32_t *)regp;
	uint64_t *s64p = &(s.u64);

	s.u32[0] = ptr32[0];
	s.u32[1] = ptr32[1];

	return (AMD_IOMMU_REG_GET64_IMPL(s64p, bits));
}

uint64_t
amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits, uint64_t value)
{
	split_t s;
	uint32_t *ptr32 = (uint32_t *)regp;
	uint64_t *s64p = &(s.u64);

	s.u32[0] = ptr32[0];
	s.u32[1] = ptr32[1];

	AMD_IOMMU_REG_SET64_IMPL(s64p, bits, value);

	*regp = s.u64;

	return (s.u64);
}

void
amd_iommu_read_boot_props(void)
{
	char *propval;

	/*
	 * If the "amd-iommu = no/false" boot property is set,
	 * ignore the AMD IOMMU
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "amd-iommu", &propval) == DDI_SUCCESS) {
		if (strcmp(propval, "no") == 0 ||
		    strcmp(propval, "false") == 0) {
			amd_iommu_disable = 1;
		}
		ddi_prop_free(propval);
	}

	/*
	 * Copy the list of drivers for which IOMMU is disabled by user.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "amd-iommu-disable-list", &propval)
	    == DDI_SUCCESS) {
		amd_iommu_disable_list = kmem_alloc(strlen(propval) + 1,
		    KM_SLEEP);
		(void) strcpy(amd_iommu_disable_list, propval);
		ddi_prop_free(propval);
	}
}
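
/*
 * Illustrative usage, assuming the standard x86 eeprom(1M)/bootenv.rc
 * mechanism for setting boot properties (the driver names are
 * hypothetical examples):
 *
 *	eeprom amd-iommu=false
 *	eeprom amd-iommu-disable-list=ahci:nvidia
 *
 * Entries in the list are separated by ':' as parsed in
 * amd_iommu_probe().
 */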

void
amd_iommu_lookup_conf_props(dev_info_t *dip)
{
	char *disable;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu", &disable)
	    == DDI_PROP_SUCCESS) {
		if (strcmp(disable, "no") == 0) {
			amd_iommu_disable = 1;
		}
		ddi_prop_free(disable);
	}

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu-disable-list",
	    &disable) == DDI_PROP_SUCCESS) {
		amd_iommu_disable_list = kmem_alloc(strlen(disable) + 1,
		    KM_SLEEP);
		(void) strcpy(amd_iommu_disable_list, disable);
		ddi_prop_free(disable);
	}
}
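
/*
 * Illustrative driver.conf equivalents of the properties looked up
 * above (values are hypothetical):
 *
 *	amd-iommu="no";
 *	amd-iommu-disable-list="ahci:nvidia";
 *
 * DDI_PROP_NOTPROM makes the lookup ignore PROM properties, so only
 * software/driver.conf values are honored here.
 */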
1881