1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 #include <sys/sunddi.h>
27 #include <sys/sunndi.h>
28 #include <sys/iommulib.h>
29 #include <sys/amd_iommu.h>
30 #include <sys/pci_cap.h>
31 #include <sys/bootconf.h>
32 #include <sys/ddidmareq.h>
33
34 #include "amd_iommu_impl.h"
35 #include "amd_iommu_acpi.h"
36 #include "amd_iommu_page_tables.h"
37
38 static int amd_iommu_fini(amd_iommu_t *iommu, int type);
39 static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
40 static void amd_iommu_stop(amd_iommu_t *iommu);
41
42 static int amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip);
43 static int amd_iommu_allochdl(iommulib_handle_t handle,
44 dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
45 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep);
46 static int amd_iommu_freehdl(iommulib_handle_t handle,
47 dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
48 static int amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
49 dev_info_t *rdip, ddi_dma_handle_t dma_handle,
50 struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
51 uint_t *ccountp);
52 static int amd_iommu_unbindhdl(iommulib_handle_t handle,
53 dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
54 static int amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
55 dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
56 size_t len, uint_t cache_flags);
57 static int amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
58 dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
59 off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
60 uint_t *ccountp);
61 static int amd_iommu_mapobject(iommulib_handle_t handle, dev_info_t *dip,
62 dev_info_t *rdip, ddi_dma_handle_t dma_handle,
63 struct ddi_dma_req *dmareq, ddi_dma_obj_t *dmao);
64 static int amd_iommu_unmapobject(iommulib_handle_t handle, dev_info_t *dip,
65 dev_info_t *rdip, ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao);
66 static int amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
67 dev_info_t *rdip, struct ddi_dma_req *dmareq,
68 ddi_dma_handle_t *dma_handle);
69 static int amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
70 dev_info_t *rdip, ddi_dma_handle_t dma_handle,
71 enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
72 caddr_t *objpp, uint_t cache_flags);
73
74 static int unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
75 ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked);
76
77 extern void *device_arena_alloc(size_t size, int vm_flag);
78 extern void device_arena_free(void * vaddr, size_t size);
79
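/*
 * DMA attributes used to allocate the IOMMU's own device table, command
 * buffer and event log (see amd_iommu_setup_tables_and_buffers() below):
 * 4K-aligned, consistent, uncached memory that we expect to be bound as a
 * single cookie.
 */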
80 ddi_dma_attr_t amd_iommu_dma_attr = {
81 DMA_ATTR_V0,
82 0U, /* dma_attr_addr_lo */
83 0xffffffffffffffffULL, /* dma_attr_addr_hi */
84 0xffffffffU, /* dma_attr_count_max */
85 (uint64_t)4096, /* dma_attr_align */
86 1, /* dma_attr_burstsizes */
87 64, /* dma_attr_minxfer */
88 0xffffffffU, /* dma_attr_maxxfer */
89 0xffffffffU, /* dma_attr_seg */
90 1, /* dma_attr_sgllen, variable */
91 64, /* dma_attr_granular */
92 0 /* dma_attr_flags */
93 };
94
95 ddi_device_acc_attr_t amd_iommu_devacc = {
96 DDI_DEVICE_ATTR_V0,
97 DDI_NEVERSWAP_ACC,
98 DDI_STRICTORDER_ACC
99 };
100
101 struct iommulib_ops amd_iommulib_ops = {
102 IOMMU_OPS_VERSION,
103 AMD_IOMMU,
104 "AMD IOMMU Vers. 1",
105 NULL,
106 amd_iommu_probe,
107 amd_iommu_allochdl,
108 amd_iommu_freehdl,
109 amd_iommu_bindhdl,
110 amd_iommu_unbindhdl,
111 amd_iommu_sync,
112 amd_iommu_win,
113 amd_iommu_mapobject,
114 amd_iommu_unmapobject,
115 amd_iommu_map,
116 amd_iommu_mctl
117 };
118
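/*
 * Serializes updates to the I/O page tables performed by
 * map_current_window() and unmap_current_window() below.
 */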
119 static kmutex_t amd_iommu_pgtable_lock;
120
121 static int
122 amd_iommu_register(amd_iommu_t *iommu)
123 {
124 dev_info_t *dip = iommu->aiomt_dip;
125 const char *driver = ddi_driver_name(dip);
126 int instance = ddi_get_instance(dip);
127 iommulib_ops_t *iommulib_ops;
128 iommulib_handle_t handle;
129 const char *f = "amd_iommu_register";
130
131 iommulib_ops = kmem_zalloc(sizeof (iommulib_ops_t), KM_SLEEP);
132
133 *iommulib_ops = amd_iommulib_ops;
134
135 iommulib_ops->ilops_data = (void *)iommu;
136 iommu->aiomt_iommulib_ops = iommulib_ops;
137
138 if (iommulib_iommu_register(dip, iommulib_ops, &handle)
139 != DDI_SUCCESS) {
140 cmn_err(CE_WARN, "%s: %s%d: Register with iommulib "
141 "failed idx=%d", f, driver, instance, iommu->aiomt_idx);
142 kmem_free(iommulib_ops, sizeof (iommulib_ops_t));
143 return (DDI_FAILURE);
144 }
145
146 iommu->aiomt_iommulib_handle = handle;
147
148 return (DDI_SUCCESS);
149 }
150
151 static int
152 amd_iommu_unregister(amd_iommu_t *iommu)
153 {
154 if (iommu->aiomt_iommulib_handle == NULL) {
155 /* we never registered */
156 return (DDI_SUCCESS);
157 }
158
159 if (iommulib_iommu_unregister(iommu->aiomt_iommulib_handle)
160 != DDI_SUCCESS) {
161 return (DDI_FAILURE);
162 }
163
164 kmem_free(iommu->aiomt_iommulib_ops, sizeof (iommulib_ops_t));
165 iommu->aiomt_iommulib_ops = NULL;
166 iommu->aiomt_iommulib_handle = NULL;
167
168 return (DDI_SUCCESS);
169 }
170
171 static int
172 amd_iommu_setup_passthru(amd_iommu_t *iommu)
173 {
174 gfx_entry_t *gfxp;
175 dev_info_t *dip;
176
177 /*
178 * Setup passthru mapping for "special" devices
179 */
180 amd_iommu_set_passthru(iommu, NULL);
181
182 for (gfxp = gfx_devinfo_list; gfxp; gfxp = gfxp->g_next) {
183 gfxp->g_ref++;
184 dip = gfxp->g_dip;
185 if (dip) {
186 amd_iommu_set_passthru(iommu, dip);
187 }
188 gfxp->g_ref--;
189 }
190
191 return (DDI_SUCCESS);
192 }
193
194 static int
195 amd_iommu_start(amd_iommu_t *iommu)
196 {
197 dev_info_t *dip = iommu->aiomt_dip;
198 int instance = ddi_get_instance(dip);
199 const char *driver = ddi_driver_name(dip);
200 amd_iommu_acpi_ivhd_t *hinfop;
201 const char *f = "amd_iommu_start";
202
203 hinfop = amd_iommu_lookup_all_ivhd();
204
205 /*
206 * Disable HT tunnel translation.
207 * XXX use ACPI
208 */
209 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
210 AMD_IOMMU_HT_TUN_ENABLE, 0);
211
212 if (hinfop) {
213 if (amd_iommu_debug) {
214 cmn_err(CE_NOTE,
215 "amd_iommu: using ACPI for CTRL registers");
216 }
217 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
218 AMD_IOMMU_ISOC, hinfop->ach_Isoc);
219 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
220 AMD_IOMMU_RESPASSPW, hinfop->ach_ResPassPW);
221 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
222 AMD_IOMMU_PASSPW, hinfop->ach_PassPW);
223 }
224
225 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
226 AMD_IOMMU_INVTO, 5);
227
228
229 /*
230 * The Device table entry bit 0 (V) controls whether the device
231 * table entry is valid for address translation and Device table
232 * entry bit 128 (IV) controls whether interrupt remapping is valid.
233 	 * By setting both to zero we are essentially doing pass-thru. Since
234 	 * this table is zeroed on allocation, we will have pass-thru when
235 	 * the IOMMU is enabled.
236 */
237
238 /* Finally enable the IOMMU ... */
239 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
240 AMD_IOMMU_ENABLE, 1);
241
242 if (amd_iommu_debug) {
243 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
244 "Successfully started AMD IOMMU", f, driver, instance,
245 iommu->aiomt_idx);
246 }
247 cmn_err(CE_NOTE, "AMD IOMMU (%d,%d) enabled",
248 instance, iommu->aiomt_idx);
249
250 return (DDI_SUCCESS);
251 }
252
253 static void
254 amd_iommu_stop(amd_iommu_t *iommu)
255 {
256 dev_info_t *dip = iommu->aiomt_dip;
257 int instance = ddi_get_instance(dip);
258 const char *driver = ddi_driver_name(dip);
259 const char *f = "amd_iommu_stop";
260
261 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
262 AMD_IOMMU_ENABLE, 0);
263 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
264 AMD_IOMMU_EVENTINT_ENABLE, 0);
265 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
266 AMD_IOMMU_COMWAITINT_ENABLE, 0);
267 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
268 AMD_IOMMU_EVENTLOG_ENABLE, 0);
269
270 /*
271 * Disable translation on HT tunnel traffic
272 */
273 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
274 AMD_IOMMU_HT_TUN_ENABLE, 0);
275 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
276 AMD_IOMMU_CMDBUF_ENABLE, 0);
277
278 	cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
279 "Successfully stopped AMD IOMMU", f, driver, instance,
280 iommu->aiomt_idx);
281 }
282
283 static int
284 amd_iommu_setup_tables_and_buffers(amd_iommu_t *iommu)
285 {
286 dev_info_t *dip = iommu->aiomt_dip;
287 int instance = ddi_get_instance(dip);
288 const char *driver = ddi_driver_name(dip);
289 uint32_t dma_bufsz;
290 caddr_t addr;
291 uint32_t sz;
292 uint32_t p2sz;
293 int i;
294 uint64_t *dentry;
295 int err;
296 const char *f = "amd_iommu_setup_tables_and_buffers";
297
298 /*
299 * We will put the Device Table, Command Buffer and
300 * Event Log in contiguous memory. Allocate the maximum
301 * size allowed for such structures
302 	 * Device Table: 256 bits (32 bytes) per entry * 64K entries
303 	 * Command Buffer: 128 bits (16 bytes) per entry * 32K entries
304 	 * Event Log: 128 bits (16 bytes) per entry * 32K entries
305 */
306 iommu->aiomt_devtbl_sz = (1<<AMD_IOMMU_DEVTBL_SZ) * AMD_IOMMU_DEVENT_SZ;
307 iommu->aiomt_cmdbuf_sz = (1<<AMD_IOMMU_CMDBUF_SZ) * AMD_IOMMU_CMD_SZ;
308 iommu->aiomt_eventlog_sz =
309 (1<<AMD_IOMMU_EVENTLOG_SZ) * AMD_IOMMU_EVENT_SZ;
310
311 dma_bufsz = iommu->aiomt_devtbl_sz + iommu->aiomt_cmdbuf_sz
312 + iommu->aiomt_eventlog_sz;
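	/*
	 * For example, with the maximum sizes noted above this works out to
	 * 32B * 64K (2MB) + 16B * 32K (512KB) + 16B * 32K (512KB) = 3MB.
	 * The actual total depends on the AMD_IOMMU_*_SZ constants.
	 */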
313
314 /*
315 * Alloc a DMA handle.
316 */
317 err = ddi_dma_alloc_handle(dip, &amd_iommu_dma_attr,
318 DDI_DMA_SLEEP, NULL, &iommu->aiomt_dmahdl);
319 if (err != DDI_SUCCESS) {
320 cmn_err(CE_WARN, "%s: %s%d: Cannot alloc DMA handle for "
321 "AMD IOMMU tables and buffers", f, driver, instance);
322 return (DDI_FAILURE);
323 }
324
325 /*
326 * Alloc memory for tables and buffers
327 * XXX remove cast to size_t
328 */
329 err = ddi_dma_mem_alloc(iommu->aiomt_dmahdl, dma_bufsz,
330 &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
331 DDI_DMA_SLEEP, NULL, (caddr_t *)&iommu->aiomt_dma_bufva,
332 (size_t *)&iommu->aiomt_dma_mem_realsz, &iommu->aiomt_dma_mem_hdl);
333 if (err != DDI_SUCCESS) {
334 cmn_err(CE_WARN, "%s: %s%d: Cannot alloc memory for DMA "
335 "to AMD IOMMU tables and buffers", f, driver, instance);
336 iommu->aiomt_dma_bufva = NULL;
337 iommu->aiomt_dma_mem_realsz = 0;
338 ddi_dma_free_handle(&iommu->aiomt_dmahdl);
339 iommu->aiomt_dmahdl = NULL;
340 return (DDI_FAILURE);
341 }
342
343 /*
344 	 * The VA must be 4K aligned and the allocation must be >= table size
345 */
346 ASSERT(((uintptr_t)iommu->aiomt_dma_bufva &
347 AMD_IOMMU_TABLE_ALIGN) == 0);
348 ASSERT(iommu->aiomt_dma_mem_realsz >= dma_bufsz);
349
350 /*
351 * Now bind the handle
352 */
353 err = ddi_dma_addr_bind_handle(iommu->aiomt_dmahdl, NULL,
354 iommu->aiomt_dma_bufva, iommu->aiomt_dma_mem_realsz,
355 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
356 NULL, &iommu->aiomt_buf_dma_cookie, &iommu->aiomt_buf_dma_ncookie);
357 if (err != DDI_DMA_MAPPED) {
358 cmn_err(CE_WARN, "%s: %s%d: Cannot bind memory for DMA "
359 "to AMD IOMMU tables and buffers. bufrealsz=%p",
360 f, driver, instance,
361 (void *)(uintptr_t)iommu->aiomt_dma_mem_realsz);
362 iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
363 iommu->aiomt_buf_dma_cookie.dmac_size = 0;
364 iommu->aiomt_buf_dma_cookie.dmac_type = 0;
365 iommu->aiomt_buf_dma_ncookie = 0;
366 ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
367 iommu->aiomt_dma_mem_hdl = NULL;
368 iommu->aiomt_dma_bufva = NULL;
369 iommu->aiomt_dma_mem_realsz = 0;
370 ddi_dma_free_handle(&iommu->aiomt_dmahdl);
371 iommu->aiomt_dmahdl = NULL;
372 return (DDI_FAILURE);
373 }
374
375 /*
376 * We assume the DMA engine on the IOMMU is capable of handling the
377 * whole table buffer in a single cookie. If not and multiple cookies
378 * are needed we fail.
379 */
380 if (iommu->aiomt_buf_dma_ncookie != 1) {
381 cmn_err(CE_WARN, "%s: %s%d: Cannot handle multiple "
382 "cookies for DMA to AMD IOMMU tables and buffers. "
383 "#cookies=%u", f, driver, instance,
384 iommu->aiomt_buf_dma_ncookie);
385 (void) ddi_dma_unbind_handle(iommu->aiomt_dmahdl);
386 iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
387 iommu->aiomt_buf_dma_cookie.dmac_size = 0;
388 iommu->aiomt_buf_dma_cookie.dmac_type = 0;
389 iommu->aiomt_buf_dma_ncookie = 0;
390 ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
391 iommu->aiomt_dma_mem_hdl = NULL;
392 iommu->aiomt_dma_bufva = NULL;
393 iommu->aiomt_dma_mem_realsz = 0;
394 ddi_dma_free_handle(&iommu->aiomt_dmahdl);
395 iommu->aiomt_dmahdl = NULL;
396 return (DDI_FAILURE);
397 }
398
399 /*
400 	 * The cookie address must be 4K aligned and its size >= table size
401 */
402 ASSERT((iommu->aiomt_buf_dma_cookie.dmac_cookie_addr
403 & AMD_IOMMU_TABLE_ALIGN) == 0);
404 ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size
405 <= iommu->aiomt_dma_mem_realsz);
406 ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size >= dma_bufsz);
407
408 /*
409 * Setup the device table pointers in the iommu struct as
410 * well as the IOMMU device table register
411 */
412 iommu->aiomt_devtbl = iommu->aiomt_dma_bufva;
413 bzero(iommu->aiomt_devtbl, iommu->aiomt_devtbl_sz);
414
415 /*
416 	 * Set V=1 and TV = 0, so any inadvertent pass-thrus cause
417 * page faults. Also set SE bit so we aren't swamped with
418 * page fault messages
419 */
420 for (i = 0; i <= AMD_IOMMU_MAX_DEVICEID; i++) {
421 /*LINTED*/
422 dentry = (uint64_t *)&iommu->aiomt_devtbl
423 [i * AMD_IOMMU_DEVTBL_ENTRY_SZ];
424 AMD_IOMMU_REG_SET64(dentry, AMD_IOMMU_DEVTBL_V, 1);
425 AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SE, 1);
426 }
427
428 addr = (caddr_t)(uintptr_t)iommu->aiomt_buf_dma_cookie.dmac_cookie_addr;
429 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
430 AMD_IOMMU_DEVTABBASE, ((uint64_t)(uintptr_t)addr) >> 12);
431 sz = (iommu->aiomt_devtbl_sz >> 12) - 1;
432 ASSERT(sz <= ((1 << 9) - 1));
433 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
434 AMD_IOMMU_DEVTABSIZE, sz);
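	/*
	 * DEVTABBASE takes the 4K-aligned physical address shifted right by
	 * 12, and DEVTABSIZE is the table size in 4K pages minus one. For
	 * example, a 2MB device table gives sz = (2MB >> 12) - 1 = 511,
	 * which fits in the 9-bit field checked by the ASSERT above.
	 */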
435
436 /*
437 * Setup the command buffer pointers
438 */
439 iommu->aiomt_cmdbuf = iommu->aiomt_devtbl +
440 iommu->aiomt_devtbl_sz;
441 bzero(iommu->aiomt_cmdbuf, iommu->aiomt_cmdbuf_sz);
442 addr += iommu->aiomt_devtbl_sz;
443 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
444 AMD_IOMMU_COMBASE, ((uint64_t)(uintptr_t)addr) >> 12);
445
446 p2sz = AMD_IOMMU_CMDBUF_SZ;
447 ASSERT(p2sz >= AMD_IOMMU_CMDBUF_MINSZ &&
448 p2sz <= AMD_IOMMU_CMDBUF_MAXSZ);
449 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
450 AMD_IOMMU_COMLEN, p2sz);
451 /*LINTED*/
452 iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
453 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
454 AMD_IOMMU_CMDHEADPTR, 0);
455 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
456 AMD_IOMMU_CMDTAILPTR, 0);
457
458 /*
459 * Setup the event log pointers
460 */
461 	iommu->aiomt_eventlog = iommu->aiomt_cmdbuf +
462 	    iommu->aiomt_cmdbuf_sz;
463 bzero(iommu->aiomt_eventlog, iommu->aiomt_eventlog_sz);
464 addr += iommu->aiomt_cmdbuf_sz;
465 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
466 AMD_IOMMU_EVENTBASE, ((uint64_t)(uintptr_t)addr) >> 12);
467 p2sz = AMD_IOMMU_EVENTLOG_SZ;
468 ASSERT(p2sz >= AMD_IOMMU_EVENTLOG_MINSZ &&
469 p2sz <= AMD_IOMMU_EVENTLOG_MAXSZ);
470 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
471 	    AMD_IOMMU_EVENTLEN, p2sz);
472 /*LINTED*/
473 iommu->aiomt_event_head = (uint32_t *)iommu->aiomt_eventlog;
474 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
475 AMD_IOMMU_EVENTHEADPTR, 0);
476 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
477 AMD_IOMMU_EVENTTAILPTR, 0);
478
479 /* dma sync so device sees this init */
480 SYNC_FORDEV(iommu->aiomt_dmahdl);
481
482 if (amd_iommu_debug & AMD_IOMMU_DEBUG_TABLES) {
483 cmn_err(CE_NOTE, "%s: %s%d: successfully setup AMD IOMMU "
484 "tables, idx=%d", f, driver, instance, iommu->aiomt_idx);
485 }
486
487 return (DDI_SUCCESS);
488 }
489
490 static void
491 amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu, int type)
492 {
493 dev_info_t *dip = iommu->aiomt_dip;
494 int instance = ddi_get_instance(dip);
495 const char *driver = ddi_driver_name(dip);
496 const char *f = "amd_iommu_teardown_tables_and_buffers";
497
498 iommu->aiomt_eventlog = NULL;
499 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
500 AMD_IOMMU_EVENTBASE, 0);
501 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
502 AMD_IOMMU_EVENTLEN, 0);
503 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
504 AMD_IOMMU_EVENTHEADPTR, 0);
505 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
506 AMD_IOMMU_EVENTTAILPTR, 0);
507
508
509 iommu->aiomt_cmdbuf = NULL;
510 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
511 AMD_IOMMU_COMBASE, 0);
512 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
513 AMD_IOMMU_COMLEN, 0);
514 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
515 AMD_IOMMU_CMDHEADPTR, 0);
516 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
517 AMD_IOMMU_CMDTAILPTR, 0);
518
519
520 iommu->aiomt_devtbl = NULL;
521 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
522 AMD_IOMMU_DEVTABBASE, 0);
523 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
524 AMD_IOMMU_DEVTABSIZE, 0);
525
526 if (iommu->aiomt_dmahdl == NULL || type == AMD_IOMMU_QUIESCE)
527 return;
528
529 /* Unbind the handle */
530 if (ddi_dma_unbind_handle(iommu->aiomt_dmahdl) != DDI_SUCCESS) {
531 cmn_err(CE_WARN, "%s: %s%d: failed to unbind handle: "
532 "%p for IOMMU idx=%d", f, driver, instance,
533 (void *)iommu->aiomt_dmahdl, iommu->aiomt_idx);
534 }
535 iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
536 iommu->aiomt_buf_dma_cookie.dmac_size = 0;
537 iommu->aiomt_buf_dma_cookie.dmac_type = 0;
538 iommu->aiomt_buf_dma_ncookie = 0;
539
540 /* Free the table memory allocated for DMA */
541 ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
542 iommu->aiomt_dma_mem_hdl = NULL;
543 iommu->aiomt_dma_bufva = NULL;
544 iommu->aiomt_dma_mem_realsz = 0;
545
546 /* Free the DMA handle */
547 ddi_dma_free_handle(&iommu->aiomt_dmahdl);
548 iommu->aiomt_dmahdl = NULL;
549 }
550
551 static void
552 amd_iommu_enable_interrupts(amd_iommu_t *iommu)
553 {
554 ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
555 AMD_IOMMU_CMDBUF_RUN) == 0);
556 ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
557 AMD_IOMMU_EVENT_LOG_RUN) == 0);
558
559 	/* The command buffer must be enabled prior to enabling event logging */
561 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
562 AMD_IOMMU_CMDBUF_ENABLE, 1);
563 	/* No interrupts for completion wait - too heavyweight; use polling */
564 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
565 AMD_IOMMU_COMWAITINT_ENABLE, 0);
566 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
567 AMD_IOMMU_EVENTLOG_ENABLE, 1);
568 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
569 AMD_IOMMU_EVENTINT_ENABLE, 1);
570 }
571
572 static int
573 amd_iommu_setup_exclusion(amd_iommu_t *iommu)
574 {
575 amd_iommu_acpi_ivmd_t *minfop;
576
577 minfop = amd_iommu_lookup_all_ivmd();
578
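	/*
	 * An exclusion range, when enabled (EXEN), causes DMA addresses that
	 * fall inside [base, limit] to bypass translation entirely. We only
	 * program one if ACPI (IVMD) describes such a range.
	 */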
579 if (minfop && minfop->acm_ExclRange == 1) {
580 cmn_err(CE_NOTE, "Programming exclusion range");
581 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
582 AMD_IOMMU_EXCL_BASE_ADDR,
583 minfop->acm_ivmd_phys_start >> 12);
584 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
585 AMD_IOMMU_EXCL_BASE_ALLOW, 1);
586 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
587 AMD_IOMMU_EXCL_BASE_EXEN, 1);
588 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
589 AMD_IOMMU_EXCL_LIM, (minfop->acm_ivmd_phys_start +
590 minfop->acm_ivmd_phys_len) >> 12);
591 } else {
592 if (amd_iommu_debug) {
593 cmn_err(CE_NOTE, "Skipping exclusion range");
594 }
595 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
596 AMD_IOMMU_EXCL_BASE_ADDR, 0);
597 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
598 AMD_IOMMU_EXCL_BASE_ALLOW, 1);
599 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
600 AMD_IOMMU_EXCL_BASE_EXEN, 0);
601 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
602 AMD_IOMMU_EXCL_LIM, 0);
603 }
604
605 return (DDI_SUCCESS);
606 }
607
608 static void
609 amd_iommu_teardown_exclusion(amd_iommu_t *iommu)
610 {
611 (void) amd_iommu_setup_exclusion(iommu);
612 }
613
614 static uint_t
615 amd_iommu_intr_handler(caddr_t arg1, caddr_t arg2)
616 {
617 /*LINTED*/
618 amd_iommu_t *iommu = (amd_iommu_t *)arg1;
619 dev_info_t *dip = iommu->aiomt_dip;
620 int instance = ddi_get_instance(dip);
621 const char *driver = ddi_driver_name(dip);
622 const char *f = "amd_iommu_intr_handler";
623
624 ASSERT(arg1);
625 ASSERT(arg2 == NULL);
626
627 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
628 cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d. In INTR handler",
629 f, driver, instance, iommu->aiomt_idx);
630 }
631
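	/*
	 * Two sources are handled: the event log interrupt and the event log
	 * overflow interrupt. In both cases the log is drained and the
	 * corresponding status bit is written back to acknowledge it.
	 */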
632 if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
633 AMD_IOMMU_EVENT_LOG_INT) == 1) {
634 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
635 cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d "
636 "Event Log Interrupt", f, driver, instance,
637 iommu->aiomt_idx);
638 }
639 (void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISPLAY);
640 WAIT_SEC(1);
641 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
642 AMD_IOMMU_EVENT_LOG_INT, 1);
643 return (DDI_INTR_CLAIMED);
644 }
645
646 if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
647 AMD_IOMMU_EVENT_OVERFLOW_INT) == 1) {
648 cmn_err(CE_NOTE, "!%s: %s%d: IOMMU unit idx=%d "
649 "Event Overflow Interrupt", f, driver, instance,
650 iommu->aiomt_idx);
651 (void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISCARD);
652 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
653 AMD_IOMMU_EVENT_LOG_INT, 1);
654 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
655 AMD_IOMMU_EVENT_OVERFLOW_INT, 1);
656 return (DDI_INTR_CLAIMED);
657 }
658
659 return (DDI_INTR_UNCLAIMED);
660 }
661
662
663 static int
664 amd_iommu_setup_interrupts(amd_iommu_t *iommu)
665 {
666 dev_info_t *dip = iommu->aiomt_dip;
667 int instance = ddi_get_instance(dip);
668 const char *driver = ddi_driver_name(dip);
669 int intrcap0;
670 int intrcapN;
671 int type;
672 int err;
673 int req;
674 int avail;
675 int p2req;
676 int actual;
677 int i;
678 int j;
679 const char *f = "amd_iommu_setup_interrupts";
680
681 if (ddi_intr_get_supported_types(dip, &type) != DDI_SUCCESS) {
682 cmn_err(CE_WARN, "%s: %s%d: ddi_intr_get_supported_types "
683 "failed: idx=%d", f, driver, instance, iommu->aiomt_idx);
684 return (DDI_FAILURE);
685 }
686
687 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
688 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
689 "Interrupt types supported = 0x%x", f, driver, instance,
690 iommu->aiomt_idx, type);
691 }
692
693 /*
694 * for now we only support MSI
695 */
696 if ((type & DDI_INTR_TYPE_MSI) == 0) {
697 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
698 "MSI interrupts not supported. Failing init.",
699 f, driver, instance, iommu->aiomt_idx);
700 return (DDI_FAILURE);
701 }
702
703 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
704 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. MSI supported",
705 f, driver, instance, iommu->aiomt_idx);
706 }
707
708 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &req);
709 if (err != DDI_SUCCESS) {
710 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
711 "ddi_intr_get_nintrs failed err = %d",
712 f, driver, instance, iommu->aiomt_idx, err);
713 return (DDI_FAILURE);
714 }
715
716 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
717 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
718 "MSI number of interrupts requested: %d",
719 f, driver, instance, iommu->aiomt_idx, req);
720 }
721
722 if (req == 0) {
723 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
724 "interrupts requested. Failing init", f,
725 driver, instance, iommu->aiomt_idx);
726 return (DDI_FAILURE);
727 }
728
729 err = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &avail);
730 if (err != DDI_SUCCESS) {
731 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d "
732 "ddi_intr_get_navail failed err = %d", f,
733 driver, instance, iommu->aiomt_idx, err);
734 return (DDI_FAILURE);
735 }
736
737 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
738 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
739 "MSI number of interrupts available: %d",
740 f, driver, instance, iommu->aiomt_idx, avail);
741 }
742
743 if (avail == 0) {
744 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
745 "interrupts available. Failing init", f,
746 driver, instance, iommu->aiomt_idx);
747 return (DDI_FAILURE);
748 }
749
750 if (avail < req) {
751 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: MSI "
752 "interrupts: requested (%d) > available (%d). "
753 "Failing init", f, driver, instance, iommu->aiomt_idx,
754 req, avail);
755 return (DDI_FAILURE);
756 }
757
758 /* Allocate memory for DDI interrupt handles */
759 iommu->aiomt_intr_htable_sz = req * sizeof (ddi_intr_handle_t);
760 iommu->aiomt_intr_htable = kmem_zalloc(iommu->aiomt_intr_htable_sz,
761 KM_SLEEP);
762
763 iommu->aiomt_intr_state = AMD_IOMMU_INTR_TABLE;
764
765 /* Convert req to a power of two as required by ddi_intr_alloc */
766 p2req = 0;
767 while (1<<p2req <= req)
768 p2req++;
769 p2req--;
770 req = 1<<p2req;
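	/*
	 * Note that this rounds down to the nearest power of two;
	 * e.g. req = 3 yields p2req = 1 and req = 2.
	 */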
771
772 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
773 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
774 "MSI power of 2 number of interrupts: %d,%d",
775 f, driver, instance, iommu->aiomt_idx, p2req, req);
776 }
777
778 err = ddi_intr_alloc(iommu->aiomt_dip, iommu->aiomt_intr_htable,
779 DDI_INTR_TYPE_MSI, 0, req, &actual, DDI_INTR_ALLOC_STRICT);
780 if (err != DDI_SUCCESS) {
781 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
782 "ddi_intr_alloc failed: err = %d",
783 f, driver, instance, iommu->aiomt_idx, err);
784 amd_iommu_teardown_interrupts(iommu);
785 return (DDI_FAILURE);
786 }
787
788 iommu->aiomt_actual_intrs = actual;
789 iommu->aiomt_intr_state = AMD_IOMMU_INTR_ALLOCED;
790
791 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
792 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
793 "number of interrupts actually allocated %d",
794 f, driver, instance, iommu->aiomt_idx, actual);
795 }
796
797 if (iommu->aiomt_actual_intrs < req) {
798 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
799 "ddi_intr_alloc failed: actual (%d) < req (%d)",
800 f, driver, instance, iommu->aiomt_idx,
801 iommu->aiomt_actual_intrs, req);
802 amd_iommu_teardown_interrupts(iommu);
803 return (DDI_FAILURE);
804 }
805
806 for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
807 if (ddi_intr_add_handler(iommu->aiomt_intr_htable[i],
808 amd_iommu_intr_handler, (void *)iommu, NULL)
809 != DDI_SUCCESS) {
810 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
811 "ddi_intr_add_handler failed: intr = %d, err = %d",
812 f, driver, instance, iommu->aiomt_idx, i, err);
813 for (j = 0; j < i; j++) {
814 (void) ddi_intr_remove_handler(
815 iommu->aiomt_intr_htable[j]);
816 }
817 amd_iommu_teardown_interrupts(iommu);
818 return (DDI_FAILURE);
819 }
820 }
821 iommu->aiomt_intr_state = AMD_IOMMU_INTR_HANDLER;
822
823 intrcap0 = intrcapN = -1;
824 if (ddi_intr_get_cap(iommu->aiomt_intr_htable[0], &intrcap0)
825 != DDI_SUCCESS ||
826 ddi_intr_get_cap(
827 iommu->aiomt_intr_htable[iommu->aiomt_actual_intrs - 1], &intrcapN)
828 != DDI_SUCCESS || intrcap0 != intrcapN) {
829 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
830 "ddi_intr_get_cap failed or inconsistent cap among "
831 		    "interrupts: intrcap0 (%d) != intrcapN (%d)",
832 f, driver, instance, iommu->aiomt_idx, intrcap0, intrcapN);
833 amd_iommu_teardown_interrupts(iommu);
834 return (DDI_FAILURE);
835 }
836 iommu->aiomt_intr_cap = intrcap0;
837
838 if (intrcap0 & DDI_INTR_FLAG_BLOCK) {
839 /* Need to call block enable */
840 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
841 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
842 "Need to call block enable",
843 f, driver, instance, iommu->aiomt_idx);
844 }
845 if (ddi_intr_block_enable(iommu->aiomt_intr_htable,
846 iommu->aiomt_actual_intrs) != DDI_SUCCESS) {
847 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
848 "ddi_intr_block enable failed ", f, driver,
849 instance, iommu->aiomt_idx);
850 (void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
851 iommu->aiomt_actual_intrs);
852 amd_iommu_teardown_interrupts(iommu);
853 return (DDI_FAILURE);
854 }
855 } else {
856 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
857 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
858 "Need to call individual enable",
859 f, driver, instance, iommu->aiomt_idx);
860 }
861 for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
862 if (ddi_intr_enable(iommu->aiomt_intr_htable[i])
863 != DDI_SUCCESS) {
864 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
865 "ddi_intr_enable failed: intr = %d", f,
866 driver, instance, iommu->aiomt_idx, i);
867 for (j = 0; j < i; j++) {
868 (void) ddi_intr_disable(
869 iommu->aiomt_intr_htable[j]);
870 }
871 amd_iommu_teardown_interrupts(iommu);
872 return (DDI_FAILURE);
873 }
874 }
875 }
876 iommu->aiomt_intr_state = AMD_IOMMU_INTR_ENABLED;
877
878 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
879 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
880 "Interrupts successfully %s enabled. # of interrupts = %d",
881 f, driver, instance, iommu->aiomt_idx,
882 (intrcap0 & DDI_INTR_FLAG_BLOCK) ? "(block)" :
883 "(individually)", iommu->aiomt_actual_intrs);
884 }
885
886 return (DDI_SUCCESS);
887 }
888
889 static void
890 amd_iommu_teardown_interrupts(amd_iommu_t *iommu)
891 {
892 int i;
893
894 if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ENABLED) {
895 if (iommu->aiomt_intr_cap & DDI_INTR_FLAG_BLOCK) {
896 (void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
897 iommu->aiomt_actual_intrs);
898 } else {
899 for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
900 (void) ddi_intr_disable(
901 iommu->aiomt_intr_htable[i]);
902 }
903 }
904 }
905
906 if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_HANDLER) {
907 for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
908 (void) ddi_intr_remove_handler(
909 iommu->aiomt_intr_htable[i]);
910 }
911 }
912
913 if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ALLOCED) {
914 for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
915 (void) ddi_intr_free(iommu->aiomt_intr_htable[i]);
916 }
917 }
918 if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_TABLE) {
919 kmem_free(iommu->aiomt_intr_htable,
920 iommu->aiomt_intr_htable_sz);
921 }
922 iommu->aiomt_intr_htable = NULL;
923 iommu->aiomt_intr_htable_sz = 0;
924 iommu->aiomt_intr_state = AMD_IOMMU_INTR_INVALID;
925 }
926
927 static amd_iommu_t *
928 amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
929 uint16_t cap_base)
930 {
931 amd_iommu_t *iommu;
932 int instance = ddi_get_instance(dip);
933 const char *driver = ddi_driver_name(dip);
934 uint32_t caphdr;
935 uint32_t low_addr32;
936 uint32_t hi_addr32;
937 uint32_t range;
938 uint32_t misc;
939 uint64_t pgoffset;
940 amd_iommu_acpi_global_t *global;
941 amd_iommu_acpi_ivhd_t *hinfop;
942 int bus, device, func;
943 const char *f = "amd_iommu_init";
944
945 low_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
946 AMD_IOMMU_CAP_ADDR_LOW_OFF);
947 if (!(low_addr32 & AMD_IOMMU_REG_ADDR_LOCKED)) {
948 cmn_err(CE_WARN, "%s: %s%d: capability registers not locked. "
949 "Unable to use IOMMU unit idx=%d - skipping ...", f, driver,
950 instance, idx);
951 return (NULL);
952 }
953
954 iommu = kmem_zalloc(sizeof (amd_iommu_t), KM_SLEEP);
955 mutex_init(&iommu->aiomt_mutex, NULL, MUTEX_DRIVER, NULL);
956 mutex_enter(&iommu->aiomt_mutex);
957
958 mutex_init(&iommu->aiomt_cmdlock, NULL, MUTEX_DRIVER, NULL);
959 mutex_init(&iommu->aiomt_eventlock, NULL, MUTEX_DRIVER, NULL);
960
961 iommu->aiomt_dip = dip;
962 iommu->aiomt_idx = idx;
963
964 if (acpica_get_bdf(iommu->aiomt_dip, &bus, &device, &func)
965 != DDI_SUCCESS) {
966 		cmn_err(CE_WARN, "%s: %s%d: Failed to get BDF. "
967 "Unable to use IOMMU unit idx=%d - skipping ...",
968 f, driver, instance, idx);
969 return (NULL);
970 }
971
972 iommu->aiomt_bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) |
973 (uint8_t)func;
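	/*
	 * The 16-bit BDF packs bus in bits 15:8, device in bits 7:3 and
	 * function in bits 2:0; e.g. bus 0, device 0x13, function 2 packs
	 * to 0x009a. (The example values are illustrative only.)
	 */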
974
975 /*
976 * Since everything in the capability block is locked and RO at this
977 * point, copy everything into the IOMMU struct
978 */
979
980 /* Get cap header */
981 caphdr = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_HDR_OFF);
982 iommu->aiomt_cap_hdr = caphdr;
983 iommu->aiomt_npcache = AMD_IOMMU_REG_GET32(&caphdr,
984 AMD_IOMMU_CAP_NPCACHE);
985 iommu->aiomt_httun = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_HTTUN);
986
987 global = amd_iommu_lookup_acpi_global();
988 hinfop = amd_iommu_lookup_any_ivhd(iommu);
989
990 if (hinfop)
991 iommu->aiomt_iotlb = hinfop->ach_IotlbSup;
992 else
993 iommu->aiomt_iotlb =
994 AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_IOTLB);
995
996 iommu->aiomt_captype = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
997 iommu->aiomt_capid = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
998
999 /*
1000 * Get address of IOMMU control registers
1001 */
1002 hi_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
1003 AMD_IOMMU_CAP_ADDR_HI_OFF);
1004 iommu->aiomt_low_addr32 = low_addr32;
1005 iommu->aiomt_hi_addr32 = hi_addr32;
1006 low_addr32 &= ~AMD_IOMMU_REG_ADDR_LOCKED;
1007
1008 if (hinfop) {
1009 iommu->aiomt_reg_pa = hinfop->ach_IOMMU_reg_base;
1010 ASSERT(hinfop->ach_IOMMU_pci_seg == 0);
1011 } else {
1012 iommu->aiomt_reg_pa = ((uint64_t)hi_addr32 << 32 | low_addr32);
1013 }
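	/*
	 * The register base comes from the ACPI IVHD entry when one exists;
	 * otherwise it is reconstructed from the capability registers, with
	 * the lock bit masked out of the low word above.
	 */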
1014
1015 /*
1016 * Get cap range reg
1017 */
1018 range = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_RANGE_OFF);
1019 iommu->aiomt_range = range;
1020 iommu->aiomt_rng_valid = AMD_IOMMU_REG_GET32(&range,
1021 AMD_IOMMU_RNG_VALID);
1022 if (iommu->aiomt_rng_valid) {
1023 iommu->aiomt_rng_bus = AMD_IOMMU_REG_GET32(&range,
1024 AMD_IOMMU_RNG_BUS);
1025 iommu->aiomt_first_devfn = AMD_IOMMU_REG_GET32(&range,
1026 AMD_IOMMU_FIRST_DEVFN);
1027 iommu->aiomt_last_devfn = AMD_IOMMU_REG_GET32(&range,
1028 AMD_IOMMU_LAST_DEVFN);
1029 } else {
1030 iommu->aiomt_rng_bus = 0;
1031 iommu->aiomt_first_devfn = 0;
1032 iommu->aiomt_last_devfn = 0;
1033 }
1034
1035 if (hinfop)
1036 iommu->aiomt_ht_unitid = hinfop->ach_IOMMU_UnitID;
1037 else
1038 iommu->aiomt_ht_unitid = AMD_IOMMU_REG_GET32(&range,
1039 AMD_IOMMU_HT_UNITID);
1040
1041 /*
1042 * Get cap misc reg
1043 */
1044 misc = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_MISC_OFF);
1045 iommu->aiomt_misc = misc;
1046
1047 if (global) {
1048 iommu->aiomt_htatsresv = global->acg_HtAtsResv;
1049 iommu->aiomt_vasize = global->acg_VAsize;
1050 iommu->aiomt_pasize = global->acg_PAsize;
1051 } else {
1052 iommu->aiomt_htatsresv = AMD_IOMMU_REG_GET32(&misc,
1053 AMD_IOMMU_HT_ATSRSV);
1054 iommu->aiomt_vasize = AMD_IOMMU_REG_GET32(&misc,
1055 AMD_IOMMU_VA_SIZE);
1056 iommu->aiomt_pasize = AMD_IOMMU_REG_GET32(&misc,
1057 AMD_IOMMU_PA_SIZE);
1058 }
1059
1060 if (hinfop) {
1061 iommu->aiomt_msinum = hinfop->ach_IOMMU_MSInum;
1062 } else {
1063 iommu->aiomt_msinum =
1064 AMD_IOMMU_REG_GET32(&misc, AMD_IOMMU_MSINUM);
1065 }
1066
1067 /*
1068 * Set up mapping between control registers PA and VA
1069 */
1070 pgoffset = iommu->aiomt_reg_pa & MMU_PAGEOFFSET;
1071 ASSERT(pgoffset == 0);
1072 iommu->aiomt_reg_pages = mmu_btopr(AMD_IOMMU_REG_SIZE + pgoffset);
1073 iommu->aiomt_reg_size = mmu_ptob(iommu->aiomt_reg_pages);
1074
1075 iommu->aiomt_va = (uintptr_t)device_arena_alloc(
1076 ptob(iommu->aiomt_reg_pages), VM_SLEEP);
1077 if (iommu->aiomt_va == 0) {
1078 cmn_err(CE_WARN, "%s: %s%d: Failed to alloc VA for IOMMU "
1079 "control regs. Skipping IOMMU idx=%d", f, driver,
1080 instance, idx);
1081 mutex_exit(&iommu->aiomt_mutex);
1082 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1083 return (NULL);
1084 }
1085
1086 hat_devload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
1087 iommu->aiomt_reg_size,
1088 mmu_btop(iommu->aiomt_reg_pa), PROT_READ | PROT_WRITE
1089 | HAT_STRICTORDER, HAT_LOAD_LOCK);
1090
1091 iommu->aiomt_reg_va = iommu->aiomt_va + pgoffset;
1092
1093 /*
1094 * Setup the various control register's VA
1095 */
1096 iommu->aiomt_reg_devtbl_va = iommu->aiomt_reg_va +
1097 AMD_IOMMU_DEVTBL_REG_OFF;
1098 iommu->aiomt_reg_cmdbuf_va = iommu->aiomt_reg_va +
1099 AMD_IOMMU_CMDBUF_REG_OFF;
1100 iommu->aiomt_reg_eventlog_va = iommu->aiomt_reg_va +
1101 AMD_IOMMU_EVENTLOG_REG_OFF;
1102 iommu->aiomt_reg_ctrl_va = iommu->aiomt_reg_va +
1103 AMD_IOMMU_CTRL_REG_OFF;
1104 iommu->aiomt_reg_excl_base_va = iommu->aiomt_reg_va +
1105 AMD_IOMMU_EXCL_BASE_REG_OFF;
1106 iommu->aiomt_reg_excl_lim_va = iommu->aiomt_reg_va +
1107 AMD_IOMMU_EXCL_LIM_REG_OFF;
1108 iommu->aiomt_reg_cmdbuf_head_va = iommu->aiomt_reg_va +
1109 AMD_IOMMU_CMDBUF_HEAD_REG_OFF;
1110 iommu->aiomt_reg_cmdbuf_tail_va = iommu->aiomt_reg_va +
1111 AMD_IOMMU_CMDBUF_TAIL_REG_OFF;
1112 iommu->aiomt_reg_eventlog_head_va = iommu->aiomt_reg_va +
1113 AMD_IOMMU_EVENTLOG_HEAD_REG_OFF;
1114 iommu->aiomt_reg_eventlog_tail_va = iommu->aiomt_reg_va +
1115 AMD_IOMMU_EVENTLOG_TAIL_REG_OFF;
1116 iommu->aiomt_reg_status_va = iommu->aiomt_reg_va +
1117 AMD_IOMMU_STATUS_REG_OFF;
1118
1119
1120 /*
1121 * Setup the DEVICE table, CMD buffer, and LOG buffer in
1122 * memory and setup DMA access to this memory location
1123 */
1124 if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
1125 mutex_exit(&iommu->aiomt_mutex);
1126 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1127 return (NULL);
1128 }
1129
1130 if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
1131 mutex_exit(&iommu->aiomt_mutex);
1132 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1133 return (NULL);
1134 }
1135
1136 amd_iommu_enable_interrupts(iommu);
1137
1138 if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
1139 mutex_exit(&iommu->aiomt_mutex);
1140 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1141 return (NULL);
1142 }
1143
1144 /*
1145 * need to setup domain table before gfx bypass
1146 */
1147 amd_iommu_init_page_tables(iommu);
1148
1149 /*
1150 * Set pass-thru for special devices like IOAPIC and HPET
1151 *
1152 * Also, gfx devices don't use DDI for DMA. No need to register
1153 * before setting up gfx passthru
1154 */
1155 if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
1156 mutex_exit(&iommu->aiomt_mutex);
1157 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1158 return (NULL);
1159 }
1160
1161 /* Initialize device table entries based on ACPI settings */
1162 if (amd_iommu_acpi_init_devtbl(iommu) != DDI_SUCCESS) {
1163 cmn_err(CE_WARN, "%s: %s%d: Can't initialize device table",
1164 f, driver, instance);
1165 mutex_exit(&iommu->aiomt_mutex);
1166 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1167 return (NULL);
1168 }
1169
1170 if (amd_iommu_start(iommu) != DDI_SUCCESS) {
1171 mutex_exit(&iommu->aiomt_mutex);
1172 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1173 return (NULL);
1174 }
1175
1176 /* xxx register/start race */
1177 if (amd_iommu_register(iommu) != DDI_SUCCESS) {
1178 mutex_exit(&iommu->aiomt_mutex);
1179 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1180 return (NULL);
1181 }
1182
1183 if (amd_iommu_debug) {
1184 cmn_err(CE_NOTE, "%s: %s%d: IOMMU idx=%d inited.", f, driver,
1185 instance, idx);
1186 }
1187
1188 return (iommu);
1189 }
1190
1191 static int
1192 amd_iommu_fini(amd_iommu_t *iommu, int type)
1193 {
1194 int idx = iommu->aiomt_idx;
1195 dev_info_t *dip = iommu->aiomt_dip;
1196 int instance = ddi_get_instance(dip);
1197 const char *driver = ddi_driver_name(dip);
1198 const char *f = "amd_iommu_fini";
1199
1200 if (type == AMD_IOMMU_TEARDOWN) {
1201 mutex_enter(&iommu->aiomt_mutex);
1202 if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
1203 cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
1204 "idx = %d", f, driver, instance, idx);
1205 return (DDI_FAILURE);
1206 }
1207 }
1208
1209 amd_iommu_stop(iommu);
1210
1211 if (type == AMD_IOMMU_TEARDOWN) {
1212 amd_iommu_fini_page_tables(iommu);
1213 amd_iommu_teardown_interrupts(iommu);
1214 amd_iommu_teardown_exclusion(iommu);
1215 }
1216
1217 amd_iommu_teardown_tables_and_buffers(iommu, type);
1218
1219 if (type == AMD_IOMMU_QUIESCE)
1220 return (DDI_SUCCESS);
1221
1222 if (iommu->aiomt_va != NULL) {
1223 hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
1224 iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
1225 device_arena_free((void *)(uintptr_t)iommu->aiomt_va,
1226 ptob(iommu->aiomt_reg_pages));
1227 iommu->aiomt_va = NULL;
1228 iommu->aiomt_reg_va = NULL;
1229 }
1230 mutex_destroy(&iommu->aiomt_eventlock);
1231 mutex_destroy(&iommu->aiomt_cmdlock);
1232 mutex_exit(&iommu->aiomt_mutex);
1233 mutex_destroy(&iommu->aiomt_mutex);
1234 kmem_free(iommu, sizeof (amd_iommu_t));
1235
1236 cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit complete. idx = %d",
1237 f, driver, instance, idx);
1238
1239 return (DDI_SUCCESS);
1240 }
1241
1242 int
1243 amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
1244 {
1245 int instance = ddi_get_instance(dip);
1246 const char *driver = ddi_driver_name(dip);
1247 ddi_acc_handle_t handle;
1248 uint8_t base_class;
1249 uint8_t sub_class;
1250 uint8_t prog_class;
1251 int idx;
1252 uint32_t id;
1253 uint16_t cap_base;
1254 uint32_t caphdr;
1255 uint8_t cap_type;
1256 uint8_t cap_id;
1257 amd_iommu_t *iommu;
1258 const char *f = "amd_iommu_setup";
1259
1260 ASSERT(instance >= 0);
1261 ASSERT(driver);
1262
1263 /* First setup PCI access to config space */
1264
1265 if (pci_config_setup(dip, &handle) != DDI_SUCCESS) {
1266 cmn_err(CE_WARN, "%s: PCI config setup failed: %s%d",
1267 f, driver, instance);
1268 return (DDI_FAILURE);
1269 }
1270
1271 /*
1272 * The AMD IOMMU is part of an independent PCI function. There may be
1273 * more than one IOMMU in that PCI function
1274 */
1275 base_class = pci_config_get8(handle, PCI_CONF_BASCLASS);
1276 sub_class = pci_config_get8(handle, PCI_CONF_SUBCLASS);
1277 prog_class = pci_config_get8(handle, PCI_CONF_PROGCLASS);
1278
1279 if (base_class != PCI_CLASS_PERIPH || sub_class != PCI_PERIPH_IOMMU ||
1280 prog_class != AMD_IOMMU_PCI_PROG_IF) {
1281 cmn_err(CE_WARN, "%s: %s%d: invalid PCI class(0x%x)/"
1282 "subclass(0x%x)/programming interface(0x%x)", f, driver,
1283 instance, base_class, sub_class, prog_class);
1284 pci_config_teardown(&handle);
1285 return (DDI_FAILURE);
1286 }
1287
1288 /*
1289 * Find and initialize all IOMMU units in this function
1290 */
1291 for (idx = 0; ; idx++) {
1292 if (pci_cap_probe(handle, idx, &id, &cap_base) != DDI_SUCCESS)
1293 break;
1294
1295 /* check if cap ID is secure device cap id */
1296 if (id != PCI_CAP_ID_SECURE_DEV) {
1297 if (amd_iommu_debug) {
1298 cmn_err(CE_NOTE,
1299 "%s: %s%d: skipping IOMMU: idx(0x%x) "
1300 "cap ID (0x%x) != secure dev capid (0x%x)",
1301 f, driver, instance, idx, id,
1302 PCI_CAP_ID_SECURE_DEV);
1303 }
1304 continue;
1305 }
1306
1307 /* check if cap type is IOMMU cap type */
1308 caphdr = PCI_CAP_GET32(handle, 0, cap_base,
1309 AMD_IOMMU_CAP_HDR_OFF);
1310 cap_type = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
1311 cap_id = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
1312
1313 if (cap_type != AMD_IOMMU_CAP) {
1314 cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
1315 "cap type (0x%x) != AMD IOMMU CAP (0x%x)", f,
1316 driver, instance, idx, cap_type, AMD_IOMMU_CAP);
1317 continue;
1318 }
1319 ASSERT(cap_id == PCI_CAP_ID_SECURE_DEV);
1320 ASSERT(cap_id == id);
1321
1322 iommu = amd_iommu_init(dip, handle, idx, cap_base);
1323 if (iommu == NULL) {
1324 cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
1325 "failed to init IOMMU", f,
1326 driver, instance, idx);
1327 continue;
1328 }
1329
1330 if (statep->aioms_iommu_start == NULL) {
1331 statep->aioms_iommu_start = iommu;
1332 } else {
1333 statep->aioms_iommu_end->aiomt_next = iommu;
1334 }
1335 statep->aioms_iommu_end = iommu;
1336
1337 statep->aioms_nunits++;
1338 }
1339
1340 pci_config_teardown(&handle);
1341
1342 if (amd_iommu_debug) {
1343 cmn_err(CE_NOTE, "%s: %s%d: state=%p: setup %d IOMMU units",
1344 f, driver, instance, (void *)statep, statep->aioms_nunits);
1345 }
1346
1347 return (DDI_SUCCESS);
1348 }
1349
1350 int
1351 amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep, int type)
1352 {
1353 int instance = ddi_get_instance(dip);
1354 const char *driver = ddi_driver_name(dip);
1355 amd_iommu_t *iommu, *next_iommu;
1356 int teardown;
1357 int error = DDI_SUCCESS;
1358 const char *f = "amd_iommu_teardown";
1359
1360 teardown = 0;
1361 for (iommu = statep->aioms_iommu_start; iommu;
1362 iommu = next_iommu) {
1363 ASSERT(statep->aioms_nunits > 0);
1364 next_iommu = iommu->aiomt_next;
1365 if (amd_iommu_fini(iommu, type) != DDI_SUCCESS) {
1366 error = DDI_FAILURE;
1367 continue;
1368 }
1369 statep->aioms_nunits--;
1370 teardown++;
1371 }
1372
1373 	cmn_err(CE_NOTE, "%s: %s%d: state=%p: tore down %d units. "
1374 "%d units left", f, driver, instance, (void *)statep,
1375 teardown, statep->aioms_nunits);
1376
1377 return (error);
1378 }
1379
1380 dev_info_t *
1381 amd_iommu_pci_dip(dev_info_t *rdip, const char *path)
1382 {
1383 dev_info_t *pdip;
1384 const char *driver = ddi_driver_name(rdip);
1385 int instance = ddi_get_instance(rdip);
1386 const char *f = "amd_iommu_pci_dip";
1387
1388 /* Hold rdip so it and its parents don't go away */
1389 ndi_hold_devi(rdip);
1390
1391 if (ddi_is_pci_dip(rdip))
1392 return (rdip);
1393
1394 pdip = rdip;
1395 while (pdip = ddi_get_parent(pdip)) {
1396 if (ddi_is_pci_dip(pdip)) {
1397 ndi_hold_devi(pdip);
1398 ndi_rele_devi(rdip);
1399 return (pdip);
1400 }
1401 }
1402
1403 cmn_err(
1404 #ifdef DEBUG
1405 CE_PANIC,
1406 #else
1407 CE_WARN,
1408 #endif /* DEBUG */
1409 "%s: %s%d dip = %p has no PCI parent, path = %s",
1410 f, driver, instance, (void *)rdip, path);
1411
1412 ndi_rele_devi(rdip);
1413
1414 return (NULL);
1415 }
1416
1417 /* Interface with IOMMULIB */
1418 /*ARGSUSED*/
1419 static int
1420 amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip)
1421 {
1422 const char *driver = ddi_driver_name(rdip);
1423 char *s;
1424 int bus, device, func, bdf;
1425 amd_iommu_acpi_ivhd_t *hinfop;
1426 dev_info_t *pci_dip;
1427 amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1428 const char *f = "amd_iommu_probe";
1429 int instance = ddi_get_instance(iommu->aiomt_dip);
1430 const char *idriver = ddi_driver_name(iommu->aiomt_dip);
1431 char *path, *pathp;
1432
1433 if (amd_iommu_disable_list) {
1434 s = strstr(amd_iommu_disable_list, driver);
1435 if (s == NULL)
1436 return (DDI_SUCCESS);
1437 if (s == amd_iommu_disable_list || *(s - 1) == ':') {
1438 s += strlen(driver);
1439 if (*s == '\0' || *s == ':') {
1440 amd_iommu_set_passthru(iommu, rdip);
1441 return (DDI_FAILURE);
1442 }
1443 }
1444 }
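	/*
	 * amd_iommu_disable_list is parsed above as a colon-separated list of
	 * driver names; a matching driver (e.g. a hypothetical "xyzdrv" in
	 * "xyzdrv:otherdrv") is given a pass-thru mapping and is not claimed
	 * by this IOMMU.
	 */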
1445
1446 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1447 if ((pathp = ddi_pathname(rdip, path)) == NULL)
1448 pathp = "<unknown>";
1449
1450 pci_dip = amd_iommu_pci_dip(rdip, path);
1451 if (pci_dip == NULL) {
1452 cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get PCI dip "
1453 "for rdip=%p, path = %s",
1454 f, idriver, instance, iommu->aiomt_idx, (void *)rdip,
1455 pathp);
1456 kmem_free(path, MAXPATHLEN);
1457 return (DDI_FAILURE);
1458 }
1459
1460 if (acpica_get_bdf(pci_dip, &bus, &device, &func) != DDI_SUCCESS) {
1461 cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get BDF "
1462 "for rdip=%p, path = %s",
1463 f, idriver, instance, iommu->aiomt_idx, (void *)rdip,
1464 pathp);
1465 kmem_free(path, MAXPATHLEN);
1466 return (DDI_FAILURE);
1467 }
1468 kmem_free(path, MAXPATHLEN);
1469
1470 /*
1471 * See whether device is described by IVRS as being managed
1472 * by this IOMMU
1473 */
1474 bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) | (uint8_t)func;
1475 hinfop = amd_iommu_lookup_ivhd(bdf);
1476 if (hinfop && hinfop->ach_IOMMU_deviceid == iommu->aiomt_bdf)
1477 return (DDI_SUCCESS);
1478
1479 return (DDI_FAILURE);
1480 }
1481
1482 /*ARGSUSED*/
1483 static int
1484 amd_iommu_allochdl(iommulib_handle_t handle,
1485 dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1486 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep)
1487 {
1488 return (iommulib_iommu_dma_allochdl(dip, rdip, attr, waitfp,
1489 arg, dma_handlep));
1490 }
1491
1492 /*ARGSUSED*/
1493 static int
1494 amd_iommu_freehdl(iommulib_handle_t handle,
1495 dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
1496 {
1497 return (iommulib_iommu_dma_freehdl(dip, rdip, dma_handle));
1498 }
1499
1500 /*ARGSUSED*/
1501 static int
1502 map_current_window(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
1503 struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookie_array, uint_t ccount,
1504 int km_flags)
1505 {
1506 const char *driver = ddi_driver_name(iommu->aiomt_dip);
1507 int instance = ddi_get_instance(iommu->aiomt_dip);
1508 int idx = iommu->aiomt_idx;
1509 int i;
1510 uint64_t start_va;
1511 char *path;
1512 int error = DDI_FAILURE;
1513 const char *f = "map_current_window";
1514
1515 path = kmem_alloc(MAXPATHLEN, km_flags);
1516 if (path == NULL) {
1517 return (DDI_DMA_NORESOURCES);
1518 }
1519
1520 (void) ddi_pathname(rdip, path);
1521 mutex_enter(&amd_iommu_pgtable_lock);
1522
1523 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
1524 cmn_err(CE_NOTE, "%s: %s%d: idx=%d Attempting to get cookies "
1525 "from handle for device %s",
1526 f, driver, instance, idx, path);
1527 }
1528
1529 start_va = 0;
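	/*
	 * Walk the cookies produced by the root nexus bind and replace each
	 * physical address with the IOMMU virtual address that
	 * amd_iommu_map_pa2va() establishes in the page tables.
	 */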
1530 for (i = 0; i < ccount; i++) {
1531 if ((error = amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq,
1532 cookie_array[i].dmac_cookie_addr,
1533 cookie_array[i].dmac_size,
1534 AMD_IOMMU_VMEM_MAP, &start_va, km_flags)) != DDI_SUCCESS) {
1535 break;
1536 }
1537 cookie_array[i].dmac_cookie_addr = (uintptr_t)start_va;
1538 cookie_array[i].dmac_type = 0;
1539 }
1540
1541 if (i != ccount) {
1542 cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot map cookie# %d "
1543 "for device %s", f, driver, instance, idx, i, path);
1544 (void) unmap_current_window(iommu, rdip, cookie_array,
1545 ccount, i, 1);
1546 goto out;
1547 }
1548
1549 if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
1550 cmn_err(CE_NOTE, "%s: return SUCCESS", f);
1551 }
1552
1553 error = DDI_DMA_MAPPED;
1554 out:
1555 mutex_exit(&amd_iommu_pgtable_lock);
1556 kmem_free(path, MAXPATHLEN);
1557 return (error);
1558 }
1559
1560 /*ARGSUSED*/
1561 static int
1562 unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
1563 ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked)
1564 {
1565 const char *driver = ddi_driver_name(iommu->aiomt_dip);
1566 int instance = ddi_get_instance(iommu->aiomt_dip);
1567 int idx = iommu->aiomt_idx;
1568 int i;
1569 int error = DDI_FAILURE;
1570 char *path;
1571 int pathfree;
1572 const char *f = "unmap_current_window";
1573
1574 if (!locked)
1575 mutex_enter(&amd_iommu_pgtable_lock);
1576
1577 path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
1578 if (path) {
1579 (void) ddi_pathname(rdip, path);
1580 pathfree = 1;
1581 } else {
1582 path = "<path-mem-alloc-failed>";
1583 pathfree = 0;
1584 }
1585
1586 if (ncookies == -1)
1587 ncookies = ccount;
1588
1589 for (i = 0; i < ncookies; i++) {
1590 if (amd_iommu_unmap_va(iommu, rdip,
1591 cookie_array[i].dmac_cookie_addr,
1592 cookie_array[i].dmac_size,
1593 AMD_IOMMU_VMEM_MAP) != DDI_SUCCESS) {
1594 break;
1595 }
1596 }
1597
1598 if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT, NULL, 0, 0)
1599 != DDI_SUCCESS) {
1600 cmn_err(CE_WARN, "%s: AMD IOMMU completion wait failed for: %s",
1601 f, path);
1602 }
1603
1604 if (i != ncookies) {
1605 cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot unmap cookie# %d "
1606 "for device %s", f, driver, instance, idx, i, path);
1607 error = DDI_FAILURE;
1608 goto out;
1609 }
1610
1611 error = DDI_SUCCESS;
1612
1613 out:
1614 if (pathfree)
1615 kmem_free(path, MAXPATHLEN);
1616 if (!locked)
1617 mutex_exit(&amd_iommu_pgtable_lock);
1618 return (error);
1619 }
1620
1621 /*ARGSUSED*/
1622 static int
1623 amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
1624 dev_info_t *rdip, ddi_dma_handle_t dma_handle,
1625 struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
1626 uint_t *ccountp)
1627 {
1628 int dma_error = DDI_DMA_NOMAPPING;
1629 int error;
1630 char *path;
1631 ddi_dma_cookie_t *cookie_array = NULL;
1632 uint_t ccount = 0;
1633 ddi_dma_impl_t *hp;
1634 ddi_dma_attr_t *attrp;
1635 int km_flags;
1636 amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1637 int instance = ddi_get_instance(rdip);
1638 const char *driver = ddi_driver_name(rdip);
1639 const char *f = "amd_iommu_bindhdl";
1640
1641 dma_error = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
1642 dmareq, cookiep, ccountp);
1643
1644 if (dma_error != DDI_DMA_MAPPED && dma_error != DDI_DMA_PARTIAL_MAP)
1645 return (dma_error);
1646
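	/*
	 * The root nexus bind above produced cookies holding physical
	 * addresses. Below we fetch them, remap each through this IOMMU via
	 * map_current_window(), and push the rewritten cookies back into the
	 * handle so the caller sees IOMMU virtual addresses.
	 */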
1647 km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
1648
1649 path = kmem_alloc(MAXPATHLEN, km_flags);
1650 if (path) {
1651 (void) ddi_pathname(rdip, path);
1652 } else {
1653 dma_error = DDI_DMA_NORESOURCES;
1654 goto unbind;
1655 }
1656
1657 if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
1658 cmn_err(CE_NOTE, "%s: %s got cookie (%p), #cookies: %d",
1659 f, path,
1660 (void *)cookiep->dmac_cookie_addr,
1661 *ccountp);
1662 }
1663
1664 cookie_array = NULL;
1665 ccount = 0;
1666 if ((error = iommulib_iommu_dma_get_cookies(dip, dma_handle,
1667 &cookie_array, &ccount)) != DDI_SUCCESS) {
1668 cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1669 "for device %s", f, driver, instance, path);
1670 dma_error = error;
1671 goto unbind;
1672 }
1673
1674 hp = (ddi_dma_impl_t *)dma_handle;
1675 attrp = &hp->dmai_attr;
1676
1677 error = map_current_window(iommu, rdip, attrp, dmareq,
1678 cookie_array, ccount, km_flags);
1679 if (error != DDI_SUCCESS) {
1680 dma_error = error;
1681 goto unbind;
1682 }
1683
1684 if ((error = iommulib_iommu_dma_set_cookies(dip, dma_handle,
1685 cookie_array, ccount)) != DDI_SUCCESS) {
1686 cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
1687 "for device %s", f, driver, instance, path);
1688 dma_error = error;
1689 goto unbind;
1690 }
1691
1692 *cookiep = cookie_array[0];
1693
1694 if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
1695 cmn_err(CE_NOTE, "%s: %s remapped cookie (%p), #cookies: %d",
1696 f, path,
1697 (void *)(uintptr_t)cookiep->dmac_cookie_addr,
1698 *ccountp);
1699 }
1700
1701 kmem_free(path, MAXPATHLEN);
1702 ASSERT(dma_error == DDI_DMA_MAPPED || dma_error == DDI_DMA_PARTIAL_MAP);
1703 return (dma_error);
1704 unbind:
1705 kmem_free(path, MAXPATHLEN);
1706 (void) iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle);
1707 return (dma_error);
1708 }
1709
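/*
 * DDI DMA unbind entry point.  The cookie list saved at bind time is
 * retrieved and cleared from the handle, the underlying unbind is done,
 * and the IOMMU mappings for the current window are torn down with
 * unmap_current_window() (an ncookies argument of -1 asks it to unmap
 * the entire ccount-sized cookie list).
 */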
1710 /*ARGSUSED*/
1711 static int
1712 amd_iommu_unbindhdl(iommulib_handle_t handle,
1713 dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
1714 {
1715 amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1716 ddi_dma_cookie_t *cookie_array = NULL;
1717 uint_t ccount = 0;
1718 int error = DDI_FAILURE;
1719 int instance = ddi_get_instance(rdip);
1720 const char *driver = ddi_driver_name(rdip);
1721 const char *f = "amd_iommu_unbindhdl";
1722
1723 cookie_array = NULL;
1724 ccount = 0;
1725 if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1726 &ccount) != DDI_SUCCESS) {
1727 cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1728 "for device %p", f, driver, instance, (void *)rdip);
1729 error = DDI_FAILURE;
1730 goto out;
1731 }
1732
1733 if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
1734 cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
1735 "for device %p", f, driver, instance, (void *)rdip);
1736 error = DDI_FAILURE;
1737 goto out;
1738 }
1739
1740 if (iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle)
1741 != DDI_SUCCESS) {
1742 cmn_err(CE_WARN, "%s: %s%d: failed to unbindhdl for dip=%p",
1743 f, driver, instance, (void *)rdip);
1744 error = DDI_FAILURE;
1745 goto out;
1746 }
1747
1748 if (unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0)
1749 != DDI_SUCCESS) {
1750 cmn_err(CE_WARN, "%s: %s%d: failed to unmap current window "
1751 "for dip=%p", f, driver, instance, (void *)rdip);
1752 error = DDI_FAILURE;
1753 } else {
1754 error = DDI_SUCCESS;
1755 }
1756 out:
1757 if (cookie_array)
1758 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1759 return (error);
1760 }
1761
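/*
 * DDI DMA sync entry point.  The remapped cookies are temporarily
 * detached from the handle before calling the underlying sync and
 * reattached afterwards, presumably so the root nexus operates on the
 * handle in its original state.
 */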
1762 /*ARGSUSED*/
1763 static int
1764 amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
1765 dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
1766 size_t len, uint_t cache_flags)
1767 {
1768 ddi_dma_cookie_t *cookie_array = NULL;
1769 uint_t ccount = 0;
1770 int error;
1771 const char *f = "amd_iommu_sync";
1772
1773 cookie_array = NULL;
1774 ccount = 0;
1775 if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1776 &ccount) != DDI_SUCCESS) {
1777 ASSERT(cookie_array == NULL);
1778 cmn_err(CE_WARN, "%s: Cannot get cookies "
1779 "for device %p", f, (void *)rdip);
1780 error = DDI_FAILURE;
1781 goto out;
1782 }
1783
1784 if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
1785 cmn_err(CE_WARN, "%s: Cannot clear cookies "
1786 "for device %p", f, (void *)rdip);
1787 error = DDI_FAILURE;
1788 goto out;
1789 }
1790
1791 error = iommulib_iommu_dma_sync(dip, rdip, dma_handle, off,
1792 len, cache_flags);
1793
1794 if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
1795 ccount) != DDI_SUCCESS) {
1796 cmn_err(CE_WARN, "%s: Cannot set cookies "
1797 "for device %p", f, (void *)rdip);
1798 error = DDI_FAILURE;
1799 } else {
1800 cookie_array = NULL;
1801 ccount = 0;
1802 }
1803
1804 out:
1805 if (cookie_array)
1806 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1807 return (error);
1808 }
1809
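/*
 * DDI DMA window entry point.  Switching windows invalidates the current
 * cookie list: the old window's IOMMU mappings are torn down, the root
 * nexus provides the new window's cookies, and those are remapped through
 * the IOMMU.  DDI_DMA_RDWR is used for the replacement request,
 * presumably because the original request flags are not available at
 * this entry point.
 */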
1810 /*ARGSUSED*/
1811 static int
1812 amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
1813 dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
1814 off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
1815 uint_t *ccountp)
1816 {
1817 int error = DDI_FAILURE;
1818 amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1819 ddi_dma_cookie_t *cookie_array = NULL;
1820 uint_t ccount = 0;
1821 int km_flags;
1822 ddi_dma_impl_t *hp;
1823 ddi_dma_attr_t *attrp;
1824 struct ddi_dma_req sdmareq = {0};
1825 int instance = ddi_get_instance(rdip);
1826 const char *driver = ddi_driver_name(rdip);
1827 const char *f = "amd_iommu_win";
1828
1829 km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
1830
1831 cookie_array = NULL;
1832 ccount = 0;
1833 if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1834 &ccount) != DDI_SUCCESS) {
1835 cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1836 "for device %p", f, driver, instance, (void *)rdip);
1837 error = DDI_FAILURE;
1838 goto out;
1839 }
1840
1841 if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
1842 cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
1843 "for device %p", f, driver, instance, (void *)rdip);
1844 error = DDI_FAILURE;
1845 goto out;
1846 }
1847
1848 if (iommulib_iommu_dma_win(dip, rdip, dma_handle, win,
1849 offp, lenp, cookiep, ccountp) != DDI_SUCCESS) {
1850 cmn_err(CE_WARN, "%s: %s%d: failed to switch windows for dip=%p",
1851 f, driver, instance, (void *)rdip);
1852 error = DDI_FAILURE;
1853 goto out;
1854 }
1855
1856 (void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);
1857
1858 if (cookie_array) {
1859 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1860 cookie_array = NULL;
1861 ccount = 0;
1862 }
1863
1864 cookie_array = NULL;
1865 ccount = 0;
1866 if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1867 &ccount) != DDI_SUCCESS) {
1868 cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1869 "for device %p", f, driver, instance, (void *)rdip);
1870 error = DDI_FAILURE;
1871 goto out;
1872 }
1873
1874 hp = (ddi_dma_impl_t *)dma_handle;
1875 attrp = &hp->dmai_attr;
1876
1877 sdmareq.dmar_flags = DDI_DMA_RDWR;
1878 error = map_current_window(iommu, rdip, attrp, &sdmareq,
1879 cookie_array, ccount, km_flags);
1880
1881 if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
1882 ccount) != DDI_SUCCESS) {
1883 cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
1884 "for device %p", f, driver, instance, (void *)rdip);
1885 error = DDI_FAILURE;
1886 goto out;
1887 }
1888
1889 *cookiep = cookie_array[0];
1890
1891 return (error == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
1892 out:
1893 if (cookie_array)
1894 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1895
1896 return (error);
1897 }
1898
1899 /* Obsoleted DMA routines */
1900
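/*
 * ddi_dma_map()/ddi_dma_mctl() are obsolete DDI interfaces and are not
 * expected to be called; the ASSERT(0) flags any caller on DEBUG kernels
 * while the request is still passed through to the underlying
 * implementation.
 */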
1901 /*ARGSUSED*/
1902 static int
1903 amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
1904 dev_info_t *rdip, struct ddi_dma_req *dmareq,
1905 ddi_dma_handle_t *dma_handle)
1906 {
1907 ASSERT(0);
1908 return (iommulib_iommu_dma_map(dip, rdip, dmareq, dma_handle));
1909 }
1910
1911 /*ARGSUSED*/
1912 static int
1913 amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
1914 dev_info_t *rdip, ddi_dma_handle_t dma_handle,
1915 enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
1916 caddr_t *objpp, uint_t cache_flags)
1917 {
1918 ASSERT(0);
1919 return (iommulib_iommu_dma_mctl(dip, rdip, dma_handle,
1920 request, offp, lenp, objpp, cache_flags));
1921 }
1922
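/*
 * DVMA object (un)mapping is not supported by this driver; both entry
 * points simply return DDI_ENOTSUP.
 */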
1923 /*ARGSUSED*/
1924 static int
1925 amd_iommu_mapobject(iommulib_handle_t handle, dev_info_t *dip,
1926 dev_info_t *rdip, ddi_dma_handle_t dma_handle,
1927 struct ddi_dma_req *dmareq, ddi_dma_obj_t *dmao)
1928 {
1929 return (DDI_ENOTSUP);
1930 }
1931
1932 /*ARGSUSED*/
1933 static int
1934 amd_iommu_unmapobject(iommulib_handle_t handle, dev_info_t *dip,
1935 dev_info_t *rdip, ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao)
1936 {
1937 return (DDI_ENOTSUP);
1938 }
1939
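/*
 * Register access workarounds: the 64-bit IOMMU control registers are
 * read and written as two 32-bit halves through the split_t union,
 * apparently to avoid single 64-bit MMIO accesses that some platforms
 * do not handle correctly, before the normal bit-field get/set macros
 * are applied.
 */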
1940 uint64_t
1941 amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits)
1942 {
1943 split_t s;
1944 uint32_t *ptr32 = (uint32_t *)regp;
1945 uint64_t *s64p = &(s.u64);
1946
1947 s.u32[0] = ptr32[0];
1948 s.u32[1] = ptr32[1];
1949
1950 return (AMD_IOMMU_REG_GET64_IMPL(s64p, bits));
1951 }
1952
1953 uint64_t
1954 amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits, uint64_t value)
1955 {
1956 split_t s;
1957 uint32_t *ptr32 = (uint32_t *)regp;
1958 uint64_t *s64p = &(s.u64);
1959
1960 s.u32[0] = ptr32[0];
1961 s.u32[1] = ptr32[1];
1962
1963 AMD_IOMMU_REG_SET64_IMPL(s64p, bits, value);
1964
1965 *regp = s.u64;
1966
1967 return (s.u64);
1968 }
1969
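/*
 * Boot properties recognized by the driver:
 *
 *	amd-iommu=no|false		disable the AMD IOMMU driver
 *	amd-iommu-disable-list=...	drivers to exclude from translation
 *
 * Illustrative example only (the exact mechanism depends on the boot
 * environment); on x86 these can typically be passed to the kernel as
 * -B name=value pairs, e.g.:
 *
 *	-B amd-iommu=no
 */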
1970 void
1971 amd_iommu_read_boot_props(void)
1972 {
1973 char *propval;
1974
1975 /*
1976 * If the "amd-iommu" boot property is set to "no" or "false",
1977 * disable the AMD IOMMU.
1978 */
1979 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
1980 DDI_PROP_DONTPASS, "amd-iommu", &propval) == DDI_SUCCESS) {
1981 if (strcmp(propval, "no") == 0 ||
1982 strcmp(propval, "false") == 0) {
1983 amd_iommu_disable = 1;
1984 }
1985 ddi_prop_free(propval);
1986 }
1987
1988 /*
1989 * Copy the user-specified list of drivers for which the IOMMU is disabled.
1990 */
1991 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
1992 DDI_PROP_DONTPASS, "amd-iommu-disable-list", &propval)
1993 == DDI_SUCCESS) {
1994 amd_iommu_disable_list = kmem_alloc(strlen(propval) + 1,
1995 KM_SLEEP);
1996 (void) strcpy(amd_iommu_disable_list, propval);
1997 ddi_prop_free(propval);
1998 }
1999
2000 }
2001
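/*
 * The same properties may also be supplied through the driver's .conf
 * file (DDI_PROP_NOTPROM limits the lookup to software properties).
 * A hypothetical amd_iommu.conf sketch:
 *
 *	amd-iommu="no";
 *	amd-iommu-disable-list="<driver names>";
 *
 * Note that, unlike the boot property, only the value "no" is honored
 * here.
 */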
2002 void
2003 amd_iommu_lookup_conf_props(dev_info_t *dip)
2004 {
2005 char *disable;
2006
2007 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
2008 DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu", &disable)
2009 == DDI_PROP_SUCCESS) {
2010 if (strcmp(disable, "no") == 0) {
2011 amd_iommu_disable = 1;
2012 }
2013 ddi_prop_free(disable);
2014 }
2015
2016 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
2017 DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu-disable-list",
2018 &disable) == DDI_PROP_SUCCESS) {
2019 amd_iommu_disable_list = kmem_alloc(strlen(disable) + 1,
2020 KM_SLEEP);
2021 (void) strcpy(amd_iommu_disable_list, disable);
2022 ddi_prop_free(disable);
2023 }
2024 }
2025