xref: /openbsd-src/sys/dev/acpi/acpidmar.c (revision f6246b7f478ea7b2b6df549ae5998f8112d22650)
1 /*
2  * Copyright (c) 2015 Jordan Hargrave <jordan_hargrave@hotmail.com>
3  *
4  * Permission to use, copy, modify, and distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/kernel.h>
20 #include <sys/device.h>
21 #include <sys/malloc.h>
22 #include <sys/queue.h>
23 #include <sys/types.h>
24 #include <sys/mbuf.h>
25 #include <sys/proc.h>
26 
27 #include <uvm/uvm_extern.h>
28 
29 #include <machine/apicvar.h>
30 #include <machine/biosvar.h>
31 #include <machine/cpuvar.h>
32 #include <machine/bus.h>
33 
34 #include <dev/acpi/acpireg.h>
35 #include <dev/acpi/acpivar.h>
36 #include <dev/acpi/acpidev.h>
37 #include <dev/acpi/amltypes.h>
38 #include <dev/acpi/dsdt.h>
39 
40 #include <uvm/uvm_extern.h>
41 
42 #include <machine/i8259.h>
43 #include <machine/i82093reg.h>
44 #include <machine/i82093var.h>
45 #include <machine/i82489reg.h>
46 #include <machine/i82489var.h>
47 
48 #include <machine/mpbiosvar.h>
49 
50 #include <dev/pci/pcireg.h>
51 #include <dev/pci/pcivar.h>
52 #include <dev/pci/pcidevs.h>
53 #include <dev/pci/ppbreg.h>
54 
55 #include "ioapic.h"
56 
57 #include "acpidmar.h"
58 #include "amd_iommu.h"
59 
60 /* We don't want IOMMU to remap MSI */
61 #define MSI_BASE_ADDRESS	0xFEE00000L
62 #define MSI_BASE_SIZE		0x00100000L
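/*
 * 0xFEE00000-0xFEEFFFFF is the x86 interrupt/MSI address window; every
 * domain reserves it in its iovamap (see domain_create()) so allocated
 * DMA addresses can never alias message-signalled interrupt writes.
 */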
63 #define MAX_DEVFN		65536
64 
65 #ifdef IOMMU_DEBUG
66 int acpidmar_dbg_lvl = 0;
67 #define DPRINTF(lvl,x...) if (acpidmar_dbg_lvl >= lvl) { printf(x); }
68 #else
69 #define DPRINTF(lvl,x...)
70 #endif
71 
72 #ifdef DDB
73 int	acpidmar_ddb = 0;
74 #endif
75 
76 int	acpidmar_force_cm = 1;
77 
78 /* Page Table Entry per domain */
79 struct iommu_softc;
80 
81 static inline int
82 mksid(int b, int d, int f)
83 {
84 	return (b << 8) + (d << 3) + f;
85 }
86 
87 static inline int
88 sid_devfn(int sid)
89 {
90 	return sid & 0xff;
91 }
92 
93 static inline int
94 sid_bus(int sid)
95 {
96 	return (sid >> 8) & 0xff;
97 }
98 
99 static inline int
100 sid_dev(int sid)
101 {
102 	return (sid >> 3) & 0x1f;
103 }
104 
105 static inline int
106 sid_fun(int sid)
107 {
108 	return (sid >> 0) & 0x7;
109 }
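
/*
 * Example: mksid(0x02, 0x1f, 0x3) == 0x02fb for PCI device 02:1f.3, and
 * sid_bus(0x02fb) == 0x02, sid_dev() == 0x1f, sid_fun() == 0x3; i.e. a
 * source-id packs bus[15:8], device[7:3] and function[2:0].
 */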
110 
111 /* Alias mapping */
112 #define SID_INVALID 0x80000000L
113 static uint32_t sid_flag[MAX_DEVFN];
114 
115 struct domain_dev {
116 	int			sid;
117 	int			sec;
118 	int			sub;
119 	TAILQ_ENTRY(domain_dev)	link;
120 };
121 
122 struct domain {
123 	struct iommu_softc	*iommu;
124 	int			did;
125 	int			gaw;
126 	struct pte_entry	*pte;
127 	paddr_t			ptep;
128 	struct bus_dma_tag	dmat;
129 	int			flag;
130 
131 	struct mutex		exlck;
132 	char			exname[32];
133 	struct extent		*iovamap;
134 	TAILQ_HEAD(,domain_dev)	devices;
135 	TAILQ_ENTRY(domain)	link;
136 };
137 
138 #define DOM_DEBUG 0x1
139 #define DOM_NOMAP 0x2
140 
141 struct dmar_devlist {
142 	int				type;
143 	int				bus;
144 	int				ndp;
145 	struct acpidmar_devpath		*dp;
146 	TAILQ_ENTRY(dmar_devlist)	link;
147 };
148 
149 TAILQ_HEAD(devlist_head, dmar_devlist);
150 
151 struct ivhd_devlist {
152 	int				start_id;
153 	int				end_id;
154 	int				cfg;
155 	TAILQ_ENTRY(ivhd_devlist)	link;
156 };
157 
158 struct rmrr_softc {
159 	TAILQ_ENTRY(rmrr_softc)	link;
160 	struct devlist_head	devices;
161 	int			segment;
162 	uint64_t		start;
163 	uint64_t		end;
164 };
165 
166 struct atsr_softc {
167 	TAILQ_ENTRY(atsr_softc)	link;
168 	struct devlist_head	devices;
169 	int			segment;
170 	int			flags;
171 };
172 
173 struct iommu_pic {
174 	struct pic		pic;
175 	struct iommu_softc	*iommu;
176 };
177 
178 #define IOMMU_FLAGS_CATCHALL		0x1
179 #define IOMMU_FLAGS_BAD			0x2
180 #define IOMMU_FLAGS_SUSPEND		0x4
181 
182 struct iommu_softc {
183 	TAILQ_ENTRY(iommu_softc)link;
184 	struct devlist_head	devices;
185 	int			id;
186 	int			flags;
187 	int			segment;
188 
189 	struct mutex		reg_lock;
190 
191 	bus_space_tag_t		iot;
192 	bus_space_handle_t	ioh;
193 
194 	uint64_t		cap;
195 	uint64_t		ecap;
196 	uint32_t		gcmd;
197 
198 	int			mgaw;
199 	int			agaw;
200 	int			ndoms;
201 
202 	struct root_entry	*root;
203 	struct context_entry	*ctx[256];
204 
205 	void			*intr;
206 	struct iommu_pic	pic;
207 	int			fedata;
208 	uint64_t		feaddr;
209 	uint64_t		rtaddr;
210 
211 	/* Queued Invalidation */
212 	int			qi_head;
213 	int			qi_tail;
214 	paddr_t			qip;
215 	struct qi_entry		*qi;
216 
217 	struct domain		*unity;
218 	TAILQ_HEAD(,domain)	domains;
219 
220 	/* AMD iommu */
221 	struct ivhd_dte		*dte;
222 	void			*cmd_tbl;
223 	void			*evt_tbl;
224 	paddr_t			cmd_tblp;
225 	paddr_t			evt_tblp;
226 };
227 
228 static inline int iommu_bad(struct iommu_softc *sc)
229 {
230 	return (sc->flags & IOMMU_FLAGS_BAD);
231 }
232 
233 static inline int iommu_enabled(struct iommu_softc *sc)
234 {
235 	if (sc->dte) {
236 		return 1;
237 	}
238 	return (sc->gcmd & GCMD_TE);
239 }
240 
241 struct acpidmar_softc {
242 	struct device		sc_dev;
243 
244 	pci_chipset_tag_t	sc_pc;
245 	bus_space_tag_t		sc_memt;
246 	int			sc_haw;
247 	int			sc_flags;
248 	bus_dma_tag_t		sc_dmat;
249 
250 	struct ivhd_dte		*sc_hwdte;
251 	paddr_t			sc_hwdtep;
252 
253 	TAILQ_HEAD(,iommu_softc)sc_drhds;
254 	TAILQ_HEAD(,rmrr_softc)	sc_rmrrs;
255 	TAILQ_HEAD(,atsr_softc)	sc_atsrs;
256 };
257 
258 int		acpidmar_activate(struct device *, int);
259 int		acpidmar_match(struct device *, void *, void *);
260 void		acpidmar_attach(struct device *, struct device *, void *);
261 struct domain	*acpidmar_pci_attach(struct acpidmar_softc *, int, int, int);
262 
263 struct cfattach acpidmar_ca = {
264 	sizeof(struct acpidmar_softc), acpidmar_match, acpidmar_attach, NULL,
265 	acpidmar_activate
266 };
267 
268 struct cfdriver acpidmar_cd = {
269 	NULL, "acpidmar", DV_DULL
270 };
271 
272 struct		acpidmar_softc *acpidmar_sc;
273 int		acpidmar_intr(void *);
274 int		acpiivhd_intr(void *);
275 
276 #define DID_UNITY 0x1
277 
278 void _dumppte(struct pte_entry *, int, vaddr_t);
279 
280 struct domain *domain_create(struct iommu_softc *, int);
281 struct domain *domain_lookup(struct acpidmar_softc *, int, int);
282 
283 void domain_unload_map(struct domain *, bus_dmamap_t);
284 void domain_load_map(struct domain *, bus_dmamap_t, int, int, const char *);
285 
286 void (*domain_map_page)(struct domain *, vaddr_t, paddr_t, uint64_t);
287 void domain_map_page_amd(struct domain *, vaddr_t, paddr_t, uint64_t);
288 void domain_map_page_intel(struct domain *, vaddr_t, paddr_t, uint64_t);
289 void domain_map_pthru(struct domain *, paddr_t, paddr_t);
290 
291 void acpidmar_pci_hook(pci_chipset_tag_t, struct pci_attach_args *);
292 void acpidmar_parse_devscope(union acpidmar_entry *, int, int,
293     struct devlist_head *);
294 int acpidmar_match_devscope(struct devlist_head *, pci_chipset_tag_t, int);
295 
296 void acpidmar_init(struct acpidmar_softc *, struct acpi_dmar *);
297 void acpidmar_drhd(struct acpidmar_softc *, union acpidmar_entry *);
298 void acpidmar_rmrr(struct acpidmar_softc *, union acpidmar_entry *);
299 void acpidmar_atsr(struct acpidmar_softc *, union acpidmar_entry *);
300 void acpiivrs_init(struct acpidmar_softc *, struct acpi_ivrs *);
301 
302 void *acpidmar_intr_establish(void *, int, int (*)(void *), void *,
303     const char *);
304 
305 void iommu_write_4(struct iommu_softc *, int, uint32_t);
306 uint32_t iommu_read_4(struct iommu_softc *, int);
307 void iommu_write_8(struct iommu_softc *, int, uint64_t);
308 uint64_t iommu_read_8(struct iommu_softc *, int);
309 void iommu_showfault(struct iommu_softc *, int,
310     struct fault_entry *);
311 void iommu_showcfg(struct iommu_softc *, int);
312 
313 int iommu_init(struct acpidmar_softc *, struct iommu_softc *,
314     struct acpidmar_drhd *);
315 int iommu_enable_translation(struct iommu_softc *, int);
316 void iommu_enable_qi(struct iommu_softc *, int);
317 void iommu_flush_cache(struct iommu_softc *, void *, size_t);
318 void *iommu_alloc_page(struct iommu_softc *, paddr_t *);
319 void iommu_flush_write_buffer(struct iommu_softc *);
320 void iommu_issue_qi(struct iommu_softc *, struct qi_entry *);
321 
322 void iommu_flush_ctx(struct iommu_softc *, int, int, int, int);
323 void iommu_flush_ctx_qi(struct iommu_softc *, int, int, int, int);
324 void iommu_flush_tlb(struct iommu_softc *, int, int);
325 void iommu_flush_tlb_qi(struct iommu_softc *, int, int);
326 
327 void iommu_set_rtaddr(struct iommu_softc *, paddr_t);
328 
329 void *iommu_alloc_hwdte(struct acpidmar_softc *, size_t, paddr_t *);
330 
331 const char *dmar_bdf(int);
332 
333 const char *
334 dmar_bdf(int sid)
335 {
336 	static char	bdf[32];
337 
338 	snprintf(bdf, sizeof(bdf), "%.4x:%.2x:%.2x.%x", 0,
339 	    sid_bus(sid), sid_dev(sid), sid_fun(sid));
340 
341 	return (bdf);
342 }
343 
344 /* busdma */
345 static int dmar_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
346     bus_size_t, int, bus_dmamap_t *);
347 static void dmar_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
348 static int dmar_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
349     struct proc *, int);
350 static int dmar_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *,
351     int);
352 static int dmar_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
353 static int dmar_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
354     bus_dma_segment_t *, int, bus_size_t, int);
355 static void dmar_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
356 static void dmar_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
357     bus_size_t, int);
358 static int dmar_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t,
359     bus_dma_segment_t *, int, int *, int);
360 static void dmar_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
361 static int dmar_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, int, size_t,
362     caddr_t *, int);
363 static void dmar_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t);
364 static paddr_t	dmar_dmamem_mmap(bus_dma_tag_t, bus_dma_segment_t *, int, off_t,
365     int, int);
366 
367 static void dmar_dumpseg(bus_dma_tag_t, int, bus_dma_segment_t *, const char *);
368 const char *dom_bdf(struct domain *);
369 void domain_map_check(struct domain *);
370 
371 struct pte_entry *pte_lvl(struct iommu_softc *, struct pte_entry *, vaddr_t, int, uint64_t);
372 int  ivhd_poll_events(struct iommu_softc *);
373 void ivhd_showreg(struct iommu_softc *);
374 void ivhd_showdte(struct iommu_softc *);
375 void ivhd_showcmd(struct iommu_softc *);
376 
377 static inline int
378 debugme(struct domain *dom)
379 {
380 	return 0;
381 	return (dom->flag & DOM_DEBUG);
382 }
383 
384 void
385 domain_map_check(struct domain *dom)
386 {
387 	struct iommu_softc *iommu;
388 	struct domain_dev *dd;
389 	struct context_entry *ctx;
390 	int v;
391 
392 	iommu = dom->iommu;
393 	TAILQ_FOREACH(dd, &dom->devices, link) {
394 		acpidmar_pci_attach(acpidmar_sc, iommu->segment, dd->sid, 1);
395 
396 		if (iommu->dte)
397 			continue;
398 
399 		/* Check if this is the first time we are mapped */
400 		ctx = &iommu->ctx[sid_bus(dd->sid)][sid_devfn(dd->sid)];
401 		v = context_user(ctx);
402 		if (v != 0xA) {
403 			printf("  map: %.4x:%.2x:%.2x.%x iommu:%d did:%.4x\n",
404 			    iommu->segment,
405 			    sid_bus(dd->sid),
406 			    sid_dev(dd->sid),
407 			    sid_fun(dd->sid),
408 			    iommu->id,
409 			    dom->did);
410 			context_set_user(ctx, 0xA);
411 		}
412 	}
413 }
414 
415 /* Map a single page as passthrough - used for DRM */
416 void
417 dmar_ptmap(bus_dma_tag_t tag, bus_addr_t addr)
418 {
419 	struct domain *dom = tag->_cookie;
420 
421 	if (!acpidmar_sc)
422 		return;
423 	domain_map_check(dom);
424 	domain_map_page(dom, addr, addr, PTE_P | PTE_R | PTE_W);
425 }
426 
427 /* Map a range of pages 1:1 */
428 void
429 domain_map_pthru(struct domain *dom, paddr_t start, paddr_t end)
430 {
431 	domain_map_check(dom);
432 	while (start < end) {
433 		domain_map_page(dom, start, start, PTE_P | PTE_R | PTE_W);
434 		start += VTD_PAGE_SIZE;
435 	}
436 }
437 
438 /* Map a single paddr to IOMMU paddr */
439 void
440 domain_map_page_intel(struct domain *dom, vaddr_t va, paddr_t pa, uint64_t flags)
441 {
442 	paddr_t paddr;
443 	struct pte_entry *pte, *npte;
444 	int lvl, idx;
445 	struct iommu_softc *iommu;
446 
447 	iommu = dom->iommu;
448 	/* Insert physical address into virtual address map
449 	 * XXX: could we use private pmap here?
450 	 * essentially doing a pmap_enter(map, va, pa, prot);
451 	 */
452 
453 	/* Only handle 4k pages for now */
454 	npte = dom->pte;
455 	for (lvl = iommu->agaw - VTD_STRIDE_SIZE; lvl >= VTD_LEVEL0;
456 	    lvl -= VTD_STRIDE_SIZE) {
457 		idx = (va >> lvl) & VTD_STRIDE_MASK;
458 		pte = &npte[idx];
459 		if (lvl == VTD_LEVEL0) {
460 			/* Level 1: Page Table - add physical address */
461 			pte->val = pa | flags;
462 			iommu_flush_cache(iommu, pte, sizeof(*pte));
463 			break;
464 		} else if (!(pte->val & PTE_P)) {
465 			/* Level N: Point to lower level table */
466 			iommu_alloc_page(iommu, &paddr);
467 			pte->val = paddr | PTE_P | PTE_R | PTE_W;
468 			iommu_flush_cache(iommu, pte, sizeof(*pte));
469 		}
470 		npte = (void *)PMAP_DIRECT_MAP((pte->val & VTD_PTE_MASK));
471 	}
472 }
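
/*
 * Example walk for the loop above: with a 48-bit agaw the shifts visited
 * are 39, 30, 21 and 12, i.e. a 4-level table with a 9-bit index (512
 * entries) per level.  Intermediate tables are allocated on demand and
 * only the final 4K leaf entry gets the caller's permission flags.
 */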
473 
474 /* Map a single paddr to IOMMU paddr: AMD
475  * physical address breakdown into levels:
476  * xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
477  *        5.55555555.44444444.43333333.33222222.22211111.1111----.--------
478  * mode:
479  *  000 = none   shift
480  *  001 = 1 [21].12
481  *  010 = 2 [30].21
482  *  011 = 3 [39].30
483  *  100 = 4 [48].39
484  *  101 = 5 [57]
485  *  110 = 6
486  *  111 = reserved
487  */
488 struct pte_entry *
489 pte_lvl(struct iommu_softc *iommu, struct pte_entry *pte, vaddr_t va,
490 	int shift, uint64_t flags)
491 {
492 	paddr_t paddr;
493 	int idx;
494 
495 	idx = (va >> shift) & VTD_STRIDE_MASK;
496 	if (!(pte[idx].val & PTE_P)) {
497 		/* Page Table entry is not present... create a new page entry */
498 		iommu_alloc_page(iommu, &paddr);
499 		pte[idx].val = paddr | flags;
500 		iommu_flush_cache(iommu, &pte[idx], sizeof(pte[idx]));
501 	}
502 	return (void *)PMAP_DIRECT_MAP((pte[idx].val & PTE_PADDR_MASK));
503 }
504 
505 void
506 domain_map_page_amd(struct domain *dom, vaddr_t va, paddr_t pa, uint64_t flags)
507 {
508 	struct pte_entry *pte;
509 	struct iommu_softc *iommu;
510 	int idx;
511 
512 	iommu = dom->iommu;
513 	/* Insert physical address into virtual address map
514 	 * XXX: could we use private pmap here?
515 	 * essentially doing a pmap_enter(map, va, pa, prot);
516 	 */
517 
518 	/* Always assume AMD levels=4                           */
519 	/*        39        30        21        12              */
520 	/* ---------|---------|---------|---------|------------ */
521 	pte = dom->pte;
522 	pte = pte_lvl(iommu, pte, va, 30, PTE_NXTLVL(2) | PTE_IR | PTE_IW | PTE_P);
523 	pte = pte_lvl(iommu, pte, va, 21, PTE_NXTLVL(1) | PTE_IR | PTE_IW | PTE_P);
524 	if (flags)
525 		flags = PTE_P | PTE_R | PTE_W | PTE_IW | PTE_IR | PTE_NXTLVL(0);
526 
527 	/* Level 1: Page Table - add physical address */
528 	idx = (va >> 12) & 0x1FF;
529 	pte[idx].val = pa | flags;
530 
531 	iommu_flush_cache(iommu, pte, sizeof(*pte));
532 }
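
/*
 * Note on the AMD walk above: only the 30/21/12 shifts are traversed from
 * dom->pte, matching the 3-level mode programmed by domain_map_device()
 * (dte_set_mode(dte, 3)).  Passing flags == 0, as domain_unload_map()
 * does, stores a zero PTE and therefore unmaps the page.
 */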
533 
534 static void
535 dmar_dumpseg(bus_dma_tag_t tag, int nseg, bus_dma_segment_t *segs,
536     const char *lbl)
537 {
538 	struct domain *dom = tag->_cookie;
539 	int i;
540 
541 	return;
542 	if (!debugme(dom))
543 		return;
544 	printf("%s: %s\n", lbl, dom_bdf(dom));
545 	for (i = 0; i < nseg; i++) {
546 		printf("  %.16llx %.8x\n",
547 		    (uint64_t)segs[i].ds_addr,
548 		    (uint32_t)segs[i].ds_len);
549 	}
550 }
551 
552 /* Unload mapping */
553 void
554 domain_unload_map(struct domain *dom, bus_dmamap_t dmam)
555 {
556 	bus_dma_segment_t	*seg;
557 	paddr_t			base, end, idx;
558 	psize_t			alen;
559 	int			i;
560 
561 	if (iommu_bad(dom->iommu)) {
562 		printf("unload map no iommu\n");
563 		return;
564 	}
565 
566 	for (i = 0; i < dmam->dm_nsegs; i++) {
567 		seg = &dmam->dm_segs[i];
568 
569 		base = trunc_page(seg->ds_addr);
570 		end = roundup(seg->ds_addr + seg->ds_len, VTD_PAGE_SIZE);
571 		alen = end - base;
572 
573 		if (debugme(dom)) {
574 			printf("  va:%.16llx len:%x\n",
575 			    (uint64_t)base, (uint32_t)alen);
576 		}
577 
578 		/* Clear PTE */
579 		for (idx = 0; idx < alen; idx += VTD_PAGE_SIZE)
580 			domain_map_page(dom, base + idx, 0, 0);
581 
582 		if (dom->flag & DOM_NOMAP) {
583 			printf("%s: nomap %.16llx\n", dom_bdf(dom), (uint64_t)base);
584 			continue;
585 		}
586 
587 		mtx_enter(&dom->exlck);
588 		if (extent_free(dom->iovamap, base, alen, EX_NOWAIT)) {
589 			panic("domain_unload_map: extent_free");
590 		}
591 		mtx_leave(&dom->exlck);
592 	}
593 }
594 
595 /* map.segs[x].ds_addr is modified to IOMMU virtual PA */
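/*
 * For each segment a page-aligned range is carved out of the domain's
 * iovamap extent, ds_addr is rewritten to that I/O virtual address
 * (keeping the sub-page offset) and every page of the range is mapped
 * back to the original physical page.  A domain TLB flush (or a write
 * buffer flush when caching mode is off) then publishes the mappings.
 */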
596 void
597 domain_load_map(struct domain *dom, bus_dmamap_t map, int flags, int pteflag, const char *fn)
598 {
599 	bus_dma_segment_t	*seg;
600 	struct iommu_softc	*iommu;
601 	paddr_t			base, end, idx;
602 	psize_t			alen;
603 	u_long			res;
604 	int			i;
605 
606 	iommu = dom->iommu;
607 	if (!iommu_enabled(iommu)) {
608 		/* Lazy enable translation when required */
609 		if (iommu_enable_translation(iommu, 1)) {
610 			return;
611 		}
612 	}
613 	domain_map_check(dom);
614 	for (i = 0; i < map->dm_nsegs; i++) {
615 		seg = &map->dm_segs[i];
616 
617 		base = trunc_page(seg->ds_addr);
618 		end = roundup(seg->ds_addr + seg->ds_len, VTD_PAGE_SIZE);
619 		alen = end - base;
620 		res = base;
621 
622 		if (dom->flag & DOM_NOMAP) {
623 			goto nomap;
624 		}
625 
626 		/* Allocate DMA Virtual Address */
627 		mtx_enter(&dom->exlck);
628 		if (extent_alloc(dom->iovamap, alen, VTD_PAGE_SIZE, 0,
629 		    map->_dm_boundary, EX_NOWAIT, &res)) {
630 			panic("domain_load_map: extent_alloc");
631 		}
632 		if (res == -1) {
633 			panic("got -1 address\n");
634 		}
635 		mtx_leave(&dom->exlck);
636 
637 		/* Reassign DMA address */
638 		seg->ds_addr = res | (seg->ds_addr & VTD_PAGE_MASK);
639 nomap:
640 		if (debugme(dom)) {
641 			printf("  LOADMAP: %.16llx %x => %.16llx\n",
642 			    (uint64_t)seg->ds_addr, (uint32_t)seg->ds_len,
643 			    (uint64_t)res);
644 		}
645 		for (idx = 0; idx < alen; idx += VTD_PAGE_SIZE) {
646 			domain_map_page(dom, res + idx, base + idx,
647 			    PTE_P | pteflag);
648 		}
649 	}
650 	if ((iommu->cap & CAP_CM) || acpidmar_force_cm) {
651 		iommu_flush_tlb(iommu, IOTLB_DOMAIN, dom->did);
652 	} else {
653 		iommu_flush_write_buffer(iommu);
654 	}
655 }
656 
657 const char *
658 dom_bdf(struct domain *dom)
659 {
660 	struct domain_dev *dd;
661 	static char		mmm[48];
662 
663 	dd = TAILQ_FIRST(&dom->devices);
664 	snprintf(mmm, sizeof(mmm), "%s iommu:%d did:%.4x%s",
665 	    dmar_bdf(dd->sid), dom->iommu->id, dom->did,
666 	    dom->did == DID_UNITY ? " [unity]" : "");
667 	return (mmm);
668 }
669 
670 /* Bus DMA Map functions */
671 static int
672 dmar_dmamap_create(bus_dma_tag_t tag, bus_size_t size, int nsegments,
673     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
674 {
675 	int rc;
676 
677 	rc = _bus_dmamap_create(tag, size, nsegments, maxsegsz, boundary,
678 	    flags, dmamp);
679 	if (!rc) {
680 		dmar_dumpseg(tag, (*dmamp)->dm_nsegs, (*dmamp)->dm_segs,
681 		    __FUNCTION__);
682 	}
683 	return (rc);
684 }
685 
686 static void
687 dmar_dmamap_destroy(bus_dma_tag_t tag, bus_dmamap_t dmam)
688 {
689 	dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__);
690 	_bus_dmamap_destroy(tag, dmam);
691 }
692 
693 static int
694 dmar_dmamap_load(bus_dma_tag_t tag, bus_dmamap_t dmam, void *buf,
695     bus_size_t buflen, struct proc *p, int flags)
696 {
697 	struct domain *dom = tag->_cookie;
698 	int		rc;
699 
700 	rc = _bus_dmamap_load(tag, dmam, buf, buflen, p, flags);
701 	if (!rc) {
702 		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
703 		    __FUNCTION__);
704 		domain_load_map(dom, dmam, flags, PTE_R|PTE_W, __FUNCTION__);
705 		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
706 		    __FUNCTION__);
707 	}
708 	return (rc);
709 }
710 
711 static int
712 dmar_dmamap_load_mbuf(bus_dma_tag_t tag, bus_dmamap_t dmam, struct mbuf *chain,
713     int flags)
714 {
715 	struct domain	*dom = tag->_cookie;
716 	int		rc;
717 
718 	rc = _bus_dmamap_load_mbuf(tag, dmam, chain, flags);
719 	if (!rc) {
720 		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
721 		    __FUNCTION__);
722 		domain_load_map(dom, dmam, flags, PTE_R|PTE_W,__FUNCTION__);
723 		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
724 		    __FUNCTION__);
725 	}
726 	return (rc);
727 }
728 
729 static int
730 dmar_dmamap_load_uio(bus_dma_tag_t tag, bus_dmamap_t dmam, struct uio *uio,
731     int flags)
732 {
733 	struct domain	*dom = tag->_cookie;
734 	int		rc;
735 
736 	rc = _bus_dmamap_load_uio(tag, dmam, uio, flags);
737 	if (!rc) {
738 		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
739 		    __FUNCTION__);
740 		domain_load_map(dom, dmam, flags, PTE_R|PTE_W, __FUNCTION__);
741 		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
742 		    __FUNCTION__);
743 	}
744 	return (rc);
745 }
746 
747 static int
748 dmar_dmamap_load_raw(bus_dma_tag_t tag, bus_dmamap_t dmam,
749     bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
750 {
751 	struct domain *dom = tag->_cookie;
752 	int rc;
753 
754 	rc = _bus_dmamap_load_raw(tag, dmam, segs, nsegs, size, flags);
755 	if (!rc) {
756 		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
757 		    __FUNCTION__);
758 		domain_load_map(dom, dmam, flags, PTE_R|PTE_W, __FUNCTION__);
759 		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
760 		    __FUNCTION__);
761 	}
762 	return (rc);
763 }
764 
765 static void
766 dmar_dmamap_unload(bus_dma_tag_t tag, bus_dmamap_t dmam)
767 {
768 	struct domain *dom = tag->_cookie;
769 
770 	dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__);
771 	domain_unload_map(dom, dmam);
772 	_bus_dmamap_unload(tag, dmam);
773 }
774 
775 static void
776 dmar_dmamap_sync(bus_dma_tag_t tag, bus_dmamap_t dmam, bus_addr_t offset,
777     bus_size_t len, int ops)
778 {
779 #if 0
780 	struct domain *dom = tag->_cookie;
781 	int		flag;
782 
783 	flag = PTE_P;
784 	if (ops == BUS_DMASYNC_PREREAD) {
785 		/* make readable */
786 		flag |= PTE_R;
787 	}
788 	else if (ops == BUS_DMASYNC_PREWRITE) {
789 		/* make writeable */
790 		flag |= PTE_W;
791 	}
792 	dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__);
793 #endif
794 	_bus_dmamap_sync(tag, dmam, offset, len, ops);
795 }
796 
797 static int
798 dmar_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size, bus_size_t alignment,
799     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
800     int flags)
801 {
802 	int rc;
803 
804 	rc = _bus_dmamem_alloc(tag, size, alignment, boundary, segs, nsegs,
805 	    rsegs, flags);
806 	if (!rc) {
807 		dmar_dumpseg(tag, *rsegs, segs, __FUNCTION__);
808 	}
809 	return (rc);
810 }
811 
812 static void
813 dmar_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs)
814 {
815 	dmar_dumpseg(tag, nsegs, segs, __FUNCTION__);
816 	_bus_dmamem_free(tag, segs, nsegs);
817 }
818 
819 static int
820 dmar_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
821     size_t size, caddr_t *kvap, int flags)
822 {
823 	dmar_dumpseg(tag, nsegs, segs, __FUNCTION__);
824 	return (_bus_dmamem_map(tag, segs, nsegs, size, kvap, flags));
825 }
826 
827 static void
828 dmar_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva, size_t size)
829 {
830 	struct domain	*dom = tag->_cookie;
831 
832 	if (debugme(dom)) {
833 		printf("dmamap_unmap: %s\n", dom_bdf(dom));
834 	}
835 	_bus_dmamem_unmap(tag, kva, size);
836 }
837 
838 static paddr_t
839 dmar_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
840     off_t off, int prot, int flags)
841 {
842 	dmar_dumpseg(tag, nsegs, segs, __FUNCTION__);
843 	return (_bus_dmamem_mmap(tag, segs, nsegs, off, prot, flags));
844 }
845 
846 /*===================================
847  * IOMMU code
848  *===================================*/
849 
850 /* Intel: Set Context Root Address */
851 void
852 iommu_set_rtaddr(struct iommu_softc *iommu, paddr_t paddr)
853 {
854 	int i, sts;
855 
856 	mtx_enter(&iommu->reg_lock);
857 	iommu_write_8(iommu, DMAR_RTADDR_REG, paddr);
858 	iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd | GCMD_SRTP);
859 	for (i = 0; i < 5; i++) {
860 		sts = iommu_read_4(iommu, DMAR_GSTS_REG);
861 		if (sts & GSTS_RTPS)
862 			break;
863 	}
864 	mtx_leave(&iommu->reg_lock);
865 
866 	if (i == 5) {
867 		printf("set_rtaddr fails\n");
868 	}
869 }
870 
871 /* Allocate contiguous memory (1Mb) for the Device Table Entries */
872 void *
873 iommu_alloc_hwdte(struct acpidmar_softc *sc, size_t size, paddr_t *paddr)
874 {
875 	caddr_t vaddr;
876 	bus_dmamap_t map;
877 	bus_dma_segment_t seg;
878 	bus_dma_tag_t dmat = sc->sc_dmat;
879 	int rc, nsegs;
880 
881 	rc = _bus_dmamap_create(dmat, size, 1, size, 0,
882 	    BUS_DMA_NOWAIT, &map);
883 	if (rc != 0) {
884 		printf("hwdte_create fails\n");
885 		return NULL;
886 	}
887 	rc = _bus_dmamem_alloc(dmat, size, 4, 0, &seg, 1,
888 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
889 	if (rc != 0) {
890 		printf("hwdte alloc fails\n");
891 		return NULL;
892 	}
893 	rc = _bus_dmamem_map(dmat, &seg, 1, size, &vaddr,
894 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
895 	if (rc != 0) {
896 		printf("hwdte map fails\n");
897 		return NULL;
898 	}
899 	rc = _bus_dmamap_load_raw(dmat, map, &seg, 1, size, BUS_DMA_NOWAIT);
900 	if (rc != 0) {
901 		printf("hwdte load raw fails\n");
902 		return NULL;
903 	}
904 	*paddr = map->dm_segs[0].ds_addr;
905 	return vaddr;
906 }
907 
908 /* COMMON: Allocate a new memory page */
909 void *
910 iommu_alloc_page(struct iommu_softc *iommu, paddr_t *paddr)
911 {
912 	void	*va;
913 
914 	*paddr = 0;
915 	va = km_alloc(VTD_PAGE_SIZE, &kv_page, &kp_zero, &kd_nowait);
916 	if (va == NULL) {
917 		panic("can't allocate page\n");
918 	}
919 	pmap_extract(pmap_kernel(), (vaddr_t)va, paddr);
920 	return (va);
921 }
922 
923 
924 /* Intel: Issue command via queued invalidation */
925 void
926 iommu_issue_qi(struct iommu_softc *iommu, struct qi_entry *qi)
927 {
928 #if 0
929 	struct qi_entry *pi, *pw;
930 
931 	idx = iommu->qi_head;
932 	pi = &iommu->qi[idx];
933 	pw = &iommu->qi[(idx+1) % MAXQ];
934 	iommu->qi_head = (idx+2) % MAXQ;
935 
936 	memcpy(pw, &qi, sizeof(qi));
937 	issue command;
938 	while (pw->xxx)
939 		;
940 #endif
941 }
942 
943 /* Intel: Flush TLB entries, Queued Invalidation mode */
944 void
945 iommu_flush_tlb_qi(struct iommu_softc *iommu, int mode, int did)
946 {
947 	struct qi_entry qi;
948 
949 	/* Use queued invalidation */
950 	qi.hi = 0;
951 	switch (mode) {
952 	case IOTLB_GLOBAL:
953 		qi.lo = QI_IOTLB | QI_IOTLB_IG_GLOBAL;
954 		break;
955 	case IOTLB_DOMAIN:
956 		qi.lo = QI_IOTLB | QI_IOTLB_IG_DOMAIN |
957 		    QI_IOTLB_DID(did);
958 		break;
959 	case IOTLB_PAGE:
960 		qi.lo = QI_IOTLB | QI_IOTLB_IG_PAGE | QI_IOTLB_DID(did);
961 		qi.hi = 0;
962 		break;
963 	}
964 	if (iommu->cap & CAP_DRD)
965 		qi.lo |= QI_IOTLB_DR;
966 	if (iommu->cap & CAP_DWD)
967 		qi.lo |= QI_IOTLB_DW;
968 	iommu_issue_qi(iommu, &qi);
969 }
970 
971 /* Intel: Flush Context entries, Queued Invalidation mode */
972 void
973 iommu_flush_ctx_qi(struct iommu_softc *iommu, int mode, int did,
974     int sid, int fm)
975 {
976 	struct qi_entry qi;
977 
978 	/* Use queued invalidation */
979 	qi.hi = 0;
980 	switch (mode) {
981 	case CTX_GLOBAL:
982 		qi.lo = QI_CTX | QI_CTX_IG_GLOBAL;
983 		break;
984 	case CTX_DOMAIN:
985 		qi.lo = QI_CTX | QI_CTX_IG_DOMAIN | QI_CTX_DID(did);
986 		break;
987 	case CTX_DEVICE:
988 		qi.lo = QI_CTX | QI_CTX_IG_DEVICE | QI_CTX_DID(did) |
989 		    QI_CTX_SID(sid) | QI_CTX_FM(fm);
990 		break;
991 	}
992 	iommu_issue_qi(iommu, &qi);
993 }
994 
995 /* Intel: Flush write buffers */
996 void
997 iommu_flush_write_buffer(struct iommu_softc *iommu)
998 {
999 	int i, sts;
1000 
1001 	if (iommu->dte)
1002 		return;
1003 	if (!(iommu->cap & CAP_RWBF))
1004 		return;
1005 	DPRINTF(1,"writebuf\n");
1006 	iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd | GCMD_WBF);
1007 	for (i = 0; i < 5; i++) {
1008 		sts = iommu_read_4(iommu, DMAR_GSTS_REG);
1009 		if (sts & GSTS_WBFS)
1010 			break;
1011 		delay(10000);
1012 	}
1013 	if (i == 5) {
1014 		printf("write buffer flush fails\n");
1015 	}
1016 }
1017 
1018 void
1019 iommu_flush_cache(struct iommu_softc *iommu, void *addr, size_t size)
1020 {
1021 	if (iommu->dte) {
1022 		pmap_flush_cache((vaddr_t)addr, size);
1023 		return;
1024 	}
1025 	if (!(iommu->ecap & ECAP_C))
1026 		pmap_flush_cache((vaddr_t)addr, size);
1027 }
1028 
1029 /*
1030  * Intel: Flush IOMMU TLB Entries
1031  * Flushing can occur globally, per domain or per page
1032  */
1033 void
1034 iommu_flush_tlb(struct iommu_softc *iommu, int mode, int did)
1035 {
1036 	int		n;
1037 	uint64_t	val;
1038 
1039 	/* Call AMD */
1040 	if (iommu->dte) {
1041 		ivhd_invalidate_domain(iommu, did);
1042 		return;
1043 	}
1044 	val = IOTLB_IVT;
1045 	switch (mode) {
1046 	case IOTLB_GLOBAL:
1047 		val |= IIG_GLOBAL;
1048 		break;
1049 	case IOTLB_DOMAIN:
1050 		val |= IIG_DOMAIN | IOTLB_DID(did);
1051 		break;
1052 	case IOTLB_PAGE:
1053 		val |= IIG_PAGE | IOTLB_DID(did);
1054 		break;
1055 	}
1056 
1057 	/* Check for Read/Write Drain */
1058 	if (iommu->cap & CAP_DRD)
1059 		val |= IOTLB_DR;
1060 	if (iommu->cap & CAP_DWD)
1061 		val |= IOTLB_DW;
1062 
1063 	mtx_enter(&iommu->reg_lock);
1064 
1065 	iommu_write_8(iommu, DMAR_IOTLB_REG(iommu), val);
1066 	n = 0;
1067 	do {
1068 		val = iommu_read_8(iommu, DMAR_IOTLB_REG(iommu));
1069 	} while (n++ < 5 && val & IOTLB_IVT);
1070 
1071 	mtx_leave(&iommu->reg_lock);
1072 }
1073 
1074 /* Intel: Flush IOMMU settings
1075  * Flushes can occur globally, per domain, or per device
1076  */
1077 void
1078 iommu_flush_ctx(struct iommu_softc *iommu, int mode, int did, int sid, int fm)
1079 {
1080 	uint64_t	val;
1081 	int		n;
1082 
1083 	if (iommu->dte)
1084 		return;
1085 	val = CCMD_ICC;
1086 	switch (mode) {
1087 	case CTX_GLOBAL:
1088 		val |= CIG_GLOBAL;
1089 		break;
1090 	case CTX_DOMAIN:
1091 		val |= CIG_DOMAIN | CCMD_DID(did);
1092 		break;
1093 	case CTX_DEVICE:
1094 		val |= CIG_DEVICE | CCMD_DID(did) |
1095 		    CCMD_SID(sid) | CCMD_FM(fm);
1096 		break;
1097 	}
1098 
1099 	mtx_enter(&iommu->reg_lock);
1100 
1101 	n = 0;
1102 	iommu_write_8(iommu, DMAR_CCMD_REG, val);
1103 	do {
1104 		val = iommu_read_8(iommu, DMAR_CCMD_REG);
1105 	} while (n++ < 5 && val & CCMD_ICC);
1106 
1107 	mtx_leave(&iommu->reg_lock);
1108 }
1109 
1110 /* Intel: Enable Queued Invalidation */
1111 void
1112 iommu_enable_qi(struct iommu_softc *iommu, int enable)
1113 {
1114 	int	n = 0;
1115 	int	sts;
1116 
1117 	if (!(iommu->ecap & ECAP_QI))
1118 		return;
1119 
1120 	if (enable) {
1121 		iommu->gcmd |= GCMD_QIE;
1122 
1123 		mtx_enter(&iommu->reg_lock);
1124 
1125 		iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd);
1126 		do {
1127 			sts = iommu_read_4(iommu, DMAR_GSTS_REG);
1128 		} while (n++ < 5 && !(sts & GSTS_QIES));
1129 
1130 		mtx_leave(&iommu->reg_lock);
1131 
1132 		DPRINTF(1,"set.qie: %d\n", n);
1133 	} else {
1134 		iommu->gcmd &= ~GCMD_QIE;
1135 
1136 		mtx_enter(&iommu->reg_lock);
1137 
1138 		iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd);
1139 		do {
1140 			sts = iommu_read_4(iommu, DMAR_GSTS_REG);
1141 		} while (n++ < 5 && sts & GSTS_QIES);
1142 
1143 		mtx_leave(&iommu->reg_lock);
1144 
1145 		DPRINTF(1,"clr.qie: %d\n", n);
1146 	}
1147 }
1148 
1149 /* Intel: Enable IOMMU translation */
1150 int
1151 iommu_enable_translation(struct iommu_softc *iommu, int enable)
1152 {
1153 	uint32_t	sts;
1154 	uint64_t	reg;
1155 	int		n = 0;
1156 
1157 	if (iommu->dte)
1158 		return (0);
1159 	reg = 0;
1160 	if (enable) {
1161 		DPRINTF(0,"enable iommu %d\n", iommu->id);
1162 		iommu_showcfg(iommu, -1);
1163 
1164 		iommu->gcmd |= GCMD_TE;
1165 
1166 		/* Enable translation */
1167 		printf(" pre tes: ");
1168 
1169 		mtx_enter(&iommu->reg_lock);
1170 		iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd);
1171 		printf("xxx");
1172 		do {
1173 			printf("yyy");
1174 			sts = iommu_read_4(iommu, DMAR_GSTS_REG);
1175 			delay(n * 10000);
1176 		} while (n++ < 5 && !(sts & GSTS_TES));
1177 		mtx_leave(&iommu->reg_lock);
1178 
1179 		printf(" set.tes: %d\n", n);
1180 
1181 		if (n >= 5) {
1182 			printf("error.. unable to initialize iommu %d\n",
1183 			    iommu->id);
1184 			iommu->flags |= IOMMU_FLAGS_BAD;
1185 
1186 			/* Disable IOMMU */
1187 			iommu->gcmd &= ~GCMD_TE;
1188 			mtx_enter(&iommu->reg_lock);
1189 			iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd);
1190 			mtx_leave(&iommu->reg_lock);
1191 
1192 			return (1);
1193 		}
1194 
1195 		iommu_flush_ctx(iommu, CTX_GLOBAL, 0, 0, 0);
1196 		iommu_flush_tlb(iommu, IOTLB_GLOBAL, 0);
1197 	} else {
1198 		iommu->gcmd &= ~GCMD_TE;
1199 
1200 		mtx_enter(&iommu->reg_lock);
1201 
1202 		iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd);
1203 		do {
1204 			sts = iommu_read_4(iommu, DMAR_GSTS_REG);
1205 		} while (n++ < 5 && sts & GSTS_TES);
1206 		mtx_leave(&iommu->reg_lock);
1207 
1208 		printf(" clr.tes: %d\n", n);
1209 	}
1210 
1211 	return (0);
1212 }
1213 
1214 /* Intel: Initialize IOMMU */
1215 int
1216 iommu_init(struct acpidmar_softc *sc, struct iommu_softc *iommu,
1217     struct acpidmar_drhd *dh)
1218 {
1219 	static int	niommu;
1220 	int		len = VTD_PAGE_SIZE;
1221 	int		i, gaw;
1222 	uint32_t	sts;
1223 	paddr_t		paddr;
1224 
1225 	if (_bus_space_map(sc->sc_memt, dh->address, len, 0, &iommu->ioh) != 0) {
1226 		return (-1);
1227 	}
1228 
1229 	TAILQ_INIT(&iommu->domains);
1230 	iommu->id = ++niommu;
1231 	iommu->flags = dh->flags;
1232 	iommu->segment = dh->segment;
1233 	iommu->iot = sc->sc_memt;
1234 
1235 	iommu->cap = iommu_read_8(iommu, DMAR_CAP_REG);
1236 	iommu->ecap = iommu_read_8(iommu, DMAR_ECAP_REG);
1237 	iommu->ndoms = cap_nd(iommu->cap);
1238 
1239 	/* Print Capabilities & Extended Capabilities */
1240 	DPRINTF(0, "  caps: %s%s%s%s%s%s%s%s%s%s%s\n",
1241 	    iommu->cap & CAP_AFL ? "afl " : "",		/* adv fault */
1242 	    iommu->cap & CAP_RWBF ? "rwbf " : "",	/* write-buffer flush */
1243 	    iommu->cap & CAP_PLMR ? "plmr " : "",	/* protected lo region */
1244 	    iommu->cap & CAP_PHMR ? "phmr " : "",	/* protected hi region */
1245 	    iommu->cap & CAP_CM ? "cm " : "",		/* caching mode */
1246 	    iommu->cap & CAP_ZLR ? "zlr " : "",		/* zero-length read */
1247 	    iommu->cap & CAP_PSI ? "psi " : "",		/* page invalidate */
1248 	    iommu->cap & CAP_DWD ? "dwd " : "",		/* write drain */
1249 	    iommu->cap & CAP_DRD ? "drd " : "",		/* read drain */
1250 	    iommu->cap & CAP_FL1GP ? "Gb " : "",	/* 1Gb pages */
1251 	    iommu->cap & CAP_PI ? "pi " : "");		/* posted interrupts */
1252 	DPRINTF(0, "  ecap: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
1253 	    iommu->ecap & ECAP_C ? "c " : "",		/* coherent */
1254 	    iommu->ecap & ECAP_QI ? "qi " : "",		/* queued invalidate */
1255 	    iommu->ecap & ECAP_DT ? "dt " : "",		/* device iotlb */
1256 	    iommu->ecap & ECAP_IR ? "ir " : "",		/* intr remap */
1257 	    iommu->ecap & ECAP_EIM ? "eim " : "",	/* x2apic */
1258 	    iommu->ecap & ECAP_PT ? "pt " : "",		/* passthrough */
1259 	    iommu->ecap & ECAP_SC ? "sc " : "",		/* snoop control */
1260 	    iommu->ecap & ECAP_ECS ? "ecs " : "",	/* extended context */
1261 	    iommu->ecap & ECAP_MTS ? "mts " : "",	/* memory type */
1262 	    iommu->ecap & ECAP_NEST ? "nest " : "",	/* nested translations */
1263 	    iommu->ecap & ECAP_DIS ? "dis " : "",	/* deferred invalidation */
1264 	    iommu->ecap & ECAP_PASID ? "pas " : "",	/* pasid */
1265 	    iommu->ecap & ECAP_PRS ? "prs " : "",	/* page request */
1266 	    iommu->ecap & ECAP_ERS ? "ers " : "",	/* execute request */
1267 	    iommu->ecap & ECAP_SRS ? "srs " : "",	/* supervisor request */
1268 	    iommu->ecap & ECAP_NWFS ? "nwfs " : "",	/* no write flag */
1269 	    iommu->ecap & ECAP_EAFS ? "eafs " : "");	/* extended accessed flag */
1270 
1271 	mtx_init(&iommu->reg_lock, IPL_HIGH);
1272 
1273 	/* Clear Interrupt Masking */
1274 	iommu_write_4(iommu, DMAR_FSTS_REG, FSTS_PFO | FSTS_PPF);
1275 
1276 	iommu->intr = acpidmar_intr_establish(iommu, IPL_HIGH,
1277 	    acpidmar_intr, iommu, "dmarintr");
1278 
1279 	/* Enable interrupts */
1280 	sts = iommu_read_4(iommu, DMAR_FECTL_REG);
1281 	iommu_write_4(iommu, DMAR_FECTL_REG, sts & ~FECTL_IM);
1282 
1283 	/* Allocate root pointer */
1284 	iommu->root = iommu_alloc_page(iommu, &paddr);
1285 	DPRINTF(0, "Allocated root pointer: pa:%.16llx va:%p\n",
1286 	    (uint64_t)paddr, iommu->root);
1287 	iommu->rtaddr = paddr;
1288 	iommu_flush_write_buffer(iommu);
1289 	iommu_set_rtaddr(iommu, paddr);
1290 
1291 #if 0
1292 	if (iommu->ecap & ECAP_QI) {
1293 		/* Queued Invalidation support */
1294 		iommu->qi = iommu_alloc_page(iommu, &iommu->qip);
1295 		iommu_write_8(iommu, DMAR_IQT_REG, 0);
1296 		iommu_write_8(iommu, DMAR_IQA_REG, iommu->qip | IQA_QS_256);
1297 	}
1298 	if (iommu->ecap & ECAP_IR) {
1299 		/* Interrupt remapping support */
1300 		iommu_write_8(iommu, DMAR_IRTA_REG, 0);
1301 	}
1302 #endif
1303 
1304 	/* Calculate guest address width and supported guest widths */
1305 	gaw = -1;
1306 	iommu->mgaw = cap_mgaw(iommu->cap);
1307 	DPRINTF(0, "gaw: %d { ", iommu->mgaw);
1308 	for (i = 0; i < 5; i++) {
1309 		if (cap_sagaw(iommu->cap) & (1L << i)) {
1310 			gaw = VTD_LEVELTOAW(i);
1311 			DPRINTF(0, "%d ", gaw);
1312 			iommu->agaw = gaw;
1313 		}
1314 	}
1315 	DPRINTF(0, "}\n");
1316 
1317 	/* Cache current status register bits */
1318 	sts = iommu_read_4(iommu, DMAR_GSTS_REG);
1319 	if (sts & GSTS_TES)
1320 		iommu->gcmd |= GCMD_TE;
1321 	if (sts & GSTS_QIES)
1322 		iommu->gcmd |= GCMD_QIE;
1323 	if (sts & GSTS_IRES)
1324 		iommu->gcmd |= GCMD_IRE;
1325 	DPRINTF(0, "gcmd: %x preset\n", iommu->gcmd);
1326 	acpidmar_intr(iommu);
1327 	return (0);
1328 }
1329 
1330 /* Read/Write IOMMU register */
1331 uint32_t
1332 iommu_read_4(struct iommu_softc *iommu, int reg)
1333 {
1334 	uint32_t	v;
1335 
1336 	v = bus_space_read_4(iommu->iot, iommu->ioh, reg);
1337 	return (v);
1338 }
1339 
1340 
1341 void
1342 iommu_write_4(struct iommu_softc *iommu, int reg, uint32_t v)
1343 {
1344 	bus_space_write_4(iommu->iot, iommu->ioh, reg, (uint32_t)v);
1345 }
1346 
1347 uint64_t
1348 iommu_read_8(struct iommu_softc *iommu, int reg)
1349 {
1350 	uint64_t	v;
1351 
1352 	v = bus_space_read_8(iommu->iot, iommu->ioh, reg);
1353 	return (v);
1354 }
1355 
1356 void
1357 iommu_write_8(struct iommu_softc *iommu, int reg, uint64_t v)
1358 {
1359 	bus_space_write_8(iommu->iot, iommu->ioh, reg, v);
1360 }
1361 
1362 /* Check if a device is within a device scope */
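/*
 * Each device-scope entry names a start bus plus a path of (device,
 * function) pairs; the path is walked through any PCI-PCI bridges to
 * find the final bus.  An exact bus:dev.fun match returns DMAR_ENDPOINT;
 * for bridge entries, any device behind the bridge's secondary..subordinate
 * bus range returns DMAR_BRIDGE.
 */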
1363 int
1364 acpidmar_match_devscope(struct devlist_head *devlist, pci_chipset_tag_t pc,
1365     int sid)
1366 {
1367 	struct dmar_devlist	*ds;
1368 	int			sub, sec, i;
1369 	int			bus, dev, fun, sbus;
1370 	pcireg_t		reg;
1371 	pcitag_t		tag;
1372 
1373 	sbus = sid_bus(sid);
1374 	TAILQ_FOREACH(ds, devlist, link) {
1375 		bus = ds->bus;
1376 		dev = ds->dp[0].device;
1377 		fun = ds->dp[0].function;
1378 		/* Walk PCI bridges in path */
1379 		for (i = 1; i < ds->ndp; i++) {
1380 			tag = pci_make_tag(pc, bus, dev, fun);
1381 			reg = pci_conf_read(pc, tag, PPB_REG_BUSINFO);
1382 			bus = PPB_BUSINFO_SECONDARY(reg);
1383 			dev = ds->dp[i].device;
1384 			fun = ds->dp[i].function;
1385 		}
1386 
1387 		/* Check for device exact match */
1388 		if (sid == mksid(bus, dev, fun)) {
1389 			return DMAR_ENDPOINT;
1390 		}
1391 
1392 		/* Check for device subtree match */
1393 		if (ds->type == DMAR_BRIDGE) {
1394 			tag = pci_make_tag(pc, bus, dev, fun);
1395 			reg = pci_conf_read(pc, tag, PPB_REG_BUSINFO);
1396 			sec = PPB_BUSINFO_SECONDARY(reg);
1397 			sub = PPB_BUSINFO_SUBORDINATE(reg);
1398 			if (sec <= sbus && sbus <= sub) {
1399 				return DMAR_BRIDGE;
1400 			}
1401 		}
1402 	}
1403 
1404 	return (0);
1405 }
1406 
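/*
 * Create a translation domain: allocate the top-level page table, wire
 * up the remapping bus_dma tag, and set up the iovamap extent covering
 * 16MB..(1 << gaw)-1 with the MSI window pre-reserved so it is never
 * handed out as a DMA address.
 */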
1407 struct domain *
1408 domain_create(struct iommu_softc *iommu, int did)
1409 {
1410 	struct domain	*dom;
1411 	int gaw;
1412 
1413 	DPRINTF(0, "iommu%d: create domain: %.4x\n", iommu->id, did);
1414 	dom = malloc(sizeof(*dom), M_DEVBUF, M_ZERO | M_WAITOK);
1415 	dom->did = did;
1416 	dom->iommu = iommu;
1417 	dom->pte = iommu_alloc_page(iommu, &dom->ptep);
1418 	TAILQ_INIT(&dom->devices);
1419 
1420 	/* Setup DMA */
1421 	dom->dmat._cookie = dom;
1422 	dom->dmat._dmamap_create = dmar_dmamap_create;		/* nop */
1423 	dom->dmat._dmamap_destroy = dmar_dmamap_destroy;	/* nop */
1424 	dom->dmat._dmamap_load = dmar_dmamap_load;		/* lm */
1425 	dom->dmat._dmamap_load_mbuf = dmar_dmamap_load_mbuf;	/* lm */
1426 	dom->dmat._dmamap_load_uio = dmar_dmamap_load_uio;	/* lm */
1427 	dom->dmat._dmamap_load_raw = dmar_dmamap_load_raw;	/* lm */
1428 	dom->dmat._dmamap_unload = dmar_dmamap_unload;		/* um */
1429 	dom->dmat._dmamap_sync = dmar_dmamap_sync;		/* lm */
1430 	dom->dmat._dmamem_alloc = dmar_dmamem_alloc;		/* nop */
1431 	dom->dmat._dmamem_free = dmar_dmamem_free;		/* nop */
1432 	dom->dmat._dmamem_map = dmar_dmamem_map;		/* nop */
1433 	dom->dmat._dmamem_unmap = dmar_dmamem_unmap;		/* nop */
1434 	dom->dmat._dmamem_mmap = dmar_dmamem_mmap;
1435 
1436 	snprintf(dom->exname, sizeof(dom->exname), "did:%x.%.4x",
1437 	    iommu->id, dom->did);
1438 
1439 	/* Setup IOMMU address map */
1440 	gaw = min(iommu->agaw, iommu->mgaw);
1441 	dom->iovamap = extent_create(dom->exname, 1024*1024*16,
1442 	    (1LL << gaw)-1,
1443 	    M_DEVBUF, NULL, 0,
1444 	    EX_WAITOK|EX_NOCOALESCE);
1445 
1446 	/* Zero out MSI Interrupt region */
1447 	extent_alloc_region(dom->iovamap, MSI_BASE_ADDRESS, MSI_BASE_SIZE,
1448 	    EX_WAITOK);
1449 	mtx_init(&dom->exlck, IPL_HIGH);
1450 
1451 	TAILQ_INSERT_TAIL(&iommu->domains, dom, link);
1452 
1453 	return dom;
1454 }
1455 
1456 void
1457 domain_add_device(struct domain *dom, int sid)
1458 {
1459 	struct domain_dev *ddev;
1460 
1461 	DPRINTF(0, "add %s to iommu%d.%.4x\n", dmar_bdf(sid), dom->iommu->id, dom->did);
1462 	ddev = malloc(sizeof(*ddev), M_DEVBUF, M_ZERO | M_WAITOK);
1463 	ddev->sid = sid;
1464 	TAILQ_INSERT_TAIL(&dom->devices, ddev, link);
1465 
1466 	/* Should set context entry here?? */
1467 }
1468 
1469 void
1470 domain_remove_device(struct domain *dom, int sid)
1471 {
1472 	struct domain_dev *ddev, *tmp;
1473 
1474 	TAILQ_FOREACH_SAFE(ddev, &dom->devices, link, tmp) {
1475 		if (ddev->sid == sid) {
1476 			TAILQ_REMOVE(&dom->devices, ddev, link);
1477 			free(ddev, sizeof(*ddev), M_DEVBUF);
1478 		}
1479 	}
1480 }
1481 
1482 /* Lookup domain by segment & source id (bus.device.function) */
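/*
 * A device that already belongs to a domain gets that domain back;
 * otherwise a new domain id is taken from the top of the iommu's id
 * space (--ndoms), and once ids run low (ndoms <= 2) every remaining
 * device is lumped into a shared "unity" catch-all domain (did 1).
 */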
1483 struct domain *
1484 domain_lookup(struct acpidmar_softc *sc, int segment, int sid)
1485 {
1486 	struct iommu_softc	*iommu;
1487 	struct domain_dev	*ddev;
1488 	struct domain		*dom;
1489 	int			rc;
1490 
1491 	if (sc == NULL) {
1492 		return NULL;
1493 	}
1494 
1495 	/* Lookup IOMMU for this device */
1496 	TAILQ_FOREACH(iommu, &sc->sc_drhds, link) {
1497 		if (iommu->segment != segment)
1498 			continue;
1499 		/* Check for devscope match or catchall iommu */
1500 		rc = acpidmar_match_devscope(&iommu->devices, sc->sc_pc, sid);
1501 		if (rc != 0 || iommu->flags) {
1502 			break;
1503 		}
1504 	}
1505 	if (!iommu) {
1506 		printf("%s: no iommu found\n", dmar_bdf(sid));
1507 		return NULL;
1508 	}
1509 
1510 	/* Search domain devices */
1511 	TAILQ_FOREACH(dom, &iommu->domains, link) {
1512 		TAILQ_FOREACH(ddev, &dom->devices, link) {
1513 			/* XXX: match all functions? */
1514 			if (ddev->sid == sid) {
1515 				return dom;
1516 			}
1517 		}
1518 	}
1519 	if (iommu->ndoms <= 2) {
1520 		/* Running out of domains.. create catchall domain */
1521 		if (!iommu->unity) {
1522 			iommu->unity = domain_create(iommu, 1);
1523 		}
1524 		dom = iommu->unity;
1525 	} else {
1526 		dom = domain_create(iommu, --iommu->ndoms);
1527 	}
1528 	if (!dom) {
1529 		printf("no domain here\n");
1530 		return NULL;
1531 	}
1532 
1533 	/* Add device to domain */
1534 	domain_add_device(dom, sid);
1535 
1536 	return dom;
1537 }
1538 
1539 /* Map Guest Pages into IOMMU */
1540 void
1541 _iommu_map(void *dom, vaddr_t va, bus_addr_t gpa, bus_size_t len)
1542 {
1543 	bus_size_t i;
1544 	paddr_t hpa;
1545 
1546 	if (dom == NULL) {
1547 		return;
1548 	}
1549 	DPRINTF(1, "Mapping dma: %lx = %lx/%lx\n", va, gpa, len);
1550 	for (i = 0; i < len; i += PAGE_SIZE) {
1551 		hpa = 0;
1552 		pmap_extract(curproc->p_vmspace->vm_map.pmap, va, &hpa);
1553 		domain_map_page(dom, gpa, hpa, PTE_P | PTE_R | PTE_W);
1554 		gpa += PAGE_SIZE;
1555 		va  += PAGE_SIZE;
1556 	}
1557 }
1558 
1559 /* Find IOMMU for a given PCI device */
1560 void *
1561 _iommu_domain(int segment, int bus, int dev, int func, int *id)
1562 {
1563 	struct domain *dom;
1564 
1565 	dom = domain_lookup(acpidmar_sc, segment, mksid(bus, dev, func));
1566 	if (dom) {
1567 		*id = dom->did;
1568 	}
1569 	return dom;
1570 }
1571 
1572 void
1573 domain_map_device(struct domain *dom, int sid);
1574 
1575 void
1576 domain_map_device(struct domain *dom, int sid)
1577 {
1578 	struct iommu_softc	*iommu;
1579 	struct context_entry	*ctx;
1580 	paddr_t			paddr;
1581 	int			bus, devfn;
1582 	int			tt, lvl;
1583 
1584 	iommu = dom->iommu;
1585 
1586 	bus = sid_bus(sid);
1587 	devfn = sid_devfn(sid);
1588 	/* AMD attach device */
1589 	if (iommu->dte) {
1590 		struct ivhd_dte *dte = &iommu->dte[sid];
1591 		if (!dte->dw0) {
1592 			/* Setup Device Table Entry: bus.devfn */
1593 			DPRINTF(1, "@@@ PCI Attach: %.4x[%s] %.4x\n", sid, dmar_bdf(sid), dom->did);
1594 			dte_set_host_page_table_root_ptr(dte, dom->ptep);
1595 			dte_set_domain(dte, dom->did);
1596 			dte_set_mode(dte, 3);  /* Set 3 level PTE */
1597 			dte_set_tv(dte);
1598 			dte_set_valid(dte);
1599 			ivhd_flush_devtab(iommu, dom->did);
1600 #ifdef IOMMU_DEBUG
1601 			//ivhd_showreg(iommu);
1602 			ivhd_showdte(iommu);
1603 #endif
1604 		}
1605 		return;
1606 	}
1607 
1608 	/* Create Bus mapping */
1609 	if (!root_entry_is_valid(&iommu->root[bus])) {
1610 		iommu->ctx[bus] = iommu_alloc_page(iommu, &paddr);
1611 		iommu->root[bus].lo = paddr | ROOT_P;
1612 		iommu_flush_cache(iommu, &iommu->root[bus],
1613 		    sizeof(struct root_entry));
1614 		DPRINTF(0, "iommu%d: Allocate context for bus: %.2x pa:%.16llx va:%p\n",
1615 		    iommu->id, bus, (uint64_t)paddr,
1616 		    iommu->ctx[bus]);
1617 	}
1618 
1619 	/* Create DevFn mapping */
1620 	ctx = iommu->ctx[bus] + devfn;
1621 	if (!context_entry_is_valid(ctx)) {
1622 		tt = CTX_T_MULTI;
1623 		lvl = VTD_AWTOLEVEL(iommu->agaw);
1624 
1625 		/* Initialize context */
1626 		context_set_slpte(ctx, dom->ptep);
1627 		context_set_translation_type(ctx, tt);
1628 		context_set_domain_id(ctx, dom->did);
1629 		context_set_address_width(ctx, lvl);
1630 		context_set_present(ctx);
1631 
1632 		/* Flush it */
1633 		iommu_flush_cache(iommu, ctx, sizeof(struct context_entry));
1634 		if ((iommu->cap & CAP_CM) || acpidmar_force_cm) {
1635 			iommu_flush_ctx(iommu, CTX_DEVICE, dom->did, sid, 0);
1636 			iommu_flush_tlb(iommu, IOTLB_GLOBAL, 0);
1637 		} else {
1638 			iommu_flush_write_buffer(iommu);
1639 		}
1640 		DPRINTF(0, "iommu%d: %s set context ptep:%.16llx lvl:%d did:%.4x tt:%d\n",
1641 		    iommu->id, dmar_bdf(sid), (uint64_t)dom->ptep, lvl,
1642 		    dom->did, tt);
1643 	}
1644 }
1645 
1646 struct domain *
1647 acpidmar_pci_attach(struct acpidmar_softc *sc, int segment, int sid, int mapctx)
1648 {
1649 	static struct domain	*dom;
1650 
1651 	dom = domain_lookup(sc, segment, sid);
1652 	if (!dom) {
1653 		printf("no domain: %s\n", dmar_bdf(sid));
1654 		return NULL;
1655 	}
1656 
1657 	if (mapctx) {
1658 		domain_map_device(dom, sid);
1659 	}
1660 
1661 	return dom;
1662 }
1663 
1664 void
1665 acpidmar_pci_hook(pci_chipset_tag_t pc, struct pci_attach_args *pa)
1666 {
1667 	int		bus, dev, fun, sid;
1668 	struct domain	*dom;
1669 	pcireg_t	reg;
1670 
1671 	if (!acpidmar_sc) {
1672 		/* No DMAR, ignore */
1673 		return;
1674 	}
1675 
1676 	/* Add device to our list if valid */
1677 	pci_decompose_tag(pc, pa->pa_tag, &bus, &dev, &fun);
1678 	sid = mksid(bus, dev, fun);
1679 	if (sid_flag[sid] & SID_INVALID)
1680 		return;
1681 
1682 	reg = pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG);
1683 
1684 	/* Add device to domain */
1685 	dom = acpidmar_pci_attach(acpidmar_sc, pa->pa_domain, sid, 0);
1686 	if (dom == NULL)
1687 		return;
1688 
1689 	if (PCI_CLASS(reg) == PCI_CLASS_DISPLAY &&
1690 	    PCI_SUBCLASS(reg) == PCI_SUBCLASS_DISPLAY_VGA) {
1691 		dom->flag = DOM_NOMAP;
1692 	}
1693 	if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE &&
1694 	    PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_ISA) {
1695 		/* For ISA Bridges, map 0-16Mb as 1:1 */
1696 		printf("dmar: %.4x:%.2x:%.2x.%x mapping ISA\n",
1697 		    pa->pa_domain, bus, dev, fun);
1698 		domain_map_pthru(dom, 0x00, 16*1024*1024);
1699 	}
1700 
1701 	/* Change DMA tag */
1702 	pa->pa_dmat = &dom->dmat;
1703 }
1704 
1705 /* Create list of device scope entries from ACPI table */
1706 void
1707 acpidmar_parse_devscope(union acpidmar_entry *de, int off, int segment,
1708     struct devlist_head *devlist)
1709 {
1710 	struct acpidmar_devscope	*ds;
1711 	struct dmar_devlist		*d;
1712 	int				dplen, i;
1713 
1714 	TAILQ_INIT(devlist);
1715 	while (off < de->length) {
1716 		ds = (struct acpidmar_devscope *)((unsigned char *)de + off);
1717 		off += ds->length;
1718 
1719 		/* We only care about bridges and endpoints */
1720 		if (ds->type != DMAR_ENDPOINT && ds->type != DMAR_BRIDGE)
1721 			continue;
1722 
1723 		dplen = ds->length - sizeof(*ds);
1724 		d = malloc(sizeof(*d) + dplen, M_DEVBUF, M_ZERO | M_WAITOK);
1725 		d->bus  = ds->bus;
1726 		d->type = ds->type;
1727 		d->ndp  = dplen / 2;
1728 		d->dp   = (void *)&d[1];
1729 		memcpy(d->dp, &ds[1], dplen);
1730 		TAILQ_INSERT_TAIL(devlist, d, link);
1731 
1732 		DPRINTF(1, "  %8s  %.4x:%.2x.%.2x.%x {",
1733 		    ds->type == DMAR_BRIDGE ? "bridge" : "endpoint",
1734 		    segment, ds->bus,
1735 		    d->dp[0].device,
1736 		    d->dp[0].function);
1737 
1738 		for (i = 1; i < d->ndp; i++) {
1739 			DPRINTF(1, " %2x.%x ",
1740 			    d->dp[i].device,
1741 			    d->dp[i].function);
1742 		}
1743 		DPRINTF(1, "}\n");
1744 	}
1745 }
1746 
1747 /* DMA Remapping Hardware Unit */
1748 void
1749 acpidmar_drhd(struct acpidmar_softc *sc, union acpidmar_entry *de)
1750 {
1751 	struct iommu_softc	*iommu;
1752 
1753 	printf("DRHD: segment:%.4x base:%.16llx flags:%.2x\n",
1754 	    de->drhd.segment,
1755 	    de->drhd.address,
1756 	    de->drhd.flags);
1757 	iommu = malloc(sizeof(*iommu), M_DEVBUF, M_ZERO | M_WAITOK);
1758 	acpidmar_parse_devscope(de, sizeof(de->drhd), de->drhd.segment,
1759 	    &iommu->devices);
1760 	iommu_init(sc, iommu, &de->drhd);
1761 
1762 	if (de->drhd.flags) {
1763 		/* Catchall IOMMU goes at end of list */
1764 		TAILQ_INSERT_TAIL(&sc->sc_drhds, iommu, link);
1765 	} else {
1766 		TAILQ_INSERT_HEAD(&sc->sc_drhds, iommu, link);
1767 	}
1768 }
1769 
1770 /* Reserved Memory Region Reporting */
1771 void
1772 acpidmar_rmrr(struct acpidmar_softc *sc, union acpidmar_entry *de)
1773 {
1774 	struct rmrr_softc	*rmrr;
1775 	bios_memmap_t		*im, *jm;
1776 	uint64_t		start, end;
1777 
1778 	printf("RMRR: segment:%.4x range:%.16llx-%.16llx\n",
1779 	    de->rmrr.segment, de->rmrr.base, de->rmrr.limit);
1780 	if (de->rmrr.limit <= de->rmrr.base) {
1781 		printf("  buggy BIOS\n");
1782 		return;
1783 	}
1784 
1785 	rmrr = malloc(sizeof(*rmrr), M_DEVBUF, M_ZERO | M_WAITOK);
1786 	rmrr->start = trunc_page(de->rmrr.base);
1787 	rmrr->end = round_page(de->rmrr.limit);
1788 	rmrr->segment = de->rmrr.segment;
1789 	acpidmar_parse_devscope(de, sizeof(de->rmrr), de->rmrr.segment,
1790 	    &rmrr->devices);
1791 
1792 	for (im = bios_memmap; im->type != BIOS_MAP_END; im++) {
1793 		if (im->type != BIOS_MAP_RES)
1794 			continue;
1795 		/* Search for adjacent reserved regions */
1796 		start = im->addr;
1797 		end   = im->addr+im->size;
1798 		for (jm = im+1; jm->type == BIOS_MAP_RES && end == jm->addr;
1799 		    jm++) {
1800 			end = jm->addr+jm->size;
1801 		}
1802 		printf("e820: %.16llx - %.16llx\n", start, end);
1803 		if (start <= rmrr->start && rmrr->end <= end) {
1804 			/* Bah.. some buggy BIOS stomp outside RMRR */
1805 			printf("  ** inside E820 Reserved %.16llx %.16llx\n",
1806 			    start, end);
1807 			rmrr->start = trunc_page(start);
1808 			rmrr->end   = round_page(end);
1809 			break;
1810 		}
1811 	}
1812 	TAILQ_INSERT_TAIL(&sc->sc_rmrrs, rmrr, link);
1813 }
1814 
1815 /* Root Port ATS Reporting */
1816 void
1817 acpidmar_atsr(struct acpidmar_softc *sc, union acpidmar_entry *de)
1818 {
1819 	struct atsr_softc *atsr;
1820 
1821 	printf("ATSR: segment:%.4x flags:%x\n",
1822 	    de->atsr.segment,
1823 	    de->atsr.flags);
1824 
1825 	atsr = malloc(sizeof(*atsr), M_DEVBUF, M_ZERO | M_WAITOK);
1826 	atsr->flags = de->atsr.flags;
1827 	atsr->segment = de->atsr.segment;
1828 	acpidmar_parse_devscope(de, sizeof(de->atsr), de->atsr.segment,
1829 	    &atsr->devices);
1830 
1831 	TAILQ_INSERT_TAIL(&sc->sc_atsrs, atsr, link);
1832 }
1833 
1834 void
1835 acpidmar_init(struct acpidmar_softc *sc, struct acpi_dmar *dmar)
1836 {
1837 	struct rmrr_softc	*rmrr;
1838 	struct iommu_softc	*iommu;
1839 	struct domain		*dom;
1840 	struct dmar_devlist	*dl;
1841 	union acpidmar_entry	*de;
1842 	int			off, sid, rc;
1843 
1844 	domain_map_page = domain_map_page_intel;
1845 	printf(": hardware width: %d, intr_remap:%d x2apic_opt_out:%d\n",
1846 	    dmar->haw+1,
1847 	    !!(dmar->flags & 0x1),
1848 	    !!(dmar->flags & 0x2));
1849 	sc->sc_haw = dmar->haw+1;
1850 	sc->sc_flags = dmar->flags;
1851 
1852 	TAILQ_INIT(&sc->sc_drhds);
1853 	TAILQ_INIT(&sc->sc_rmrrs);
1854 	TAILQ_INIT(&sc->sc_atsrs);
1855 
1856 	off = sizeof(*dmar);
1857 	while (off < dmar->hdr.length) {
1858 		de = (union acpidmar_entry *)((unsigned char *)dmar + off);
1859 		switch (de->type) {
1860 		case DMAR_DRHD:
1861 			acpidmar_drhd(sc, de);
1862 			break;
1863 		case DMAR_RMRR:
1864 			acpidmar_rmrr(sc, de);
1865 			break;
1866 		case DMAR_ATSR:
1867 			acpidmar_atsr(sc, de);
1868 			break;
1869 		default:
1870 			printf("DMAR: unknown %x\n", de->type);
1871 			break;
1872 		}
1873 		off += de->length;
1874 	}
1875 
1876 	/* Pre-create domains for iommu devices */
1877 	TAILQ_FOREACH(iommu, &sc->sc_drhds, link) {
1878 		TAILQ_FOREACH(dl, &iommu->devices, link) {
1879 			sid = mksid(dl->bus, dl->dp[0].device,
1880 			    dl->dp[0].function);
1881 			dom = acpidmar_pci_attach(sc, iommu->segment, sid, 0);
1882 			if (dom != NULL) {
1883 				printf("%.4x:%.2x:%.2x.%x iommu:%d did:%.4x\n",
1884 				    iommu->segment, dl->bus, dl->dp[0].device, dl->dp[0].function,
1885 				    iommu->id, dom->did);
1886 			}
1887 		}
1888 	}
1889 	/* Map passthrough pages for RMRR */
1890 	TAILQ_FOREACH(rmrr, &sc->sc_rmrrs, link) {
1891 		TAILQ_FOREACH(dl, &rmrr->devices, link) {
1892 			sid = mksid(dl->bus, dl->dp[0].device,
1893 			    dl->dp[0].function);
1894 			dom = acpidmar_pci_attach(sc, rmrr->segment, sid, 0);
1895 			if (dom != NULL) {
1896 				printf("%s map ident: %.16llx %.16llx\n",
1897 				    dom_bdf(dom), rmrr->start, rmrr->end);
1898 				domain_map_pthru(dom, rmrr->start, rmrr->end);
1899 				rc = extent_alloc_region(dom->iovamap,
1900 				    rmrr->start, rmrr->end, EX_WAITOK);
1901 			}
1902 		}
1903 	}
1904 }
1905 
1906 
1907 /*=====================================================
1908  * AMD Vi
1909  *=====================================================*/
1910 void	acpiivrs_ivhd(struct acpidmar_softc *, struct acpi_ivhd *);
1911 int	ivhd_iommu_init(struct acpidmar_softc *, struct iommu_softc *,
1912 		struct acpi_ivhd *);
1913 int	_ivhd_issue_command(struct iommu_softc *, const struct ivhd_command *);
1914 void	ivhd_show_event(struct iommu_softc *, struct ivhd_event *evt, int);
1915 int	ivhd_issue_command(struct iommu_softc *, const struct ivhd_command *, int);
1916 int	ivhd_invalidate_domain(struct iommu_softc *, int);
1917 void	ivhd_intr_map(struct iommu_softc *, int);
1918 void	ivhd_checkerr(struct iommu_softc *iommu);
1919 int	acpiivhd_intr(void *);
1920 
1921 int
1922 acpiivhd_intr(void *ctx)
1923 {
1924 	struct iommu_softc *iommu = ctx;
1925 
1926 	if (!iommu->dte)
1927 		return (0);
1928 	ivhd_poll_events(iommu);
1929 	return (1);
1930 }
1931 
1932 /* Setup interrupt for AMD */
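/*
 * The IOMMU raises its event interrupt as an MSI on its own PCI
 * function (devid), so a PCI tag for that bus:dev.fn is built by hand
 * and acpiivhd_intr() simply drains the event log.
 */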
1933 void
1934 ivhd_intr_map(struct iommu_softc *iommu, int devid)
{
1935 	pci_intr_handle_t ih;
1936 
1937 	if (iommu->intr)
1938 		return;
1939 	ih.tag = pci_make_tag(NULL, sid_bus(devid), sid_dev(devid), sid_fun(devid));
1940 	ih.line = APIC_INT_VIA_MSG;
1941 	ih.pin = 0;
1942 	iommu->intr = pci_intr_establish(NULL, ih, IPL_NET | IPL_MPSAFE,
1943 				acpiivhd_intr, iommu, "amd_iommu");
1944 	printf("amd iommu intr: %p\n", iommu->intr);
1945 }
1946 
1947 void
1948 _dumppte(struct pte_entry *pte, int lvl, vaddr_t va)
1949 {
1950 	char *pfx[] = { "    ", "   ", "  ", " ", "" };
1951 	uint64_t i, sh;
1952 	struct pte_entry *npte;
1953 
1954 	for (i = 0; i < 512; i++) {
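	/*
	 * Recursively dump a 512-entry table: each level translates 9 bits
	 * of the virtual address, so entry i at level `lvl' covers the
	 * range starting at va | (i << ((lvl - 1) * 9 + 12)).
	 */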
1955 		sh = (i << (((lvl-1) * 9) + 12));
1956 		if (pte[i].val & PTE_P) {
1957 			if (lvl > 1) {
1958 				npte = (void *)PMAP_DIRECT_MAP((pte[i].val & PTE_PADDR_MASK));
1959 				printf("%slvl%d: %.16llx nxt:%llu\n", pfx[lvl], lvl,
1960 				    pte[i].val, (pte[i].val >> 9) & 7);
1961 				_dumppte(npte, lvl-1, va | sh);
1962 			} else {
1963 				printf("%slvl%d: %.16llx <- %.16llx \n", pfx[lvl], lvl,
1964 				    pte[i].val, va | sh);
1965 			}
1966 		}
1967 	}
1968 }
1969 
1970 void
1971 ivhd_showpage(struct iommu_softc *iommu, int sid, paddr_t paddr)
1972 {
1973 	struct domain *dom;
1974 	static int show = 0;
1975 
1976 	if (show > 10)
1977 		return;
1978 	show++;
1979 	dom = acpidmar_pci_attach(acpidmar_sc, 0, sid, 0);
1980 	if (!dom)
1981 		return;
1982 	printf("DTE: %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
1983 	    iommu->dte[sid].dw0,
1984 	    iommu->dte[sid].dw1,
1985 	    iommu->dte[sid].dw2,
1986 	    iommu->dte[sid].dw3,
1987 	    iommu->dte[sid].dw4,
1988 	    iommu->dte[sid].dw5,
1989 	    iommu->dte[sid].dw6,
1990 	    iommu->dte[sid].dw7);
1991 	_dumppte(dom->pte, 3, 0);
1992 }
1993 
1994 /* Display AMD IOMMU Error */
1995 void
1996 ivhd_show_event(struct iommu_softc *iommu, struct ivhd_event *evt, int head)
1997 {
1998 	int type, sid, did, flag;
1999 	uint64_t address;
2000 
2001 	/* Get Device, Domain, Address and Type of event */
2002 	sid  = __EXTRACT(evt->dw0, EVT_SID);
2003 	type = __EXTRACT(evt->dw1, EVT_TYPE);
2004 	did  = __EXTRACT(evt->dw1, EVT_DID);
2005 	flag = __EXTRACT(evt->dw1, EVT_FLAG);
2006 	address = _get64(&evt->dw2);
2007 
2008 	printf("=== IOMMU Error[%.4x]: ", head);
2009 	switch (type) {
2010 	case ILLEGAL_DEV_TABLE_ENTRY:
2011 		printf("illegal dev table entry dev=%s addr=0x%.16llx %s, %s, %s, %s\n",
2012 		    dmar_bdf(sid), address,
2013 		    evt->dw1 & EVT_TR ? "translation" : "transaction",
2014 		    evt->dw1 & EVT_RZ ? "reserved bit" : "invalid level",
2015 		    evt->dw1 & EVT_RW ? "write" : "read",
2016 		    evt->dw1 & EVT_I  ? "interrupt" : "memory");
2017 		ivhd_showdte(iommu);
2018 		break;
2019 	case IO_PAGE_FAULT:
2020 		printf("io page fault dev=%s did=0x%.4x addr=0x%.16llx\n%s, %s, %s, %s, %s, %s\n",
2021 		    dmar_bdf(sid), did, address,
2022 		    evt->dw1 & EVT_TR ? "translation" : "transaction",
2023 		    evt->dw1 & EVT_RZ ? "reserved bit" : "invalid level",
2024 		    evt->dw1 & EVT_PE ? "no perm" : "perm",
2025 		    evt->dw1 & EVT_RW ? "write" : "read",
2026 		    evt->dw1 & EVT_PR ? "present" : "not present",
2027 		    evt->dw1 & EVT_I  ? "interrupt" : "memory");
2028 		ivhd_showdte(iommu);
2029 		ivhd_showpage(iommu, sid, address);
2030 		break;
2031 	case DEV_TAB_HARDWARE_ERROR:
2032 		printf("device table hardware error dev=%s addr=0x%.16llx %s, %s, %s\n",
2033 		    dmar_bdf(sid), address,
2034 		    evt->dw1 & EVT_TR ? "translation" : "transaction",
2035 		    evt->dw1 & EVT_RW ? "write" : "read",
2036 		    evt->dw1 & EVT_I  ? "interrupt" : "memory");
2037 		ivhd_showdte(iommu);
2038 		break;
2039 	case PAGE_TAB_HARDWARE_ERROR:
2040 		printf("page table hardware error dev=%s addr=0x%.16llx %s, %s, %s\n",
2041 		    dmar_bdf(sid), address,
2042 		    evt->dw1 & EVT_TR ? "translation" : "transaction",
2043 		    evt->dw1 & EVT_RW ? "write" : "read",
2044 		    evt->dw1 & EVT_I  ? "interrupt" : "memory");
2045 		ivhd_showdte(iommu);
2046 		break;
2047 	case ILLEGAL_COMMAND_ERROR:
2048 		printf("illegal command addr=0x%.16llx\n", address);
2049 		ivhd_showcmd(iommu);
2050 		break;
2051 	case COMMAND_HARDWARE_ERROR:
2052 		printf("command hardware error addr=0x%.16llx flag=0x%.4x\n",
2053 		    address, flag);
2054 		ivhd_showcmd(iommu);
2055 		break;
2056 	case IOTLB_INV_TIMEOUT:
2057 		printf("iotlb invalidation timeout dev=%s address=0x%.16llx\n",
2058 		    dmar_bdf(sid), address);
2059 		break;
2060 	case INVALID_DEVICE_REQUEST:
2061 		printf("invalid device request dev=%s addr=0x%.16llx flag=0x%.4x\n",
2062 		    dmar_bdf(sid), address, flag);
2063 		break;
2064 	default:
2065 		printf("unknown type=0x%.2x\n", type);
2066 		break;
2067 	}
2068 	/* Clear old event */
2069 	evt->dw0 = 0;
2070 	evt->dw1 = 0;
2071 	evt->dw2 = 0;
2072 	evt->dw3 = 0;
2073 }
2074 
2075 /* AMD: Process IOMMU error from hardware */
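/*
 * The event log is a ring of 16-byte records; EVT_HEAD_REG and
 * EVT_TAIL_REG hold byte offsets into it.  Records are consumed from
 * head to tail (wrapping at EVT_TBL_SIZE) and the new head is written
 * back to acknowledge them to the hardware.
 */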
2076 int
2077 ivhd_poll_events(struct iommu_softc *iommu)
2078 {
2079 	uint32_t head, tail;
2080 	int sz;
2081 
2082 	sz = sizeof(struct ivhd_event);
2083 	head = iommu_read_4(iommu, EVT_HEAD_REG);
2084 	tail = iommu_read_4(iommu, EVT_TAIL_REG);
2085 	if (head == tail) {
2086 		/* No pending events */
2087 		return (0);
2088 	}
2089 	while (head != tail) {
2090 		ivhd_show_event(iommu, iommu->evt_tbl + head, head);
2091 		head = (head + sz) % EVT_TBL_SIZE;
2092 	}
2093 	iommu_write_4(iommu, EVT_HEAD_REG, head);
2094 	return (0);
2095 }
2096 
2097 /* AMD: Issue command to IOMMU queue */
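/*
 * The command buffer is a ring of 16-byte commands addressed by byte
 * offsets in CMD_HEAD_REG (consumer) and CMD_TAIL_REG (producer).  The
 * ring is full when advancing the tail would reach the head; on success
 * the slot index (tail / sizeof(command)) is returned.  Interrupts are
 * disabled around the tail update so the enqueue cannot be interleaved.
 */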
2098 int
2099 _ivhd_issue_command(struct iommu_softc *iommu, const struct ivhd_command *cmd)
2100 {
2101 	u_long rf;
2102 	uint32_t head, tail, next;
2103 	int sz;
2104 
2105 	head = iommu_read_4(iommu, CMD_HEAD_REG);
2106 	sz = sizeof(*cmd);
2107 	rf = intr_disable();
2108 	tail = iommu_read_4(iommu, CMD_TAIL_REG);
2109 	next = (tail + sz) % CMD_TBL_SIZE;
2110 	if (next == head) {
2111 		/* Queue is full */
2112 		printf("iommu: command queue full\n");
2113 		intr_restore(rf);
2114 		return -EBUSY;
2115 	}
2116 	memcpy(iommu->cmd_tbl + tail, cmd, sz);
2117 	iommu_write_4(iommu, CMD_TAIL_REG, next);
2118 	intr_restore(rf);
2119 	return (tail / sz);
2120 }
2121 
2122 #define IVHD_MAXDELAY 8
2123 
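/*
 * When `wait' is set, a COMPLETION_WAIT command is appended whose dw0/dw1
 * carry the physical address of a 16-byte aligned stack variable; the
 * hardware stores the dw2/dw3 payload there once every earlier command has
 * finished, and the variable is polled with DELAY(10 << i) backoff for at
 * most IVHD_MAXDELAY iterations.
 */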
2124 int
2125 ivhd_issue_command(struct iommu_softc *iommu, const struct ivhd_command *cmd, int wait)
2126 {
2127 	struct ivhd_command wq = { 0 };
2128 	volatile uint64_t wv __aligned(16) = 0LL;
2129 	paddr_t paddr;
2130 	int rc, i;
2131 
2132 	rc = _ivhd_issue_command(iommu, cmd);
2133 	if (rc >= 0 && wait) {
2134 		/* Wait for previous commands to complete; store the address
2135 		 * of the completion variable in the wait command. */
2136 		pmap_extract(pmap_kernel(), (vaddr_t)&wv, &paddr);
2137 		wq.dw0 = (paddr & ~0xF) | 0x1;
2138 		wq.dw1 = (COMPLETION_WAIT << CMD_SHIFT) | ((paddr >> 32) & 0xFFFFF);
2139 		wq.dw2 = 0xDEADBEEF;
2140 		wq.dw3 = 0xFEEDC0DE;
2141 
2142 		rc = _ivhd_issue_command(iommu, &wq);
2143 		/* wv will change to value in dw2/dw3 when command is complete */
2144 		for (i = 0; i < IVHD_MAXDELAY && !wv; i++) {
2145 			DELAY(10 << i);
2146 		}
2147 		if (i == IVHD_MAXDELAY) {
2148 			printf("ivhd command timeout: %.8x %.8x %.8x %.8x wv:%llx idx:%x\n",
2149 			    cmd->dw0, cmd->dw1, cmd->dw2, cmd->dw3, wv, rc);
2150 		}
2151 	}
2152 	return rc;
2153 	return rc;
2155 
2156 /* AMD: Flush changes to Device Table Entry for a specific domain */
2157 int
2158 ivhd_flush_devtab(struct iommu_softc *iommu, int did)
2159 {
2160 	struct ivhd_command cmd = {
2161 	    .dw0 = did,
2162 	    .dw1 = INVALIDATE_DEVTAB_ENTRY << CMD_SHIFT
2163 	};
2164 
2165 	return ivhd_issue_command(iommu, &cmd, 1);
2166 }
2167 
2168 /* AMD: Invalidate all IOMMU device and page tables */
2169 int
2170 ivhd_invalidate_iommu_all(struct iommu_softc *iommu)
2171 {
2172 	struct ivhd_command cmd = {
2173 	    .dw1 = INVALIDATE_IOMMU_ALL << CMD_SHIFT
2174 	};
2175 
2176 	return ivhd_issue_command(iommu, &cmd, 0);
2177 }
2178 
2179 /* AMD: Invalidate interrupt remapping */
2180 int
2181 ivhd_invalidate_interrupt_table(struct iommu_softc *iommu, int did)
2182 {
2183 	struct ivhd_command cmd = {
2184 	    .dw0 = did,
2185 	    .dw1 = INVALIDATE_INTERRUPT_TABLE << CMD_SHIFT
2186 	};
2187 
2188 	return ivhd_issue_command(iommu, &cmd, 0);
2189 }
2190 
2191 /* AMD: Invalidate all page tables in a domain */
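/*
 * The dw2/dw3 constants below encode what the AMD IOMMU spec describes as
 * the "invalidate all pages" form: an all-ones address with the S (size)
 * and PDE bits set in the low bits of dw2.
 */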
2192 int
2193 ivhd_invalidate_domain(struct iommu_softc *iommu, int did)
2194 {
2195 	struct ivhd_command cmd = { .dw1 = did | (INVALIDATE_IOMMU_PAGES << CMD_SHIFT) };
2196 
2197 	cmd.dw2 = 0xFFFFF000 | 0x3;
2198 	cmd.dw3 = 0x7FFFFFFF;
2199 	return ivhd_issue_command(iommu, &cmd, 1);
2200 }
2201 
2202 /* AMD: Display Registers */
2203 void
2204 ivhd_showreg(struct iommu_softc *iommu)
2205 {
2206 	printf("---- dt:%.16llx cmd:%.16llx evt:%.16llx ctl:%.16llx sts:%.16llx\n",
2207 	    iommu_read_8(iommu, DEV_TAB_BASE_REG),
2208 	    iommu_read_8(iommu, CMD_BASE_REG),
2209 	    iommu_read_8(iommu, EVT_BASE_REG),
2210 	    iommu_read_8(iommu, IOMMUCTL_REG),
2211 	    iommu_read_8(iommu, IOMMUSTS_REG));
2212 	printf("---- cmd queue:%.16llx %.16llx evt queue:%.16llx %.16llx\n",
2213 	    iommu_read_8(iommu, CMD_HEAD_REG),
2214 	    iommu_read_8(iommu, CMD_TAIL_REG),
2215 	    iommu_read_8(iommu, EVT_HEAD_REG),
2216 	    iommu_read_8(iommu, EVT_TAIL_REG));
2217 }
2218 
2219 /* AMD: Generate Errors to test event handler */
2220 void
2221 ivhd_checkerr(struct iommu_softc *iommu)
2222 {
2223 	struct ivhd_command cmd = { -1, -1, -1, -1 };
2224 
2225 	/* Generate ILLEGAL DEV TAB entry? */
2226 	iommu->dte[0x2303].dw0 = -1;		/* invalid */
2227 	iommu->dte[0x2303].dw2 = 0x1234;	/* domain */
2228 	iommu->dte[0x2303].dw7 = -1;		/* reserved */
2229 	ivhd_flush_devtab(iommu, 0x1234);
2230 	ivhd_poll_events(iommu);
2231 
2232 	/* Generate ILLEGAL_COMMAND_ERROR : ok */
2233 	ivhd_issue_command(iommu, &cmd, 0);
2234 	ivhd_poll_events(iommu);
2235 
2236 	/* Generate page hardware error */
2237 }
2238 
2239 /* AMD: Show Device Table Entry */
2240 void
2241 ivhd_showdte(struct iommu_softc *iommu)
2242 {
2243 	int i;
2244 
2245 	for (i = 0; i < 65536; i++) {
2246 		if (iommu->dte[i].dw0) {
2247 			printf("%.2x:%.2x.%x: %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
2248 			    i >> 8, (i >> 3) & 0x1F, i & 0x7,
2249 			    iommu->dte[i].dw0, iommu->dte[i].dw1,
2250 			    iommu->dte[i].dw2, iommu->dte[i].dw3,
2251 			    iommu->dte[i].dw4, iommu->dte[i].dw5,
2252 			    iommu->dte[i].dw6, iommu->dte[i].dw7);
2253 		}
2254 	}
2255 }
2256 
2257 /* AMD: Show command entries */
2258 void
2259 ivhd_showcmd(struct iommu_softc *iommu)
2260 {
2261 	struct ivhd_command *ihd;
2262 	paddr_t phd;
2263 	int i;
2264 
2265 	ihd = iommu->cmd_tbl;
2266 	phd = iommu_read_8(iommu, CMD_BASE_REG) & CMD_BASE_MASK;
2267 	for (i = 0; i < 4096 / 128; i++) {
2268 		printf("%.2x: %.16llx %.8x %.8x %.8x %.8x\n", i,
2269 		    (uint64_t)phd + i * sizeof(*ihd),
2270 		    ihd[i].dw0, ihd[i].dw1, ihd[i].dw2, ihd[i].dw3);
2271 	}
2272 }
2273 
2274 #define _c(x) (int)((iommu->ecap >> x ## _SHIFT) & x ## _MASK)
2275 
2276 /* AMD: Initialize IOMMU */
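/*
 * Bring-up sequence: map the register window, read the extended feature
 * register, disable the IOMMU, program the command buffer, event log and
 * shared device table, then re-enable it with the control bits requested
 * by the IVHD flags plus a 10ms invalidation timeout.
 */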
2277 int
2278 ivhd_iommu_init(struct acpidmar_softc *sc, struct iommu_softc *iommu,
2279 	struct acpi_ivhd *ivhd)
2280 {
2281 	static int niommu;
2282 	paddr_t paddr;
2283 	uint64_t ov;
2284 
2285 	if (sc == NULL || iommu == NULL || ivhd == NULL) {
2286 		printf("Bad pointer to iommu_init!\n");
2287 		return -1;
2288 	}
2289 	if (_bus_space_map(sc->sc_memt, ivhd->address, 0x80000, 0, &iommu->ioh) != 0) {
2290 		printf("iommu: bus_space_map failed\n");
2291 		return -1;
2292 	}
2293 	TAILQ_INIT(&iommu->domains);
2294 	TAILQ_INIT(&iommu->devices);
2295 
2296 	/* Setup address width and number of domains */
2297 	iommu->id = ++niommu;
2298 	iommu->iot = sc->sc_memt;
2299 	iommu->mgaw = 48;
2300 	iommu->agaw = 48;
2301 	iommu->flags = 1;
2302 	iommu->segment = 0;
2303 	iommu->ndoms = 256;
2304 
2305 	printf(": AMD iommu%d at 0x%.8llx\n", iommu->id, ivhd->address);
2306 
2307 	iommu->ecap = iommu_read_8(iommu, EXTFEAT_REG);
2308 	DPRINTF(0,"iommu%d: ecap:%.16llx ", iommu->id, iommu->ecap);
2309 	DPRINTF(0,"%s%s%s%s%s%s%s%s\n",
2310 	    iommu->ecap & EFR_PREFSUP ? "pref " : "",
2311 	    iommu->ecap & EFR_PPRSUP  ? "ppr " : "",
2312 	    iommu->ecap & EFR_NXSUP   ? "nx " : "",
2313 	    iommu->ecap & EFR_GTSUP   ? "gt " : "",
2314 	    iommu->ecap & EFR_IASUP   ? "ia " : "",
2315 	    iommu->ecap & EFR_GASUP   ? "ga " : "",
2316 	    iommu->ecap & EFR_HESUP   ? "he " : "",
2317 	    iommu->ecap & EFR_PCSUP   ? "pc " : "");
2318 	DPRINTF(0,"hats:%x gats:%x glxsup:%x smif:%x smifrc:%x gam:%x\n",
2319 	    _c(EFR_HATS), _c(EFR_GATS), _c(EFR_GLXSUP), _c(EFR_SMIFSUP),
2320 	    _c(EFR_SMIFRC), _c(EFR_GAMSUP));
2321 
2322 	/* Turn off iommu */
2323 	ov = iommu_read_8(iommu, IOMMUCTL_REG);
2324 	iommu_write_8(iommu, IOMMUCTL_REG, ov & ~(CTL_IOMMUEN | CTL_COHERENT |
2325 		CTL_HTTUNEN | CTL_RESPASSPW | CTL_PASSPW | CTL_ISOC));
2326 
2327 	/* Enable intr, mark IOMMU device as invalid for remap */
2328 	sid_flag[ivhd->devid] |= SID_INVALID;
2329 	ivhd_intr_map(iommu, ivhd->devid);
2330 
2331 	/* Setup command buffer: 4k buffer (256 16-byte entries) */
2332 	iommu->cmd_tbl = iommu_alloc_page(iommu, &paddr);
2333 	iommu_write_8(iommu, CMD_BASE_REG, (paddr & CMD_BASE_MASK) | CMD_TBL_LEN_4K);
2334 	iommu_write_4(iommu, CMD_HEAD_REG, 0x00);
2335 	iommu_write_4(iommu, CMD_TAIL_REG, 0x00);
2336 	iommu->cmd_tblp = paddr;
2337 
2338 	/* Setup event log: 4k buffer (256 16-byte entries) */
2339 	iommu->evt_tbl = iommu_alloc_page(iommu, &paddr);
2340 	iommu_write_8(iommu, EVT_BASE_REG, (paddr & EVT_BASE_MASK) | EVT_TBL_LEN_4K);
2341 	iommu_write_4(iommu, EVT_HEAD_REG, 0x00);
2342 	iommu_write_4(iommu, EVT_TAIL_REG, 0x00);
2343 	iommu->evt_tblp = paddr;
2344 
2345 	/* Setup device table
2346 	 * 1 entry per source ID (bus:device:function - 64k entries)
2347 	 */
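	/*
	 * The table itself (sc_hwdte) is allocated once in acpiivrs_init()
	 * and shared by every IOMMU instance in the system.
	 */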
2348 	iommu->dte = sc->sc_hwdte;
2349 	iommu_write_8(iommu, DEV_TAB_BASE_REG, (sc->sc_hwdtep & DEV_TAB_MASK) | DEV_TAB_LEN);
2350 
2351 	/* Enable IOMMU */
2352 	ov |= (CTL_IOMMUEN | CTL_EVENTLOGEN | CTL_CMDBUFEN | CTL_EVENTINTEN);
2353 	if (ivhd->flags & IVHD_COHERENT)
2354 		ov |= CTL_COHERENT;
2355 	if (ivhd->flags & IVHD_HTTUNEN)
2356 		ov |= CTL_HTTUNEN;
2357 	if (ivhd->flags & IVHD_RESPASSPW)
2358 		ov |= CTL_RESPASSPW;
2359 	if (ivhd->flags & IVHD_PASSPW)
2360 		ov |= CTL_PASSPW;
2361 	if (ivhd->flags & IVHD_ISOC)
2362 		ov |= CTL_ISOC;
2363 	ov &= ~(CTL_INVTIMEOUT_MASK << CTL_INVTIMEOUT_SHIFT);
2364 	ov |=  (CTL_INVTIMEOUT_10MS << CTL_INVTIMEOUT_SHIFT);
2365 	iommu_write_8(iommu, IOMMUCTL_REG, ov);
2366 
2367 	ivhd_invalidate_iommu_all(iommu);
2368 
2369 	TAILQ_INSERT_TAIL(&sc->sc_drhds, iommu, link);
2370 	return 0;
2371 }
2372 
2373 void
2374 acpiivrs_ivhd(struct acpidmar_softc *sc, struct acpi_ivhd *ivhd)
2375 {
2376 	struct iommu_softc *iommu;
2377 	struct acpi_ivhd_ext *ext;
2378 	union acpi_ivhd_entry *ie;
2379 	int start, off, dte, all_dte = 0;
2380 
2381 	if (ivhd->type == IVRS_IVHD_EXT) {
2382 		ext = (struct acpi_ivhd_ext *)ivhd;
2383 		DPRINTF(0,"ivhd: %.2x %.2x %.4x %.4x:%s %.4x %.16llx %.4x %.8x %.16llx\n",
2384 		    ext->type, ext->flags, ext->length,
2385 		    ext->segment, dmar_bdf(ext->devid), ext->cap,
2386 		    ext->address, ext->info,
2387 		    ext->attrib, ext->efr);
2388 		if (ext->flags & IVHD_PPRSUP)
2389 			DPRINTF(0," PPRSup");
2390 		if (ext->flags & IVHD_PREFSUP)
2391 			DPRINTF(0," PreFSup");
2392 		if (ext->flags & IVHD_COHERENT)
2393 			DPRINTF(0," Coherent");
2394 		if (ext->flags & IVHD_IOTLB)
2395 			DPRINTF(0," Iotlb");
2396 		if (ext->flags & IVHD_ISOC)
2397 			DPRINTF(0," ISoc");
2398 		if (ext->flags & IVHD_RESPASSPW)
2399 			DPRINTF(0," ResPassPW");
2400 		if (ext->flags & IVHD_PASSPW)
2401 			DPRINTF(0," PassPW");
2402 		if (ext->flags & IVHD_HTTUNEN)
2403 			DPRINTF(0, " HtTunEn");
2404 		if (ext->flags)
2405 			DPRINTF(0,"\n");
2406 		off = sizeof(*ext);
2407 		iommu = malloc(sizeof(*iommu), M_DEVBUF, M_ZERO|M_WAITOK);
2408 		ivhd_iommu_init(sc, iommu, ivhd);
2409 	} else {
2410 		DPRINTF(0,"ivhd: %.2x %.2x %.4x %.4x:%s %.4x %.16llx %.4x %.8x\n",
2411 		    ivhd->type, ivhd->flags, ivhd->length,
2412 		    ivhd->segment, dmar_bdf(ivhd->devid), ivhd->cap,
2413 		    ivhd->address, ivhd->info,
2414 		    ivhd->feature);
2415 		if (ivhd->flags & IVHD_PPRSUP)
2416 			DPRINTF(0," PPRSup");
2417 		if (ivhd->flags & IVHD_PREFSUP)
2418 			DPRINTF(0," PreFSup");
2419 		if (ivhd->flags & IVHD_COHERENT)
2420 			DPRINTF(0," Coherent");
2421 		if (ivhd->flags & IVHD_IOTLB)
2422 			DPRINTF(0," Iotlb");
2423 		if (ivhd->flags & IVHD_ISOC)
2424 			DPRINTF(0," ISoc");
2425 		if (ivhd->flags & IVHD_RESPASSPW)
2426 			DPRINTF(0," ResPassPW");
2427 		if (ivhd->flags & IVHD_PASSPW)
2428 			DPRINTF(0," PassPW");
2429 		if (ivhd->flags & IVHD_HTTUNEN)
2430 			DPRINTF(0, " HtTunEn");
2431 		if (ivhd->flags)
2432 			DPRINTF(0,"\n");
2433 		off = sizeof(*ivhd);
2434 	}
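	/*
	 * The IVHD body is a list of variable-length device entries
	 * (ALL/SELECT/RANGE/ALIAS/EXT/SPECIAL) associating device-table
	 * settings with source IDs; the walk below currently only logs them.
	 */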
2435 	while (off < ivhd->length) {
2436 		ie = (void *)ivhd + off;
2437 		switch (ie->type) {
2438 		case IVHD_ALL:
2439 			all_dte = ie->all.data;
2440 			DPRINTF(0," ALL %.4x\n", all_dte);
2441 			off += sizeof(ie->all);
2442 			break;
2443 		case IVHD_SEL:
2444 			dte = ie->sel.data;
2445 			DPRINTF(0," SELECT: %s %.4x\n", dmar_bdf(ie->sel.devid), dte);
2446 			off += sizeof(ie->sel);
2447 			break;
2448 		case IVHD_SOR:
2449 			dte = ie->sor.data;
2450 			start = ie->sor.devid;
2451 			DPRINTF(0," SOR: %s %.4x\n", dmar_bdf(start), dte);
2452 			off += sizeof(ie->sor);
2453 			break;
2454 		case IVHD_EOR:
2455 			DPRINTF(0," EOR: %s\n", dmar_bdf(ie->eor.devid));
2456 			off += sizeof(ie->eor);
2457 			break;
2458 		case IVHD_ALIAS_SEL:
2459 			dte = ie->alias.data;
2460 			DPRINTF(0," ALIAS: src=%s: ", dmar_bdf(ie->alias.srcid));
2461 			DPRINTF(0," %s %.4x\n", dmar_bdf(ie->alias.devid), dte);
2462 			off += sizeof(ie->alias);
2463 			break;
2464 		case IVHD_ALIAS_SOR:
2465 			dte = ie->alias.data;
2466 			DPRINTF(0," ALIAS_SOR: %s %.4x ", dmar_bdf(ie->alias.devid), dte);
2467 			DPRINTF(0," src=%s\n", dmar_bdf(ie->alias.srcid));
2468 			off += sizeof(ie->alias);
2469 			break;
2470 		case IVHD_EXT_SEL:
2471 			dte = ie->ext.data;
2472 			DPRINTF(0," EXT SEL: %s %.4x %.8x\n", dmar_bdf(ie->ext.devid),
2473 			    dte, ie->ext.extdata);
2474 			off += sizeof(ie->ext);
2475 			break;
2476 		case IVHD_EXT_SOR:
2477 			dte = ie->ext.data;
2478 			DPRINTF(0," EXT SOR: %s %.4x %.8x\n", dmar_bdf(ie->ext.devid),
2479 			    dte, ie->ext.extdata);
2480 			off += sizeof(ie->ext);
2481 			break;
2482 		case IVHD_SPECIAL:
2483 			DPRINTF(0," SPECIAL\n");
2484 			off += sizeof(ie->special);
2485 			break;
2486 		default:
2487 			DPRINTF(0," 2:unknown %x\n", ie->type);
2488 			off = ivhd->length;
2489 			break;
2490 		}
2491 	}
2492 }
2493 
2494 void
2495 acpiivrs_init(struct acpidmar_softc *sc, struct acpi_ivrs *ivrs)
2496 {
2497 	union acpi_ivrs_entry *ie;
2498 	int off;
2499 
2500 	if (!sc->sc_hwdte) {
2501 		sc->sc_hwdte = iommu_alloc_hwdte(sc, HWDTE_SIZE, &sc->sc_hwdtep);
2502 		if (sc->sc_hwdte == NULL)
2503 			panic("Can't allocate HWDTE!\n");
2504 	}
2505 
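	/*
	 * Page-table construction is vendor specific, so the global
	 * domain_map_page hook is pointed at the AMD implementation here;
	 * acpidmar_init() selects domain_map_page_intel for VT-d.
	 */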
2506 	domain_map_page = domain_map_page_amd;
2507 	DPRINTF(0,"IVRS Version: %d\n", ivrs->hdr.revision);
2508 	DPRINTF(0," VA Size: %d\n",
2509 	    (ivrs->ivinfo >> IVRS_VASIZE_SHIFT) & IVRS_VASIZE_MASK);
2510 	DPRINTF(0," PA Size: %d\n",
2511 	    (ivrs->ivinfo >> IVRS_PASIZE_SHIFT) & IVRS_PASIZE_MASK);
2512 
2513 	TAILQ_INIT(&sc->sc_drhds);
2514 	TAILQ_INIT(&sc->sc_rmrrs);
2515 	TAILQ_INIT(&sc->sc_atsrs);
2516 
2517 	DPRINTF(0,"======== IVRS\n");
2518 	off = sizeof(*ivrs);
2519 	while (off < ivrs->hdr.length) {
2520 		ie = (void *)ivrs + off;
2521 		switch (ie->type) {
2522 		case IVRS_IVHD:
2523 		case IVRS_IVHD_EXT:
2524 			acpiivrs_ivhd(sc, &ie->ivhd);
2525 			break;
2526 		case IVRS_IVMD_ALL:
2527 		case IVRS_IVMD_SPECIFIED:
2528 		case IVRS_IVMD_RANGE:
2529 			DPRINTF(0,"ivmd\n");
2530 			break;
2531 		default:
2532 			DPRINTF(0,"1:unknown: %x\n", ie->type);
2533 			break;
2534 		}
2535 		off += ie->length;
2536 	}
2537 	DPRINTF(0,"======== End IVRS\n");
2538 }
2539 
2540 static int
2541 acpiivhd_activate(struct iommu_softc *iommu, int act)
2542 {
2543 	switch (act) {
2544 	case DVACT_SUSPEND:
2545 		iommu->flags |= IOMMU_FLAGS_SUSPEND;
2546 		break;
2547 	case DVACT_RESUME:
2548 		iommu->flags &= ~IOMMU_FLAGS_SUSPEND;
2549 		break;
2550 	}
2551 	return (0);
2552 }
2553 
2554 int
2555 acpidmar_activate(struct device *self, int act)
2556 {
2557 	struct acpidmar_softc *sc = (struct acpidmar_softc *)self;
2558 	struct iommu_softc *iommu;
2559 
2560 	printf("called acpidmar_activate %d %p\n", act, sc);
2561 
2562 	if (sc == NULL) {
2563 		return (0);
2564 	}
2565 
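	/*
	 * On resume the remapping hardware may have lost its state, so the
	 * root table address and fault-event MSI registers are reloaded and
	 * translation is re-enabled; AMD IOMMUs (iommu->dte != NULL) only
	 * track the suspend flag via acpiivhd_activate().
	 */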
2566 	switch (act) {
2567 	case DVACT_RESUME:
2568 		TAILQ_FOREACH(iommu, &sc->sc_drhds, link) {
2569 			printf("iommu%d resume\n", iommu->id);
2570 			if (iommu->dte) {
2571 				acpiivhd_activate(iommu, act);
2572 				continue;
2573 			}
2574 			iommu_flush_write_buffer(iommu);
2575 			iommu_set_rtaddr(iommu, iommu->rtaddr);
2576 			iommu_write_4(iommu, DMAR_FEDATA_REG, iommu->fedata);
2577 			iommu_write_4(iommu, DMAR_FEADDR_REG, iommu->feaddr);
2578 			iommu_write_4(iommu, DMAR_FEUADDR_REG,
2579 			    iommu->feaddr >> 32);
2580 			if ((iommu->flags & (IOMMU_FLAGS_BAD|IOMMU_FLAGS_SUSPEND)) ==
2581 			    IOMMU_FLAGS_SUSPEND) {
2582 				printf("enable wakeup translation\n");
2583 				iommu_enable_translation(iommu, 1);
2584 			}
2585 			iommu_showcfg(iommu, -1);
2586 		}
2587 		break;
2588 	case DVACT_SUSPEND:
2589 		TAILQ_FOREACH(iommu, &sc->sc_drhds, link) {
2590 			printf("iommu%d suspend\n", iommu->id);
2591 			if (iommu->flags & IOMMU_FLAGS_BAD)
2592 				continue;
2593 			if (iommu->dte) {
2594 				acpiivhd_activate(iommu, act);
2595 				continue;
2596 			}
2597 			iommu->flags |= IOMMU_FLAGS_SUSPEND;
2598 			iommu_enable_translation(iommu, 0);
2599 			iommu_showcfg(iommu, -1);
2600 		}
2601 		break;
2602 	}
2603 	return (0);
2604 }
2605 
2606 int
2607 acpidmar_match(struct device *parent, void *match, void *aux)
2608 {
2609 	struct acpi_attach_args		*aaa = aux;
2610 	struct acpi_table_header	*hdr;
2611 
2612 	/* If we do not have a table, it is not us */
2613 	if (aaa->aaa_table == NULL)
2614 		return (0);
2615 
2616 	/* If it is an DMAR table, we can attach */
2617 	hdr = (struct acpi_table_header *)aaa->aaa_table;
2618 	if (memcmp(hdr->signature, DMAR_SIG, sizeof(DMAR_SIG) - 1) == 0)
2619 		return (1);
2620 	if (memcmp(hdr->signature, IVRS_SIG, sizeof(IVRS_SIG) - 1) == 0)
2621 		return (1);
2622 
2623 	return (0);
2624 }
2625 
2626 void
2627 acpidmar_attach(struct device *parent, struct device *self, void *aux)
2628 {
2629 	struct acpidmar_softc *sc = (void *)self;
2630 	struct acpi_attach_args	*aaa = aux;
2631 	struct acpi_dmar *dmar = (struct acpi_dmar *)aaa->aaa_table;
2632 	struct acpi_ivrs *ivrs = (struct acpi_ivrs *)aaa->aaa_table;
2633 	struct acpi_table_header *hdr;
2634 
2635 	hdr = (struct acpi_table_header *)aaa->aaa_table;
2636 	sc->sc_memt = aaa->aaa_memt;
2637 	sc->sc_dmat = aaa->aaa_dmat;
2638 	if (memcmp(hdr->signature, DMAR_SIG, sizeof(DMAR_SIG) - 1) == 0) {
2639 		acpidmar_sc = sc;
2640 		acpidmar_init(sc, dmar);
2641 	}
2642 	if (memcmp(hdr->signature, IVRS_SIG, sizeof(IVRS_SIG) - 1) == 0) {
2643 		acpidmar_sc = sc;
2644 		acpiivrs_init(sc, ivrs);
2645 	}
2646 }
2647 
2648 /* Interrupt handling for the DMAR fault-event MSI */
2649 void acpidmar_msi_hwmask(struct pic *, int);
2650 void acpidmar_msi_hwunmask(struct pic *, int);
2651 void acpidmar_msi_addroute(struct pic *, struct cpu_info *, int, int, int);
2652 void acpidmar_msi_delroute(struct pic *, struct cpu_info *, int, int, int);
2653 
2654 void
2655 acpidmar_msi_hwmask(struct pic *pic, int pin)
2656 {
2657 	struct iommu_pic	*ip = (void *)pic;
2658 	struct iommu_softc	*iommu = ip->iommu;
2659 
2660 	printf("msi_hwmask\n");
2661 
2662 	mtx_enter(&iommu->reg_lock);
2663 
2664 	iommu_write_4(iommu, DMAR_FECTL_REG, FECTL_IM);
2665 	iommu_read_4(iommu, DMAR_FECTL_REG);
2666 
2667 	mtx_leave(&iommu->reg_lock);
2668 }
2669 
2670 void
2671 acpidmar_msi_hwunmask(struct pic *pic, int pin)
2672 {
2673 	struct iommu_pic	*ip = (void *)pic;
2674 	struct iommu_softc	*iommu = ip->iommu;
2675 
2676 	printf("msi_hwunmask\n");
2677 
2678 	mtx_enter(&iommu->reg_lock);
2679 
2680 	iommu_write_4(iommu, DMAR_FECTL_REG, 0);
2681 	iommu_read_4(iommu, DMAR_FECTL_REG);
2682 
2683 	mtx_leave(&iommu->reg_lock);
2684 }
2685 
2686 void
2687 acpidmar_msi_addroute(struct pic *pic, struct cpu_info *ci, int pin, int vec,
2688     int type)
2689 {
2690 	struct iommu_pic	*ip = (void *)pic;
2691 	struct iommu_softc	*iommu = ip->iommu;
2692 
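	/*
	 * Program the fault-event MSI: the data register carries the vector
	 * and the address register is the standard 0xfee00000 MSI window
	 * with the destination APIC ID in bits 12-19.
	 */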
2693 	mtx_enter(&iommu->reg_lock);
2694 
2695 	iommu->fedata = vec;
2696 	iommu->feaddr = 0xfee00000L | (ci->ci_apicid << 12);
2697 	iommu_write_4(iommu, DMAR_FEDATA_REG, vec);
2698 	iommu_write_4(iommu, DMAR_FEADDR_REG, iommu->feaddr);
2699 	iommu_write_4(iommu, DMAR_FEUADDR_REG, iommu->feaddr >> 32);
2700 
2701 	mtx_leave(&iommu->reg_lock);
2702 }
2703 
2704 void
2705 acpidmar_msi_delroute(struct pic *pic, struct cpu_info *ci, int pin, int vec,
2706     int type)
2707 {
2708 	printf("msi_delroute\n");
2709 }
2710 
2711 void *
2712 acpidmar_intr_establish(void *ctx, int level, int (*func)(void *),
2713     void *arg, const char *what)
2714 {
2715 	struct iommu_softc	*iommu = ctx;
2716 	struct pic		*pic;
2717 
2718 	pic = &iommu->pic.pic;
2719 	iommu->pic.iommu = iommu;
2720 
2721 	strlcpy(pic->pic_dev.dv_xname, "dmarpic",
2722 		sizeof(pic->pic_dev.dv_xname));
2723 	pic->pic_type = PIC_MSI;
2724 	pic->pic_hwmask = acpidmar_msi_hwmask;
2725 	pic->pic_hwunmask = acpidmar_msi_hwunmask;
2726 	pic->pic_addroute = acpidmar_msi_addroute;
2727 	pic->pic_delroute = acpidmar_msi_delroute;
2728 	pic->pic_edge_stubs = ioapic_edge_stubs;
2729 #ifdef MULTIPROCESSOR
2730 	mtx_init(&pic->pic_mutex, level);
2731 #endif
2732 
2733 	return intr_establish(-1, pic, 0, IST_PULSE, level, NULL, func, arg, what);
2734 }
2735 
2736 /* Intel: Handle DMAR Interrupt */
2737 int
2738 acpidmar_intr(void *ctx)
2739 {
2740 	struct iommu_softc		*iommu = ctx;
2741 	struct fault_entry		fe;
2742 	static struct fault_entry	ofe;
2743 	int				fro, nfr, fri, i;
2744 	uint32_t			sts;
2745 
2746 	/*splassert(IPL_HIGH);*/
2747 
2748 	if (!(iommu->gcmd & GCMD_TE)) {
2749 		return (1);
2750 	}
2751 	mtx_enter(&iommu->reg_lock);
2752 	iommu_read_4(iommu, DMAR_FECTL_REG);	/* value unused; status read below */
2753 	sts = iommu_read_4(iommu, DMAR_FSTS_REG);
2754 
2755 	if (!(sts & FSTS_PPF)) {
2756 		mtx_leave(&iommu->reg_lock);
2757 		return (1);
2758 	}
2759 
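	/*
	 * Walk the fault recording registers: the capability register gives
	 * their count (nfr) and offset (fro), FSTS_FRI indexes the first
	 * faulting slot, and each 16-byte record is valid while its F bit
	 * is set.  Identical consecutive faults are suppressed via `ofe'.
	 */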
2760 	nfr = cap_nfr(iommu->cap);
2761 	fro = cap_fro(iommu->cap);
2762 	fri = (sts >> FSTS_FRI_SHIFT) & FSTS_FRI_MASK;
2763 	for (i = 0; i < nfr; i++) {
2764 		fe.hi = iommu_read_8(iommu, fro + (fri*16) + 8);
2765 		if (!(fe.hi & FRCD_HI_F))
2766 			break;
2767 
2768 		fe.lo = iommu_read_8(iommu, fro + (fri*16));
2769 		if (ofe.hi != fe.hi || ofe.lo != fe.lo) {
2770 			iommu_showfault(iommu, fri, &fe);
2771 			ofe.hi = fe.hi;
2772 			ofe.lo = fe.lo;
2773 		}
2774 		fri = (fri + 1) % nfr;
2775 	}
2776 
2777 	iommu_write_4(iommu, DMAR_FSTS_REG, FSTS_PFO | FSTS_PPF);
2778 
2779 	mtx_leave(&iommu->reg_lock);
2780 
2781 	return (1);
2782 }
2783 
2784 const char *vtd_faults[] = {
2785 	"Software",
2786 	"Root Entry Not Present",	/* ok (rtaddr + 4096) */
2787 	"Context Entry Not Present",	/* ok (no CTX_P) */
2788 	"Context Entry Invalid",	/* ok (tt = 3) */
2789 	"Address Beyond MGAW",
2790 	"Write",			/* ok */
2791 	"Read",				/* ok */
2792 	"Paging Entry Invalid",		/* ok */
2793 	"Root Table Invalid",
2794 	"Context Table Invalid",
2795 	"Root Entry Reserved",		/* ok (root.lo |= 0x4) */
2796 	"Context Entry Reserved",
2797 	"Paging Entry Reserved",
2798 	"Context Entry TT",
2799 	"Reserved",
2800 };
2801 
2802 void iommu_showpte(uint64_t, int, uint64_t);
2803 
2804 /* Intel: Show IOMMU page table entry */
2805 void
2806 iommu_showpte(uint64_t ptep, int lvl, uint64_t base)
2807 {
2808 	uint64_t nb, pb, i;
2809 	struct pte_entry *pte;
2810 
2811 	pte = (void *)PMAP_DIRECT_MAP(ptep);
2812 	for (i = 0; i < 512; i++) {
2813 		if (!(pte[i].val & PTE_P))
2814 			continue;
2815 		nb = base + (i << lvl);
2816 		pb = pte[i].val & ~VTD_PAGE_MASK;
2817 		if (lvl == VTD_LEVEL0) {
2818 			printf("   %3llx %.16llx = %.16llx %c%c %s\n",
2819 			    i, nb, pb,
2820 			    pte[i].val & PTE_R ? 'r' : ' ',
2821 			    pte[i].val & PTE_W ? 'w' : ' ',
2822 			    (nb == pb) ? " ident" : "");
2823 			if (nb == pb)
2824 				return;
2825 		} else {
2826 			iommu_showpte(pb, lvl - VTD_STRIDE_SIZE, nb);
2827 		}
2828 	}
2829 }
2830 
2831 /* Intel: Show IOMMU configuration */
2832 void
2833 iommu_showcfg(struct iommu_softc *iommu, int sid)
2834 {
2835 	int i, j, sts, cmd;
2836 	struct context_entry *ctx;
2837 	pcitag_t tag;
2838 	pcireg_t clc;
2839 
2840 	cmd = iommu_read_4(iommu, DMAR_GCMD_REG);
2841 	sts = iommu_read_4(iommu, DMAR_GSTS_REG);
2842 	printf("iommu%d: flags:%d root pa:%.16llx %s %s %s %.8x %.8x\n",
2843 	    iommu->id, iommu->flags, iommu_read_8(iommu, DMAR_RTADDR_REG),
2844 	    sts & GSTS_TES ? "enabled" : "disabled",
2845 	    sts & GSTS_QIES ? "qi" : "ccmd",
2846 	    sts & GSTS_IRES ? "ir" : "",
2847 	    cmd, sts);
2848 	for (i = 0; i < 256; i++) {
2849 		if (!root_entry_is_valid(&iommu->root[i])) {
2850 			continue;
2851 		}
2852 		for (j = 0; j < 256; j++) {
2853 			ctx = iommu->ctx[i] + j;
2854 			if (!context_entry_is_valid(ctx)) {
2855 				continue;
2856 			}
2857 			tag = pci_make_tag(NULL, i, (j >> 3), j & 0x7);
2858 			clc = pci_conf_read(NULL, tag, 0x08) >> 8;
2859 			printf("  %.2x:%.2x.%x lvl:%d did:%.4x tt:%d ptep:%.16llx flag:%x cc:%.6x\n",
2860 			    i, (j >> 3), j & 7,
2861 			    context_address_width(ctx),
2862 			    context_domain_id(ctx),
2863 			    context_translation_type(ctx),
2864 			    context_pte(ctx),
2865 			    context_user(ctx),
2866 			    clc);
2867 #if 0
2868 			/* dump pagetables */
2869 			iommu_showpte(ctx->lo & ~VTD_PAGE_MASK, iommu->agaw -
2870 			    VTD_STRIDE_SIZE, 0);
2871 #endif
2872 		}
2873 	}
2874 }
2875 
2876 /* Intel: Show IOMMU fault */
2877 void
2878 iommu_showfault(struct iommu_softc *iommu, int fri, struct fault_entry *fe)
2879 {
2880 	int bus, dev, fun, type, fr, df;
2881 	bios_memmap_t	*im;
2882 	const char *mapped;
2883 
2884 	if (!(fe->hi & FRCD_HI_F))
2885 		return;
2886 	type = (fe->hi & FRCD_HI_T) ? 'r' : 'w';
2887 	fr = (fe->hi >> FRCD_HI_FR_SHIFT) & FRCD_HI_FR_MASK;
2888 	bus = (fe->hi >> FRCD_HI_BUS_SHIFT) & FRCD_HI_BUS_MASK;
2889 	dev = (fe->hi >> FRCD_HI_DEV_SHIFT) & FRCD_HI_DEV_MASK;
2890 	fun = (fe->hi >> FRCD_HI_FUN_SHIFT) & FRCD_HI_FUN_MASK;
2891 	df  = (fe->hi >> FRCD_HI_FUN_SHIFT) & 0xFF;
2892 	iommu_showcfg(iommu, mksid(bus,dev,fun));
2893 	if (!iommu->ctx[bus]) {
2894 		/* Bus is not initialized */
2895 		mapped = "nobus";
2896 	} else if (!context_entry_is_valid(&iommu->ctx[bus][df])) {
2897 		/* DevFn not initialized */
2898 		mapped = "nodevfn";
2899 	} else if (context_user(&iommu->ctx[bus][df]) != 0xA) {
2900 		/* no bus_space_map */
2901 		mapped = "nomap";
2902 	} else {
2903 		/* bus_space_map */
2904 		mapped = "mapped";
2905 	}
2906 	printf("fri%d: dmar: %.2x:%.2x.%x %s error at %llx fr:%d [%s] iommu:%d [%s]\n",
2907 	    fri, bus, dev, fun,
2908 	    type == 'r' ? "read" : "write",
2909 	    fe->lo,
2910 	    fr, fr < nitems(vtd_faults) ? vtd_faults[fr] : "unknown",
2911 	    iommu->id,
2912 	    mapped);
2913 	for (im = bios_memmap; im->type != BIOS_MAP_END; im++) {
2914 		if ((im->type == BIOS_MAP_RES) &&
2915 		    (im->addr <= fe->lo) &&
2916 		    (fe->lo <= im->addr+im->size)) {
2917 			printf("mem in e820.reserved\n");
2918 		}
2919 	}
2920 #ifdef DDB
2921 	if (acpidmar_ddb)
2922 		db_enter();
2923 #endif
2924 }
2925 
2926