1 /*	$NetBSD: astro.c,v 1.4 2021/08/07 16:18:55 thorpej Exp $	*/
2 
3 /*	$OpenBSD: astro.c,v 1.8 2007/10/06 23:50:54 krw Exp $	*/
4 
5 /*
6  * Copyright (c) 2007 Mark Kettenis
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/device.h>
24 #include <sys/extent.h>
25 #include <sys/malloc.h>
26 #include <sys/reboot.h>
27 #include <sys/tree.h>
28 
29 #include <uvm/uvm.h>
30 
31 #include <machine/iomod.h>
32 #include <machine/autoconf.h>
33 #include <machine/pdc.h>
34 #include <machine/endian.h>
35 
36 #include <hppa/dev/cpudevs.h>
37 #include <hppa/hppa/machdep.h>
38 
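/*
 * Astro memory-mapped register layout.  The resv fields pad the structure
 * so that each register group lands at its offset within the chip's
 * register space: the LMMIO/GMMIO/IOS routing registers at 0x300, func_id
 * at 0x22000, the rope control registers at 0x22200 and the IOMMU (TLB)
 * registers at 0x22300.
 */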
39 struct astro_regs {
40 	uint32_t	rid;
41 	uint32_t	pad0000;
42 	uint32_t	ioc_ctrl;
43 	uint32_t	pad0008;
44 	uint8_t		resv1[0x0300 - 0x0010];
45 	uint64_t	lmmio_direct0_base;
46 	uint64_t	lmmio_direct0_mask;
47 	uint64_t	lmmio_direct0_route;
48 	uint64_t	lmmio_direct1_base;
49 	uint64_t	lmmio_direct1_mask;
50 	uint64_t	lmmio_direct1_route;
51 	uint64_t	lmmio_direct2_base;
52 	uint64_t	lmmio_direct2_mask;
53 	uint64_t	lmmio_direct2_route;
54 	uint64_t	lmmio_direct3_base;
55 	uint64_t	lmmio_direct3_mask;
56 	uint64_t	lmmio_direct3_route;
57 	uint64_t	lmmio_dist_base;
58 	uint64_t	lmmio_dist_mask;
59 	uint64_t	lmmio_dist_route;
60 	uint64_t	gmmio_dist_base;
61 	uint64_t	gmmio_dist_mask;
62 	uint64_t	gmmio_dist_route;
63 	uint64_t	ios_dist_base;
64 	uint64_t	ios_dist_mask;
65 	uint64_t	ios_dist_route;
66 	uint8_t		resv2[0x03c0 - 0x03a8];
67 	uint64_t	ios_direct_base;
68 	uint64_t	ios_direct_mask;
69 	uint64_t	ios_direct_route;
70 	uint8_t		resv3[0x22000 - 0x03d8];
71 	uint64_t	func_id;
72 	uint64_t	func_class;
73 	uint8_t		resv4[0x22040 - 0x22010];
74 	uint64_t	rope_config;
75 	uint8_t		resv5[0x22050 - 0x22048];
76 	uint64_t	rope_debug;
77 	uint8_t		resv6[0x22200 - 0x22058];
78 	uint64_t	rope0_control;
79 	uint64_t	rope1_control;
80 	uint64_t	rope2_control;
81 	uint64_t	rope3_control;
82 	uint64_t	rope4_control;
83 	uint64_t	rope5_control;
84 	uint64_t	rope6_control;
85 	uint64_t	rope7_control;
86 	uint8_t		resv7[0x22300 - 0x22240];
87 	uint32_t	tlb_ibase;
88 	uint32_t	pad22300;
89 	uint32_t	tlb_imask;
90 	uint32_t	pad22308;
91 	uint32_t	tlb_pcom;
92 	uint32_t	pad22310;
93 	uint32_t	tlb_tcnfg;
94 	uint32_t	pad22318;
95 	uint64_t	tlb_pdir_base;
96 };
97 
98 #define ASTRO_IOC_CTRL_TE	0x0001	/* TOC Enable */
99 #define ASTRO_IOC_CTRL_CE	0x0002	/* Coalesce Enable */
100 #define ASTRO_IOC_CTRL_DE	0x0004	/* Dillon Enable */
101 #define ASTRO_IOC_CTRL_IE	0x0008	/* IOS Enable */
102 #define ASTRO_IOC_CTRL_OS	0x0010	/* Outbound Synchronous */
103 #define ASTRO_IOC_CTRL_IS	0x0020	/* Inbound Synchronous */
104 #define ASTRO_IOC_CTRL_RC	0x0040	/* Read Current Enable */
105 #define ASTRO_IOC_CTRL_L0	0x0080	/* 0-length Read Enable */
106 #define ASTRO_IOC_CTRL_RM	0x0100	/* Real Mode */
107 #define ASTRO_IOC_CTRL_NC	0x0200	/* Non-coherent Mode */
108 #define ASTRO_IOC_CTRL_ID	0x0400	/* Interrupt Disable */
109 #define ASTRO_IOC_CTRL_D4	0x0800	/* Disable 4-byte Coalescing */
110 #define ASTRO_IOC_CTRL_CC	0x1000	/* Increase Coalescing counter value */
111 #define ASTRO_IOC_CTRL_DD	0x2000	/* Disable distr. range coalescing */
112 #define ASTRO_IOC_CTRL_DC	0x4000	/* Disable the coalescing counter */
113 
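/*
 * I/O TLB entry (IOTTE) format used in the page directory below: bit 63
 * is the valid bit, bits 12-39 hold the physical page address and the
 * low byte holds the coherence index.  See iommu_enter() for how entries
 * are built.
 */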
114 #define IOTTE_V		0x8000000000000000LL	/* Entry valid */
115 #define IOTTE_PAMASK	0x000000fffffff000LL
116 #define IOTTE_CI	0x00000000000000ffLL	/* Coherent index */
117 
118 struct astro_softc {
119 	device_t sc_dv;
120 
121 	bus_dma_tag_t sc_dmat;
122 	struct astro_regs volatile *sc_regs;
123 	uint64_t *sc_pdir;
124 
125 	char sc_dvmamapname[20];
126 	struct extent *sc_dvmamap;
127 	struct hppa_bus_dma_tag sc_dmatag;
128 };
129 
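/*
 * Every bus_dmamap_t created through this driver carries a struct
 * iommu_map_state (below) in _dm_cookie.  It records the DVMA range
 * allocated from sc_dvmamap and the pa/va/dva triple for each page of
 * the loaded map, kept both in an array and in a splay tree keyed on pa.
 */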
130 /*
131  * per-map DVMA page table
132  */
133 struct iommu_page_entry {
134 	SPLAY_ENTRY(iommu_page_entry) ipe_node;
135 	paddr_t	ipe_pa;
136 	vaddr_t	ipe_va;
137 	bus_addr_t ipe_dva;
138 };
139 
140 struct iommu_page_map {
141 	SPLAY_HEAD(iommu_page_tree, iommu_page_entry) ipm_tree;
142 	int ipm_maxpage;	/* Size of allocated page map */
143 	int ipm_pagecnt;	/* Number of entries in use */
144 	struct iommu_page_entry	ipm_map[1];
145 };
146 
147 /*
148  * per-map IOMMU state
149  */
150 struct iommu_map_state {
151 	struct astro_softc *ims_sc;
152 	bus_addr_t ims_dvmastart;
153 	bus_size_t ims_dvmasize;
154 	struct iommu_page_map ims_map;	/* map must be last (array at end) */
155 };
156 
157 int	astro_match(device_t, cfdata_t, void *);
158 void	astro_attach(device_t, device_t, void *);
159 static device_t astro_callback(device_t self, struct confargs *ca);
160 
161 CFATTACH_DECL_NEW(astro, sizeof(struct astro_softc),
162     astro_match, astro_attach, NULL, NULL);
163 
164 extern struct cfdriver astro_cd;
165 
166 int	iommu_dvmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t,
167 	    int, bus_dmamap_t *);
168 void	iommu_dvmamap_destroy(void *, bus_dmamap_t);
169 int	iommu_dvmamap_load(void *, bus_dmamap_t, void *, bus_size_t,
170 	    struct proc *, int);
171 int	iommu_dvmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
172 int	iommu_dvmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
173 int	iommu_dvmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *,
174 	    int, bus_size_t, int);
175 void	iommu_dvmamap_unload(void *, bus_dmamap_t);
176 void	iommu_dvmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
177 int	iommu_dvmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
178 	    bus_dma_segment_t *, int, int *, int);
179 void	iommu_dvmamem_free(void *, bus_dma_segment_t *, int);
180 int	iommu_dvmamem_map(void *, bus_dma_segment_t *, int, size_t,
181 	    void **, int);
182 void	iommu_dvmamem_unmap(void *, void *, size_t);
183 paddr_t	iommu_dvmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);
184 
185 void	iommu_enter(struct astro_softc *, bus_addr_t, paddr_t, vaddr_t, int);
186 void	iommu_remove(struct astro_softc *, bus_addr_t);
187 
188 struct iommu_map_state *iommu_iomap_create(int);
189 void	iommu_iomap_destroy(struct iommu_map_state *);
190 int	iommu_iomap_insert_page(struct iommu_map_state *, vaddr_t, paddr_t);
191 bus_addr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t);
192 void	iommu_iomap_clear_pages(struct iommu_map_state *);
193 
194 static int iommu_iomap_load_map(struct astro_softc *, bus_dmamap_t, int);
195 
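/*
 * Bus DMA tag template handed to child devices.  astro_attach() copies it
 * into sc_dmatag and points _cookie at the softc.  Most methods simply
 * wrap the parent tag (sc_dmat); the map create/load/unload paths also
 * maintain the IOMMU page directory.
 */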
196 const struct hppa_bus_dma_tag astro_dmat = {
197 	NULL,
198 	iommu_dvmamap_create, iommu_dvmamap_destroy,
199 	iommu_dvmamap_load, iommu_dvmamap_load_mbuf,
200 	iommu_dvmamap_load_uio, iommu_dvmamap_load_raw,
201 	iommu_dvmamap_unload, iommu_dvmamap_sync,
202 
203 	iommu_dvmamem_alloc, iommu_dvmamem_free, iommu_dvmamem_map,
204 	iommu_dvmamem_unmap, iommu_dvmamem_mmap
205 };
206 
207 int
208 astro_match(device_t parent, cfdata_t cf, void *aux)
209 {
210 	struct confargs *ca = aux;
211 
212 	/* Astro is a U-Turn variant. */
213 	if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
214 	    ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
215 		return 0;
216 
217 	if (ca->ca_type.iodc_model == 0x58 &&
218 	    ca->ca_type.iodc_revision >= 0x20)
219 		return 1;
220 
221 	return 0;
222 }
223 
224 void
225 astro_attach(device_t parent, device_t self, void *aux)
226 {
227 	struct confargs *ca = aux, nca;
228 	struct astro_softc *sc = device_private(self);
229 	volatile struct astro_regs *r;
230 	bus_space_handle_t ioh;
231 	uint32_t rid, ioc_ctrl;
232 	psize_t size;
233 	vaddr_t va;
234 	paddr_t pa;
235 	void *p;
236 	struct vm_page *m;
237 	struct pglist mlist;
238 	int iova_bits;
239 	int pagezero_cookie;
240 
241 	sc->sc_dv = self;
242 	sc->sc_dmat = ca->ca_dmatag;
243 	if (bus_space_map(ca->ca_iot, ca->ca_hpa, sizeof(struct astro_regs),
244 	    0, &ioh)) {
245 		aprint_error(": can't map IO space\n");
246 		return;
247 	}
248 	p = bus_space_vaddr(ca->ca_iot, ioh);
249 	sc->sc_regs = r = p;
250 	rid = le32toh(r->rid);
251 	aprint_normal(": Astro rev %d.%d\n", (rid & 7) + 1, (rid >> 3) & 3);
252 
253 	ioc_ctrl = le32toh(r->ioc_ctrl);
254 	ioc_ctrl &= ~ASTRO_IOC_CTRL_CE;
255 	ioc_ctrl &= ~ASTRO_IOC_CTRL_RM;
256 	ioc_ctrl &= ~ASTRO_IOC_CTRL_NC;
257 	r->ioc_ctrl = htole32(ioc_ctrl);
258 
259 	/*
260 	 * Setup the iommu.
261 	 */
262 
263 	/* XXX This gives us 256MB of iova space. */
264 	iova_bits = 28;
265 
266 	r->tlb_ibase = htole32(0);
267 	r->tlb_imask = htole32(0xffffffff << iova_bits);
268 
269 	/* Page size is 4K. */
270 	r->tlb_tcnfg = htole32(0);
271 
272 	/* Flush the whole TLB (pcom is addr | log2(size); cf. iommu_remove()). */
273 	r->tlb_pcom = htole32(31);
274 
275 	/*
276 	 * Allocate memory for I/O pagetables.  They need to be physically
277 	 * contiguous.
278 	 */
279 
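	/*
	 * With iova_bits == 28 and 4KB pages this is 1 << 16 entries of
	 * 8 bytes each, i.e. a 512KB page directory (128 pages).
	 */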
280 	size = (1 << (iova_bits - PAGE_SHIFT)) * sizeof(uint64_t);
281 	TAILQ_INIT(&mlist);
282 	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &mlist, 1, 0) != 0) {
283 		aprint_error(": can't allocate PDIR\n");
284 		return;
285 	}
286 
287 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
288 
289 	if (va == 0) {
290 		aprint_error(": can't map PDIR\n");
291 		return;
292 	}
293 	sc->sc_pdir = (uint64_t *)va;
294 
295 	m = TAILQ_FIRST(&mlist);
296 	r->tlb_pdir_base = htole64(VM_PAGE_TO_PHYS(m));
297 
298 	/* Map the pages. */
299 	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
300 		pa = VM_PAGE_TO_PHYS(m);
301 		pmap_enter(pmap_kernel(), va, pa,
302 		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
303 		va += PAGE_SIZE;
304 	}
305 	pmap_update(pmap_kernel());
306 	memset(sc->sc_pdir, 0, size);
307 
308 	/*
309 	 * The PDC might have set up some devices to do DMA.  It will do
310 	 * this for the onboard USB controller if a USB keyboard is used
311 	 * for console input.  In that case, bad things will happen if we
312 	 * enable iova space.  So reset the PDC devices before we do that.
313 	 * Don't do this if we're using a serial console though, since it
314 	 * will stop working if we do.  This is fine since the serial port
315 	 * doesn't do DMA.
316 	 */
317 	pagezero_cookie = hppa_pagezero_map();
318 	if (PAGE0->mem_cons.pz_class != PCL_DUPLEX)
319 		pdcproc_ioreset();
320 	hppa_pagezero_unmap(pagezero_cookie);
321 
322 	/* Enable iova space. */
323 	r->tlb_ibase = htole32(1);
324 
325 	/*
326 	 * Now that the hardware is set up, allocate the DVMA map.
327 	 */
328 	snprintf(sc->sc_dvmamapname, sizeof(sc->sc_dvmamapname),
329 	    "%s_dvma", device_xname(sc->sc_dv));
330 	sc->sc_dvmamap = extent_create(sc->sc_dvmamapname, 0, (1 << iova_bits),
331 	    0, 0, EX_WAITOK);
332 
333 	sc->sc_dmatag = astro_dmat;
334 	sc->sc_dmatag._cookie = sc;
335 
336 	nca = *ca;	/* clone from us */
337 	nca.ca_dmatag = &sc->sc_dmatag;
338 	nca.ca_hpabase = IOMOD_IO_IO_LOW(p);
339 	nca.ca_nmodules = MAXMODBUS;
340 	pdc_scanbus(self, &nca, astro_callback);
341 }
342 
343 static device_t
344 astro_callback(device_t self, struct confargs *ca)
345 {
346 
347 	return config_found(self, ca, mbprint,
348 	    CFARGS(.submatch = mbsubmatch));
349 }
350 
351 int
352 iommu_dvmamap_create(void *v, bus_size_t size, int nsegments,
353     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
354 {
355 	struct astro_softc *sc = v;
356 	bus_dmamap_t map;
357 	struct iommu_map_state *ims;
358 	int error;
359 
360 	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
361 	    boundary, flags, &map);
362 	if (error)
363 		return (error);
364 
365 	ims = iommu_iomap_create(atop(round_page(size)));
366 	if (ims == NULL) {
367 		bus_dmamap_destroy(sc->sc_dmat, map);
368 		return (ENOMEM);
369 	}
370 
371 	ims->ims_sc = sc;
372 	map->_dm_cookie = ims;
373 	*dmamap = map;
374 
375 	return (0);
376 }
377 
378 void
379 iommu_dvmamap_destroy(void *v, bus_dmamap_t map)
380 {
381 	struct astro_softc *sc = v;
382 
383 	/*
384 	 * The specification (man page) requires a loaded
385 	 * map to be unloaded before it is destroyed.
386 	 */
387 	if (map->dm_nsegs)
388 		iommu_dvmamap_unload(sc, map);
389 
390 	if (map->_dm_cookie)
391 		iommu_iomap_destroy(map->_dm_cookie);
392 	map->_dm_cookie = NULL;
393 
394 	bus_dmamap_destroy(sc->sc_dmat, map);
395 }
396 
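/*
 * Set up IOMMU mappings for an already-loaded map: gather every page
 * backing the map's segments into the iomap, allocate a matching DVMA
 * range from sc_dvmamap, program one IOTTE per page and finally rewrite
 * the segment addresses to point into DVMA space.
 */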
397 static int
398 iommu_iomap_load_map(struct astro_softc *sc, bus_dmamap_t map, int flags)
399 {
400 	struct iommu_map_state *ims = map->_dm_cookie;
401 	struct iommu_page_map *ipm = &ims->ims_map;
402 	struct iommu_page_entry *e;
403 	int err, seg, s;
404 	paddr_t pa, paend;
405 	vaddr_t va;
406 	bus_size_t sgsize;
407 	bus_size_t align, boundary;
408 	u_long dvmaddr;
409 	bus_addr_t dva;
410 	int i;
411 
412 	/* XXX */
413 	boundary = map->_dm_boundary;
414 	align = PAGE_SIZE;
415 
416 	iommu_iomap_clear_pages(ims);
417 
418 	for (seg = 0; seg < map->dm_nsegs; seg++) {
419 		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
420 
421 		paend = round_page(ds->ds_addr + ds->ds_len);
422 		for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
423 		     pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
424 			err = iommu_iomap_insert_page(ims, va, pa);
425 			if (err) {
426 				printf("iomap insert error: %d for "
427 				    "va 0x%lx pa 0x%lx\n", err, va, pa);
428 				bus_dmamap_unload(sc->sc_dmat, map);
429 				iommu_iomap_clear_pages(ims);
430 			}
431 		}
432 	}
433 
434 	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
435 	/* XXXNH */
436 	s = splhigh();
437 	err = extent_alloc(sc->sc_dvmamap, sgsize, align, boundary,
438 	    EX_NOWAIT | EX_BOUNDZERO, &dvmaddr);
439 	splx(s);
440 	if (err)
441 		return (err);
442 
443 	ims->ims_dvmastart = dvmaddr;
444 	ims->ims_dvmasize = sgsize;
445 
446 	dva = dvmaddr;
447 	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) {
448 		e->ipe_dva = dva;
449 		iommu_enter(sc, e->ipe_dva, e->ipe_pa, e->ipe_va, flags);
450 		dva += PAGE_SIZE;
451 	}
452 
453 	for (seg = 0; seg < map->dm_nsegs; seg++) {
454 		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
455 		ds->ds_addr = iommu_iomap_translate(ims, ds->ds_addr);
456 	}
457 
458 	return (0);
459 }
460 
461 int
462 iommu_dvmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
463     struct proc *p, int flags)
464 {
465 	struct astro_softc *sc = v;
466 	int err;
467 
468 	err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
469 	if (err)
470 		return (err);
471 
472 	return iommu_iomap_load_map(sc, map, flags);
473 }
474 
475 int
476 iommu_dvmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
477 {
478 	struct astro_softc *sc = v;
479 	int err;
480 
481 	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
482 	if (err)
483 		return (err);
484 
485 	return iommu_iomap_load_map(sc, map, flags);
486 }
487 
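/*
 * The uio and raw load paths below are passed straight through to the
 * parent DMA tag; no IOMMU mappings are set up for them.
 */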
488 int
489 iommu_dvmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
490 {
491 	struct astro_softc *sc = v;
492 
493 	printf("load_uio\n");
494 
495 	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
496 }
497 
498 int
499 iommu_dvmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
500     int nsegs, bus_size_t size, int flags)
501 {
502 	struct astro_softc *sc = v;
503 
504 	printf("load_raw\n");
505 
506 	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags));
507 }
508 
509 void
510 iommu_dvmamap_unload(void *v, bus_dmamap_t map)
511 {
512 	struct astro_softc *sc = v;
513 	struct iommu_map_state *ims = map->_dm_cookie;
514 	struct iommu_page_map *ipm = &ims->ims_map;
515 	struct iommu_page_entry *e;
516 	int err, i, s;
517 
518 	/* Remove the IOMMU entries. */
519 	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e)
520 		iommu_remove(sc, e->ipe_dva);
521 
522 	/* Clear the iomap. */
523 	iommu_iomap_clear_pages(ims);
524 
525 	bus_dmamap_unload(sc->sc_dmat, map);
526 
527 	s = splhigh();
528 	err = extent_free(sc->sc_dvmamap, ims->ims_dvmastart,
529 	    ims->ims_dvmasize, EX_NOWAIT);
530 	splx(s);
531 	if (err)
532 		printf("warning: %ld of DVMA space lost\n", ims->ims_dvmasize);
533 	ims->ims_dvmastart = 0;
534 	ims->ims_dvmasize = 0;
535 }
536 
537 void
538 iommu_dvmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
539     bus_size_t len, int ops)
540 {
541 	/* Nothing to do; DMA is cache-coherent. */
542 }
543 
544 int
545 iommu_dvmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
546     bus_size_t boundary, bus_dma_segment_t *segs,
547     int nsegs, int *rsegs, int flags)
548 {
549 	struct astro_softc *sc = v;
550 
551 	return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
552 	    segs, nsegs, rsegs, flags));
553 }
554 
555 void
556 iommu_dvmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
557 {
558 	struct astro_softc *sc = v;
559 
560 	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
561 }
562 
563 int
564 iommu_dvmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
565     void **kvap, int flags)
566 {
567 	struct astro_softc *sc = v;
568 
569 	return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
570 }
571 
572 void
573 iommu_dvmamem_unmap(void *v, void *kva, size_t size)
574 {
575 	struct astro_softc *sc = v;
576 
577 	bus_dmamem_unmap(sc->sc_dmat, kva, size);
578 }
579 
580 paddr_t
581 iommu_dvmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
582     int prot, int flags)
583 {
584 	struct astro_softc *sc = v;
585 
586 	return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
587 }
588 
589 /*
590  * Utility function used by splay tree to order page entries by pa.
591  */
592 static inline int
593 iomap_compare(struct iommu_page_entry *a, struct iommu_page_entry *b)
594 {
595 	return ((a->ipe_pa > b->ipe_pa) ? 1 :
596 		(a->ipe_pa < b->ipe_pa) ? -1 : 0);
597 }
598 
599 SPLAY_PROTOTYPE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
600 
601 SPLAY_GENERATE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
602 
603 /*
604  * Create a new iomap.
605  */
606 struct iommu_map_state *
607 iommu_iomap_create(int n)
608 {
609 	struct iommu_map_state *ims;
610 
611 	/* Safety for heavily fragmented data, such as mbufs */
612 	n += 4;
613 	if (n < 16)
614 		n = 16;
615 
616 	ims = malloc(sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]),
617 	    M_DEVBUF, M_NOWAIT | M_ZERO);
618 	if (ims == NULL)
619 		return (NULL);
620 
621 	/* Initialize the map. */
622 	ims->ims_map.ipm_maxpage = n;
623 	SPLAY_INIT(&ims->ims_map.ipm_tree);
624 
625 	return (ims);
626 }
627 
628 /*
629  * Destroy an iomap.
630  */
631 void
632 iommu_iomap_destroy(struct iommu_map_state *ims)
633 {
634 #ifdef DIAGNOSTIC
635 	if (ims->ims_map.ipm_pagecnt > 0)
636 		printf("iommu_iomap_destroy: %d page entries in use\n",
637 		    ims->ims_map.ipm_pagecnt);
638 #endif
639 
640 	free(ims, M_DEVBUF);
641 }
642 
643 /*
644  * Insert a pa entry in the iomap.
645  */
646 int
647 iommu_iomap_insert_page(struct iommu_map_state *ims, vaddr_t va, paddr_t pa)
648 {
649 	struct iommu_page_map *ipm = &ims->ims_map;
650 	struct iommu_page_entry *e;
651 
652 	if (ipm->ipm_pagecnt >= ipm->ipm_maxpage) {
653 		struct iommu_page_entry ipe;
654 
655 		ipe.ipe_pa = pa;
656 		if (SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &ipe))
657 			return (0);
658 
659 		return (ENOMEM);
660 	}
661 
662 	e = &ipm->ipm_map[ipm->ipm_pagecnt];
663 
664 	e->ipe_pa = pa;
665 	e->ipe_va = va;
666 	e->ipe_dva = 0;
667 
668 	e = SPLAY_INSERT(iommu_page_tree, &ipm->ipm_tree, e);
669 
670 	/* Duplicates are okay, but only count them once. */
671 	if (e)
672 		return (0);
673 
674 	++ipm->ipm_pagecnt;
675 
676 	return (0);
677 }
678 
679 /*
680  * Translate a physical address (pa) into a DVMA address.
681  */
682 bus_addr_t
683 iommu_iomap_translate(struct iommu_map_state *ims, paddr_t pa)
684 {
685 	struct iommu_page_map *ipm = &ims->ims_map;
686 	struct iommu_page_entry *e;
687 	struct iommu_page_entry pe;
688 	paddr_t offset = pa & PAGE_MASK;
689 
690 	pe.ipe_pa = trunc_page(pa);
691 
692 	e = SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &pe);
693 
694 	if (e == NULL) {
695 		panic("couldn't find pa %lx", pa);
696 		return 0;
697 	}
698 
699 	return (e->ipe_dva | offset);
700 }
701 
702 /*
703  * Clear the iomap table and tree.
704  */
705 void
706 iommu_iomap_clear_pages(struct iommu_map_state *ims)
707 {
708 	ims->ims_map.ipm_pagecnt = 0;
709 	SPLAY_INIT(&ims->ims_map.ipm_tree);
710 }
711 
712 /*
713  * Add an entry to the IOMMU table.
714  */
715 void
716 iommu_enter(struct astro_softc *sc, bus_addr_t dva, paddr_t pa, vaddr_t va,
717     int flags)
718 {
719 	volatile uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
720 	uint64_t tte;
721 	uint32_t ci;
722 
723 #ifdef ASTRODEBUG
724 	printf("iommu_enter dva %lx, pa %lx, va %lx\n", dva, pa, va);
725 #endif
726 
727 #ifdef DIAGNOSTIC
728 	tte = le64toh(*tte_ptr);
729 
730 	if (tte & IOTTE_V) {
731 		printf("Overwriting valid tte entry (dva %lx pa %lx "
732 		    "&tte %p tte %llx)\n", dva, pa, tte_ptr, tte);
733 		extent_print(sc->sc_dvmamap);
734 		panic("IOMMU overwrite");
735 	}
736 #endif
737 
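	/*
	 * Record the coherence index for this kernel VA so the IOC can do
	 * coherent DMA, then flush the updated entry from the data cache
	 * because the IOC fetches IOTTEs from memory.
	 */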
738 	ci = lci(HPPA_SID_KERNEL, va);
739 
740 	tte = (pa & IOTTE_PAMASK) | ((ci >> 12) & IOTTE_CI);
741 	tte |= IOTTE_V;
742 
743 	*tte_ptr = htole64(tte);
744 	fdcache(HPPA_SID_KERNEL, (vaddr_t)tte_ptr, sizeof(*tte_ptr));
745 }
746 
747 /*
748  * Remove an entry from the IOMMU table.
749  */
750 void
751 iommu_remove(struct astro_softc *sc, bus_addr_t dva)
752 {
753 	volatile struct astro_regs *r = sc->sc_regs;
754 	uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
755 	uint64_t tte;
756 
757 #ifdef DIAGNOSTIC
758 	if (dva != trunc_page(dva)) {
759 		printf("iommu_remove: unaligned dva: %lx\n", dva);
760 		dva = trunc_page(dva);
761 	}
762 #endif
763 
764 	tte = le64toh(*tte_ptr);
765 
766 #ifdef DIAGNOSTIC
767 	if ((tte & IOTTE_V) == 0) {
768 		printf("Removing invalid tte entry (dva %lx &tte %p "
769 		    "tte %llx)\n", dva, tte_ptr, tte);
770 		extent_print(sc->sc_dvmamap);
771 		panic("IOMMU remove overwrite");
772 	}
773 #endif
774 
775 	*tte_ptr = htole64(tte & ~IOTTE_V);
776 
777 	/* Flush IOMMU. */
778 	r->tlb_pcom = htole32(dva | PAGE_SHIFT);
779 }
780