/*	$OpenBSD: astro.c,v 1.13 2011/04/07 15:30:15 miod Exp $	*/

/*
 * Copyright (c) 2007 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/reboot.h>
#include <sys/tree.h>

#include <uvm/uvm_extern.h>

#include <machine/iomod.h>
#include <machine/autoconf.h>

#include <hppa/dev/cpudevs.h>

struct astro_regs {
	u_int32_t	rid;
	u_int32_t	pad0000;
	u_int32_t	ioc_ctrl;
	u_int32_t	pad0008;
	u_int8_t	resv1[0x0300 - 0x0010];
	u_int64_t	lmmio_direct0_base;
	u_int64_t	lmmio_direct0_mask;
	u_int64_t	lmmio_direct0_route;
	u_int64_t	lmmio_direct1_base;
	u_int64_t	lmmio_direct1_mask;
	u_int64_t	lmmio_direct1_route;
	u_int64_t	lmmio_direct2_base;
	u_int64_t	lmmio_direct2_mask;
	u_int64_t	lmmio_direct2_route;
	u_int64_t	lmmio_direct3_base;
	u_int64_t	lmmio_direct3_mask;
	u_int64_t	lmmio_direct3_route;
	u_int64_t	lmmio_dist_base;
	u_int64_t	lmmio_dist_mask;
	u_int64_t	lmmio_dist_route;
	u_int64_t	gmmio_dist_base;
	u_int64_t	gmmio_dist_mask;
	u_int64_t	gmmio_dist_route;
	u_int64_t	ios_dist_base;
	u_int64_t	ios_dist_mask;
	u_int64_t	ios_dist_route;
	u_int8_t	resv2[0x03c0 - 0x03a8];
	u_int64_t	ios_direct_base;
	u_int64_t	ios_direct_mask;
	u_int64_t	ios_direct_route;
	u_int8_t	resv3[0x22000 - 0x03d8];
	u_int64_t	func_id;
	u_int64_t	func_class;
	u_int8_t	resv4[0x22040 - 0x22010];
	u_int64_t	rope_config;
	u_int8_t	resv5[0x22050 - 0x22048];
	u_int64_t	rope_debug;
	u_int8_t	resv6[0x22200 - 0x22058];
	u_int64_t	rope0_control;
	u_int64_t	rope1_control;
	u_int64_t	rope2_control;
	u_int64_t	rope3_control;
	u_int64_t	rope4_control;
	u_int64_t	rope5_control;
	u_int64_t	rope6_control;
	u_int64_t	rope7_control;
	u_int8_t	resv7[0x22300 - 0x22240];
	u_int32_t	tlb_ibase;
	u_int32_t	pad22300;
	u_int32_t	tlb_imask;
	u_int32_t	pad22308;
	u_int32_t	tlb_pcom;
	u_int32_t	pad22310;
	u_int32_t	tlb_tcnfg;
	u_int32_t	pad22318;
	u_int64_t	tlb_pdir_base;
};

#define ASTRO_IOC_CTRL_TE	0x0001	/* TOC Enable */
#define ASTRO_IOC_CTRL_CE	0x0002	/* Coalesce Enable */
#define ASTRO_IOC_CTRL_DE	0x0004	/* Dillon Enable */
#define ASTRO_IOC_CTRL_IE	0x0008	/* IOS Enable */
#define ASTRO_IOC_CTRL_OS	0x0010	/* Outbound Synchronous */
#define ASTRO_IOC_CTRL_IS	0x0020	/* Inbound Synchronous */
#define ASTRO_IOC_CTRL_RC	0x0040	/* Read Current Enable */
#define ASTRO_IOC_CTRL_L0	0x0080	/* 0-length Read Enable */
#define ASTRO_IOC_CTRL_RM	0x0100	/* Real Mode */
#define ASTRO_IOC_CTRL_NC	0x0200	/* Non-coherent Mode */
#define ASTRO_IOC_CTRL_ID	0x0400	/* Interrupt Disable */
#define ASTRO_IOC_CTRL_D4	0x0800	/* Disable 4-byte Coalescing */
#define ASTRO_IOC_CTRL_CC	0x1000	/* Increase Coalescing counter value */
#define ASTRO_IOC_CTRL_DD	0x2000	/* Disable distr. range coalescing */
#define ASTRO_IOC_CTRL_DC	0x4000	/* Disable the coalescing counter */

#define IOTTE_V		0x8000000000000000LL	/* Entry valid */
#define IOTTE_PAMASK	0x000000fffffff000LL
#define IOTTE_CI	0x00000000000000ffLL	/* Coherent index */
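/*
 * As used by iommu_enter() below, each 64-bit pdir entry (IOTTE) holds
 * the valid bit, the physical page address, and, in its low byte, the
 * coherence index returned by the lci instruction.  Entries are stored
 * little-endian.
 */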

struct astro_softc {
	struct device sc_dv;

	bus_dma_tag_t sc_dmat;
	struct astro_regs volatile *sc_regs;
	u_int64_t *sc_pdir;

	char sc_dvmamapname[20];
	struct extent *sc_dvmamap;
	struct hppa_bus_dma_tag sc_dmatag;
};

/*
 * per-map DVMA page table
 */
struct iommu_page_entry {
	SPLAY_ENTRY(iommu_page_entry) ipe_node;
	paddr_t	ipe_pa;
	vaddr_t	ipe_va;
	bus_addr_t ipe_dva;
};

struct iommu_page_map {
	SPLAY_HEAD(iommu_page_tree, iommu_page_entry) ipm_tree;
	int ipm_maxpage;	/* Size of allocated page map */
	int ipm_pagecnt;	/* Number of entries in use */
	struct iommu_page_entry	ipm_map[1];
};
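/*
 * Note: ipm_map is a variable-length array; iommu_iomap_create() below
 * allocates the structure with room for the requested number of entries
 * (the declaration already accounts for one of them).
 */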

/*
 * per-map IOMMU state
 */
struct iommu_map_state {
	struct astro_softc *ims_sc;
	bus_addr_t ims_dvmastart;
	bus_size_t ims_dvmasize;
	struct iommu_page_map ims_map;	/* map must be last (array at end) */
};

int	astro_match(struct device *, void *, void *);
void	astro_attach(struct device *, struct device *, void *);

struct cfattach astro_ca = {
	sizeof(struct astro_softc), astro_match, astro_attach
};

struct cfdriver astro_cd = {
	NULL, "astro", DV_DULL
};

int	iommu_dvmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t,
	    int, bus_dmamap_t *);
void	iommu_dvmamap_destroy(void *, bus_dmamap_t);
int	iommu_dvmamap_load(void *, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	iommu_iomap_load_map(struct astro_softc *, bus_dmamap_t, int);
int	iommu_dvmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
int	iommu_dvmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
int	iommu_dvmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *,
	    int, bus_size_t, int);
void	iommu_dvmamap_unload(void *, bus_dmamap_t);
void	iommu_dvmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int	iommu_dvmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
	    bus_dma_segment_t *, int, int *, int);
void	iommu_dvmamem_free(void *, bus_dma_segment_t *, int);
int	iommu_dvmamem_map(void *, bus_dma_segment_t *, int, size_t,
	    caddr_t *, int);
void	iommu_dvmamem_unmap(void *, caddr_t, size_t);
paddr_t	iommu_dvmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);

void	iommu_enter(struct astro_softc *, bus_addr_t, paddr_t, vaddr_t, int);
void	iommu_remove(struct astro_softc *, bus_addr_t);

struct iommu_map_state *iommu_iomap_create(int);
void	iommu_iomap_destroy(struct iommu_map_state *);
int	iommu_iomap_insert_page(struct iommu_map_state *, vaddr_t, paddr_t);
bus_addr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t);
void	iommu_iomap_clear_pages(struct iommu_map_state *);

const struct hppa_bus_dma_tag astro_dmat = {
	NULL,
	iommu_dvmamap_create, iommu_dvmamap_destroy,
	iommu_dvmamap_load, iommu_dvmamap_load_mbuf,
	iommu_dvmamap_load_uio, iommu_dvmamap_load_raw,
	iommu_dvmamap_unload, iommu_dvmamap_sync,

	iommu_dvmamem_alloc, iommu_dvmamem_free, iommu_dvmamem_map,
	iommu_dvmamem_unmap, iommu_dvmamem_mmap
};

int
astro_match(struct device *parent, void *cfdata, void *aux)
{
	struct confargs *ca = aux;

	/* Astro is a U-Turn variant. */
	if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
	    ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
		return 0;

	if (ca->ca_type.iodc_model == 0x58 &&
	    ca->ca_type.iodc_revision >= 0x20)
		return 1;

	return 0;
}

void
astro_attach(struct device *parent, struct device *self, void *aux)
{
	struct confargs *ca = aux, nca;
	struct astro_softc *sc = (struct astro_softc *)self;
	volatile struct astro_regs *r;
	bus_space_handle_t ioh;
	u_int32_t rid, ioc_ctrl;
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *m;
	struct pglist mlist;
	int iova_bits;

	sc->sc_dmat = ca->ca_dmatag;
	if (bus_space_map(ca->ca_iot, ca->ca_hpa, sizeof(struct astro_regs),
	    0, &ioh)) {
		printf(": can't map IO space\n");
		return;
	}
	sc->sc_regs = r = (struct astro_regs *)ca->ca_hpa;

	rid = letoh32(r->rid);
	printf(": Astro rev %d.%d\n", (rid & 7) + 1, (rid >> 3) & 3);

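	/* Run with coalescing, real mode and non-coherent mode disabled. */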
	ioc_ctrl = letoh32(r->ioc_ctrl);
	ioc_ctrl &= ~ASTRO_IOC_CTRL_CE;
	ioc_ctrl &= ~ASTRO_IOC_CTRL_RM;
	ioc_ctrl &= ~ASTRO_IOC_CTRL_NC;
	r->ioc_ctrl = htole32(ioc_ctrl);

	/*
	 * Setup the iommu.
	 */

	/* XXX This gives us 256MB of iova space. */
	iova_bits = 28;

	r->tlb_ibase = htole32(0);
	r->tlb_imask = htole32(0xffffffff << iova_bits);

	/* Page size is 4K. */
	r->tlb_tcnfg = htole32(0);

	/* Flush TLB. */
	r->tlb_pcom = htole32(31);
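	/*
	 * Note: the value written to tlb_pcom is presumably the log2 of
	 * the address range to purge; 31 covers the entire iova space.
	 */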

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

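	/*
	 * With iova_bits = 28 and a 4K page size this works out to 2^16
	 * pdir entries of 8 bytes each, i.e. 512KB of physically
	 * contiguous memory.
	 */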
	size = (1 << (iova_bits - PAGE_SHIFT)) * sizeof(u_int64_t);
	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &mlist,
	    1, UVM_PLA_NOWAIT) != 0)
		panic("astroattach: no memory");

	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		panic("astroattach: no memory");
	sc->sc_pdir = (u_int64_t *)va;

	m = TAILQ_FIRST(&mlist);
	r->tlb_pdir_base = htole64(VM_PAGE_TO_PHYS(m));

	/* Map the pages. */
	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, pa,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(sc->sc_pdir, 0, size);

	/*
	 * The PDC might have set up some devices to do DMA.  It will do
	 * this for the onboard USB controller if a USB keyboard is used
	 * for console input.  In that case, bad things will happen if we
	 * enable iova space.  So reset the PDC devices before we do that.
	 * Don't do this if we're using a serial console though, since it
	 * will stop working if we do.  This is fine since the serial port
	 * doesn't do DMA.
	 */
	if (PAGE0->mem_cons.pz_class != PCL_DUPLEX)
		pdc_call((iodcio_t)pdc, 0, PDC_IO, PDC_IO_RESET_DEVICES);

	/* Enable iova space. */
	r->tlb_ibase = htole32(1);

	/*
	 * Now that the hardware is working, allocate the DVMA map.
	 */
	snprintf(sc->sc_dvmamapname, sizeof(sc->sc_dvmamapname),
	    "%s_dvma", sc->sc_dv.dv_xname);
	sc->sc_dvmamap = extent_create(sc->sc_dvmamapname, 0, (1 << iova_bits),
	    M_DEVBUF, 0, 0, EX_NOWAIT);

	sc->sc_dmatag = astro_dmat;
	sc->sc_dmatag._cookie = sc;
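	/*
	 * Child devices are handed sc_dmatag below, so every DMA map
	 * they create goes through the iommu_* wrappers and thus through
	 * this IOMMU.
	 */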

	nca = *ca;	/* clone from us */
	nca.ca_hpamask = HPPA_IOBEGIN;
	nca.ca_dmatag = &sc->sc_dmatag;
	pdc_scanbus(self, &nca, MAXMODBUS, 0, 0);
}

int
iommu_dvmamap_create(void *v, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct astro_softc *sc = v;
	bus_dmamap_t map;
	struct iommu_map_state *ims;
	int error;

	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
	    boundary, flags, &map);
	if (error)
		return (error);

	ims = iommu_iomap_create(atop(round_page(size)));
	if (ims == NULL) {
		bus_dmamap_destroy(sc->sc_dmat, map);
		return (ENOMEM);
	}

	ims->ims_sc = sc;
	map->_dm_cookie = ims;
	*dmamap = map;

	return (0);
}

void
iommu_dvmamap_destroy(void *v, bus_dmamap_t map)
{
	struct astro_softc *sc = v;

	/*
	 * The specification (man page) requires a loaded
	 * map to be unloaded before it is destroyed.
	 */
	if (map->dm_nsegs)
		iommu_dvmamap_unload(sc, map);

	if (map->_dm_cookie)
		iommu_iomap_destroy(map->_dm_cookie);
	map->_dm_cookie = NULL;

	bus_dmamap_destroy(sc->sc_dmat, map);
}

int
iommu_iomap_load_map(struct astro_softc *sc, bus_dmamap_t map, int flags)
{
	struct iommu_map_state *ims = map->_dm_cookie;
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;
	int err, seg, s;
	paddr_t pa, paend;
	vaddr_t va;
	bus_size_t sgsize;
	bus_size_t align, boundary;
	u_long dvmaddr;
	bus_addr_t dva;
	int i;

	/* XXX */
	boundary = map->_dm_boundary;
	align = PAGE_SIZE;

	iommu_iomap_clear_pages(ims);

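	/* First pass: collect the physical pages backing each segment. */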
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];

		paend = round_page(ds->ds_addr + ds->ds_len);
		for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
		     pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
			err = iommu_iomap_insert_page(ims, va, pa);
			if (err) {
				printf("iomap insert error: %d for "
				    "va 0x%lx pa 0x%lx\n", err, va, pa);
				bus_dmamap_unload(sc->sc_dmat, map);
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}
		}
	}

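	/* Allocate one contiguous range of iova space for all the pages. */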
	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
	s = splhigh();
	err = extent_alloc(sc->sc_dvmamap, sgsize, align, 0, boundary,
	    EX_NOWAIT | EX_BOUNDZERO, &dvmaddr);
	splx(s);
	if (err)
		return (err);

	ims->ims_dvmastart = dvmaddr;
	ims->ims_dvmasize = sgsize;

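	/* Program an IOMMU entry for each page, assigning iova addresses. */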
	dva = dvmaddr;
	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) {
		e->ipe_dva = dva;
		iommu_enter(sc, e->ipe_dva, e->ipe_pa, e->ipe_va, flags);
		dva += PAGE_SIZE;
	}

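	/* Rewrite the segment addresses to point into iova space. */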
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
		ds->ds_addr = iommu_iomap_translate(ims, ds->ds_addr);
	}

	return (0);
}

int
iommu_dvmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
    struct proc *p, int flags)
{
	struct astro_softc *sc = v;
	int err;

	err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
	if (err)
		return (err);

	return iommu_iomap_load_map(sc, map, flags);
}

int
iommu_dvmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
{
	struct astro_softc *sc = v;
	int err;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
	if (err)
		return (err);

	return iommu_iomap_load_map(sc, map, flags);
}

int
iommu_dvmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
{
	struct astro_softc *sc = v;

	printf("load_uio\n");

	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
}

int
iommu_dvmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct astro_softc *sc = v;

	printf("load_raw\n");

	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags));
}

void
iommu_dvmamap_unload(void *v, bus_dmamap_t map)
{
	struct astro_softc *sc = v;
	struct iommu_map_state *ims = map->_dm_cookie;
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;
	int err, i, s;

	/* Remove the IOMMU entries. */
	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e)
		iommu_remove(sc, e->ipe_dva);

	/* Clear the iomap. */
	iommu_iomap_clear_pages(ims);

	bus_dmamap_unload(sc->sc_dmat, map);

	s = splhigh();
	err = extent_free(sc->sc_dvmamap, ims->ims_dvmastart,
	    ims->ims_dvmasize, EX_NOWAIT);
	splx(s);
	if (err)
		printf("warning: %ld of DVMA space lost\n", ims->ims_dvmasize);
	ims->ims_dvmastart = 0;
	ims->ims_dvmasize = 0;
}

void
iommu_dvmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
    bus_size_t len, int ops)
{
	/* Nothing to do; DMA is cache-coherent. */
}

int
iommu_dvmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	struct astro_softc *sc = v;

	return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
	    segs, nsegs, rsegs, flags));
}

void
iommu_dvmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
	struct astro_softc *sc = v;

	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
}

int
iommu_dvmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	struct astro_softc *sc = v;

	return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
}

void
iommu_dvmamem_unmap(void *v, caddr_t kva, size_t size)
{
	struct astro_softc *sc = v;

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
}

paddr_t
iommu_dvmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	struct astro_softc *sc = v;

	return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
}

/*
 * Utility function used by splay tree to order page entries by pa.
 */
static inline int
iomap_compare(struct iommu_page_entry *a, struct iommu_page_entry *b)
{
	return ((a->ipe_pa > b->ipe_pa) ? 1 :
		(a->ipe_pa < b->ipe_pa) ? -1 : 0);
}

SPLAY_PROTOTYPE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);

SPLAY_GENERATE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);

/*
 * Create a new iomap.
 */
struct iommu_map_state *
iommu_iomap_create(int n)
{
	struct iommu_map_state *ims;

	/* Safety for heavily fragmented data, such as mbufs */
	n += 4;
	if (n < 16)
		n = 16;

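	/*
	 * The structure declaration already includes one ipm_map entry,
	 * so only n - 1 additional entries need to be allocated.
	 */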
	ims = malloc(sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ims == NULL)
		return (NULL);

	/* Initialize the map. */
	ims->ims_map.ipm_maxpage = n;
	SPLAY_INIT(&ims->ims_map.ipm_tree);

	return (ims);
}

/*
 * Destroy an iomap.
 */
void
iommu_iomap_destroy(struct iommu_map_state *ims)
{
#ifdef DIAGNOSTIC
	if (ims->ims_map.ipm_pagecnt > 0)
		printf("iommu_iomap_destroy: %d page entries in use\n",
		    ims->ims_map.ipm_pagecnt);
#endif

	free(ims, M_DEVBUF);
}

/*
 * Insert a pa entry in the iomap.
 */
int
iommu_iomap_insert_page(struct iommu_map_state *ims, vaddr_t va, paddr_t pa)
{
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;

	if (ipm->ipm_pagecnt >= ipm->ipm_maxpage) {
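		/* The map is full; succeed only if this page is already mapped. */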
		struct iommu_page_entry ipe;

		ipe.ipe_pa = pa;
		if (SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &ipe))
			return (0);

		return (ENOMEM);
	}

	e = &ipm->ipm_map[ipm->ipm_pagecnt];

	e->ipe_pa = pa;
	e->ipe_va = va;
	e->ipe_dva = 0;

	e = SPLAY_INSERT(iommu_page_tree, &ipm->ipm_tree, e);

	/* Duplicates are okay, but only count them once. */
	if (e)
		return (0);

	++ipm->ipm_pagecnt;

	return (0);
}

/*
 * Translate a physical address (pa) into a DVMA address.
 */
bus_addr_t
iommu_iomap_translate(struct iommu_map_state *ims, paddr_t pa)
{
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;
	struct iommu_page_entry pe;
	paddr_t offset = pa & PAGE_MASK;

	pe.ipe_pa = trunc_page(pa);

	e = SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &pe);

	if (e == NULL) {
		panic("couldn't find pa %lx", pa);
		return 0;
	}

	return (e->ipe_dva | offset);
}

/*
 * Clear the iomap table and tree.
 */
void
iommu_iomap_clear_pages(struct iommu_map_state *ims)
{
	ims->ims_map.ipm_pagecnt = 0;
	SPLAY_INIT(&ims->ims_map.ipm_tree);
}

/*
 * Add an entry to the IOMMU table.
 */
void
iommu_enter(struct astro_softc *sc, bus_addr_t dva, paddr_t pa, vaddr_t va,
    int flags)
{
	volatile u_int64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	u_int64_t tte;
	u_int32_t ci;

#ifdef DEBUG
	printf("iommu_enter dva %lx, pa %lx, va %lx\n", dva, pa, va);
#endif

#ifdef DIAGNOSTIC
	tte = letoh64(*tte_ptr);

	if (tte & IOTTE_V) {
		printf("Overwriting valid tte entry (dva %lx pa %lx "
		    "&tte %p tte %llx)\n", dva, pa, tte_ptr, tte);
		extent_print(sc->sc_dvmamap);
		panic("IOMMU overwrite");
	}
#endif

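	/*
	 * Fetch the coherence index for the kernel virtual address with
	 * the lci instruction; it goes into the low bits of the TTE,
	 * presumably so the IOMMU can keep DMA coherent with the caches.
	 */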
	mtsp(HPPA_SID_KERNEL, 1);
	__asm volatile("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (va));

	tte = (pa & IOTTE_PAMASK) | ((ci >> 12) & IOTTE_CI);
	tte |= IOTTE_V;

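	/*
	 * Store the new entry and flush it from the data cache (fdc) so
	 * the IOMMU, which reads the pdir from memory, sees the update.
	 */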
	*tte_ptr = htole64(tte);
	__asm volatile("fdc 0(%%sr1, %0)\n\tsync" : : "r" (tte_ptr));
}

/*
 * Remove an entry from the IOMMU table.
 */
void
iommu_remove(struct astro_softc *sc, bus_addr_t dva)
{
	volatile struct astro_regs *r = sc->sc_regs;
	u_int64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	u_int64_t tte;

#ifdef DIAGNOSTIC
	if (dva != trunc_page(dva)) {
		printf("iommu_remove: unaligned dva: %lx\n", dva);
		dva = trunc_page(dva);
	}
#endif

	tte = letoh64(*tte_ptr);

#ifdef DIAGNOSTIC
	if ((tte & IOTTE_V) == 0) {
		printf("Removing invalid tte entry (dva %lx &tte %p "
		    "tte %llx)\n", dva, tte_ptr, tte);
		extent_print(sc->sc_dvmamap);
		panic("IOMMU remove overwrite");
	}
#endif

	*tte_ptr = htole64(tte & ~IOTTE_V);

	/* Flush IOMMU. */
	r->tlb_pcom = htole32(dva | PAGE_SHIFT);
}
758