/*	$NetBSD: uturn.c,v 1.3 2020/06/14 01:40:04 chs Exp $	*/

/*	$OpenBSD: uturn.c,v 1.6 2007/12/29 01:26:14 kettenis Exp $	*/

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2007 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * References:
 * 1. Hardware Cache Coherent Input/Output. Hewlett-Packard Journal, February
 *    1996.
 * 2. PA-RISC 1.1 Architecture and Instruction Set Reference Manual,
 *    Hewlett-Packard, February 1994, Third Edition
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/malloc.h>
#include <sys/extent.h>
#include <sys/mbuf.h>
#include <sys/tree.h>

#include <uvm/uvm.h>

#include <sys/bus.h>
#include <machine/iomod.h>
#include <machine/autoconf.h>

#include <hppa/dev/cpudevs.h>

#define UTURNDEBUG
#ifdef UTURNDEBUG

#define	DPRINTF(s)	do {	\
	if (uturndebug)		\
		printf s;	\
} while(0)

int uturndebug = 0;
#else
#define	DPRINTF(s)	/* */
#endif

struct uturn_regs {
	/* Runway Supervisory Set */
	int32_t		unused1[12];
	uint32_t	io_command;		/* Offset 12 */
#define	UTURN_CMD_TLB_PURGE		33	/* Purge I/O TLB entry */
#define	UTURN_CMD_TLB_DIRECT_WRITE	35	/* I/O TLB Writes */

	uint32_t	io_status;		/* Offset 13 */
	uint32_t	io_control;		/* Offset 14 */
#define	UTURN_IOCTRL_TLB_REAL		0x00000000
#define	UTURN_IOCTRL_TLB_ERROR		0x00010000
#define	UTURN_IOCTRL_TLB_NORMAL		0x00020000

#define	UTURN_IOCTRL_MODE_OFF		0x00000000
#define	UTURN_IOCTRL_MODE_INCLUDE	0x00000080
#define	UTURN_IOCTRL_MODE_PEEK		0x00000180

#define	UTURN_VIRTUAL_MODE	\
	(UTURN_IOCTRL_TLB_NORMAL | UTURN_IOCTRL_MODE_INCLUDE)

#define	UTURN_REAL_MODE		\
	UTURN_IOCTRL_MODE_INCLUDE

	int32_t		unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t	io_err_resp;		/* Offset  0 */
	uint32_t	io_err_info;		/* Offset  1 */
	uint32_t	io_err_req;		/* Offset  2 */
	uint32_t	io_err_resp_hi;		/* Offset  3 */
	uint32_t	io_tlb_entry_m;		/* Offset  4 */
	uint32_t	io_tlb_entry_l;		/* Offset  5 */
	uint32_t	unused3[1];
	uint32_t	io_pdir_base;		/* Offset  7 */
	uint32_t	io_io_low_hv;		/* Offset  8 */
	uint32_t	io_io_high_hv;		/* Offset  9 */
	uint32_t	unused4[1];
	uint32_t	io_chain_id_mask;	/* Offset 11 */
	uint32_t	unused5[2];
	uint32_t	io_io_low;		/* Offset 14 */
	uint32_t	io_io_high;		/* Offset 15 */
};


/* Uturn supports 256 TLB entries */
#define	UTURN_CHAINID_SHIFT	8
#define	UTURN_CHAINID_MASK	0xff
#define	UTURN_TLB_ENTRIES	(1 << UTURN_CHAINID_SHIFT)

#define	UTURN_IOVP_SIZE		PAGE_SIZE
#define	UTURN_IOVP_SHIFT	PAGE_SHIFT
#define	UTURN_IOVP_MASK		PAGE_MASK

#define	UTURN_IOVA(iovp, off)	((iovp) | (off))
#define	UTURN_IOVP(iova)	((iova) & ~UTURN_IOVP_MASK)
#define	UTURN_IOVA_INDEX(iova)	((iova) >> UTURN_IOVP_SHIFT)
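
/*
 * e.g. (illustrative, 4KB pages): UTURN_IOVA_INDEX(0x345678) == 0x345,
 * i.e. the pdir slot is simply the IOVA's page number.
 */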

struct uturn_softc {
	device_t sc_dv;

	bus_dma_tag_t sc_dmat;
	struct uturn_regs volatile *sc_regs;
	uint64_t *sc_pdir;
	uint32_t sc_chainid_shift;

	char sc_mapname[20];
	struct extent *sc_map;

	struct hppa_bus_dma_tag sc_dmatag;
};

/*
 * per-map IOVA page table
 */
struct uturn_page_entry {
	SPLAY_ENTRY(uturn_page_entry) upe_node;
	paddr_t	upe_pa;
	vaddr_t	upe_va;
	bus_addr_t upe_iova;
};

struct uturn_page_map {
	SPLAY_HEAD(uturn_page_tree, uturn_page_entry) upm_tree;
	int upm_maxpage;	/* Size of allocated page map */
	int upm_pagecnt;	/* Number of entries in use */
	struct uturn_page_entry	upm_map[1];
};
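
/*
 * Note: upm_map[] is a variable-length array; uturn_iomap_create()
 * allocates sizeof(struct uturn_map_state) plus (n - 1) additional
 * uturn_page_entry slots, so upm_map effectively holds upm_maxpage
 * entries.
 */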

/*
 * per-map UTURN state
 */
struct uturn_map_state {
	struct uturn_softc *ums_sc;
	bus_addr_t ums_iovastart;
	bus_size_t ums_iovasize;
	struct uturn_page_map ums_map;	/* map must be last (array at end) */
};

int	uturnmatch(device_t, cfdata_t, void *);
void	uturnattach(device_t, device_t, void *);
static device_t uturn_callback(device_t, struct confargs *);

CFATTACH_DECL_NEW(uturn, sizeof(struct uturn_softc),
    uturnmatch, uturnattach, NULL, NULL);

extern struct cfdriver uturn_cd;

int uturn_dmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t, int,
    bus_dmamap_t *);
void uturn_dmamap_destroy(void *, bus_dmamap_t);
int uturn_dmamap_load(void *, bus_dmamap_t, void *, bus_size_t, struct proc *,
    int);
int uturn_dmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
int uturn_dmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
int uturn_dmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *, int,
    bus_size_t, int);
void uturn_dmamap_unload(void *, bus_dmamap_t);
void uturn_dmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int uturn_dmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
    bus_dma_segment_t *, int, int *, int);
void uturn_dmamem_free(void *, bus_dma_segment_t *, int);
int uturn_dmamem_map(void *, bus_dma_segment_t *, int, size_t, void **, int);
void uturn_dmamem_unmap(void *, void *, size_t);
paddr_t uturn_dmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);

static void uturn_iommu_enter(struct uturn_softc *, bus_addr_t, pa_space_t,
    vaddr_t, paddr_t);
static void uturn_iommu_remove(struct uturn_softc *, bus_addr_t, bus_size_t);

struct uturn_map_state *uturn_iomap_create(int);
void	uturn_iomap_destroy(struct uturn_map_state *);
int	uturn_iomap_insert_page(struct uturn_map_state *, vaddr_t, paddr_t);
bus_addr_t uturn_iomap_translate(struct uturn_map_state *, paddr_t);
void	uturn_iomap_clear_pages(struct uturn_map_state *);

static int uturn_iomap_load_map(struct uturn_softc *, bus_dmamap_t, int);

const struct hppa_bus_dma_tag uturn_dmat = {
	NULL,
	uturn_dmamap_create, uturn_dmamap_destroy,
	uturn_dmamap_load, uturn_dmamap_load_mbuf,
	uturn_dmamap_load_uio, uturn_dmamap_load_raw,
	uturn_dmamap_unload, uturn_dmamap_sync,

	uturn_dmamem_alloc, uturn_dmamem_free, uturn_dmamem_map,
	uturn_dmamem_unmap, uturn_dmamem_mmap
};
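
/*
 * Call flow sketch (descriptive, no new mechanism): a child device's
 * bus_dmamap_load() arrives via sc_dmatag as uturn_dmamap_load(), which
 * first calls the parent tag's bus_dmamap_load() on sc_dmat and then
 * uturn_iomap_load_map() to enter pdir mappings and rewrite each
 * segment's ds_addr with its IOVA.
 */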

int
uturnmatch(device_t parent, cfdata_t cf, void *aux)
{
	struct confargs *ca = aux;

	/* there will be only one */
	if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
	    ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
		return 0;

	if (ca->ca_type.iodc_model == 0x58 &&
	    ca->ca_type.iodc_revision >= 0x20)
		return 0;

	return 1;
}

void
uturnattach(device_t parent, device_t self, void *aux)
{
	struct confargs *ca = aux, nca;
	struct uturn_softc *sc = device_private(self);
	bus_space_handle_t ioh;
	volatile struct uturn_regs *r;
	struct pglist pglist;
	int iova_bits;
	vaddr_t va;
	psize_t size;
	int i;

	if (bus_space_map(ca->ca_iot, ca->ca_hpa, IOMOD_HPASIZE, 0, &ioh)) {
		aprint_error(": can't map IO space\n");
		return;
	}

	sc->sc_dv = self;
	sc->sc_dmat = ca->ca_dmatag;
	sc->sc_regs = r = bus_space_vaddr(ca->ca_iot, ioh);

	aprint_normal(": %x-%x", r->io_io_low << 16, r->io_io_high << 16);
	aprint_normal(": %x-%x", r->io_io_low_hv << 16, r->io_io_high_hv << 16);

	aprint_normal(": %s rev %d\n",
	    ca->ca_type.iodc_revision < 0x10 ? "U2" : "UTurn",
	    ca->ca_type.iodc_revision & 0xf);

	/*
	 * Setup the iommu.
	 */

	/* XXX 28 bits gives us 256MB of iova space */
	/* Calculate based on %age of RAM */
	iova_bits = 28;

	/*
	 * size is the number of pdir entries (64 bits each) in bytes,
	 * one entry per IOVA page.
	 */
	size = (1 << (iova_bits - UTURN_IOVP_SHIFT)) * sizeof(uint64_t);
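
	/*
	 * For example, iova_bits == 28 with 4KB pages gives
	 * 1 << (28 - 12) == 65536 entries, i.e. a 512KB pdir covering
	 * 256MB of IOVA space.
	 */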

	/*
	 * Chainid is the uppermost bits of an IOVP, used to determine
	 * which TLB entry an IOVP will use.
	 */
	sc->sc_chainid_shift = iova_bits - UTURN_CHAINID_SHIFT;
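
	/*
	 * e.g. with iova_bits == 28 the shift is 20: the chainid is
	 * (iova >> 20) & UTURN_CHAINID_MASK, and each of the 256 I/O TLB
	 * entries covers a 1MB chunk of IOVA space.
	 */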

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &pglist, 1, 0) != 0)
		panic("%s: no memory", __func__);

	va = (vaddr_t)VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	sc->sc_pdir = (uint64_t *)va;

	memset(sc->sc_pdir, 0, size);

	r->io_chain_id_mask = UTURN_CHAINID_MASK << sc->sc_chainid_shift;
	r->io_pdir_base = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));

	r->io_tlb_entry_m = 0;
	r->io_tlb_entry_l = 0;

	/* Write the (zero, i.e. invalid) entry into every I/O TLB slot. */
	for (i = 0; i < UTURN_TLB_ENTRIES; i++) {
		r->io_command =
		    UTURN_CMD_TLB_DIRECT_WRITE | (i << sc->sc_chainid_shift);
	}
	/*
	 * Go to "Virtual Mode"
	 */
	r->io_control = UTURN_VIRTUAL_MODE;

	snprintf(sc->sc_mapname, sizeof(sc->sc_mapname), "%s_map",
	    device_xname(sc->sc_dv));
	sc->sc_map = extent_create(sc->sc_mapname, 0, (1 << iova_bits),
	    0, 0, EX_WAITOK);

	sc->sc_dmatag = uturn_dmat;
	sc->sc_dmatag._cookie = sc;

	/*
	 * U2/UTurn is actually a combination of an Upper Bus Converter (UBC)
	 * and a Lower Bus Converter (LBC).  This driver attaches to the UBC;
	 * the LBC isn't very interesting, so we skip it.  This is easy, since
	 * it is always module 63, hence the MAXMODBUS - 1 below.
	 */
	nca = *ca;
	nca.ca_hpabase = r->io_io_low << 16;
	nca.ca_dmatag = &sc->sc_dmatag;
	nca.ca_nmodules = MAXMODBUS - 1;
	pdc_scanbus(self, &nca, uturn_callback);
}

static device_t
uturn_callback(device_t self, struct confargs *ca)
{

	return config_found_sm_loc(self, "gedoens", NULL, ca, mbprint,
	    mbsubmatch);
}

/*
 * PDIR entry format (HP bit numbering)
 *
 * +-------+----------------+----------------------------------------------+
 * |0     3|4             15|16                                          31|
 * | PPN   | Virtual Index  |         Physical Page Number (PPN)           |
 * | [0:3] |    [0:11]      |                 [4:19]                       |
 * +-------+----------------+----------------------------------------------+
 *
 * +--------------+--------+--------+-------+-------+------+-------+-------+
 * |0           19|20    24|   25   |  26   | 27 28 |  29  |  30   |  31   |
 * |     PPN      |  Rsvd  |   PH   |Update | Rsvd  | Lock | Safe  | Valid |
 * |   [20:39]    |        | Enable |Enable |       |Enable| DMA   |       |
 * +--------------+--------+--------+-------+-------+------+-------+-------+
 */

#define UTURN_PENTRY_PREFETCH	0x40
#define UTURN_PENTRY_UPDATE	0x20
#define UTURN_PENTRY_LOCK	0x04	/* eisa devices only */
#define UTURN_PENTRY_SAFEDMA	0x02	/* use safe dma - for subcacheline */
#define UTURN_PENTRY_VALID	0x01
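
/*
 * Worked example (illustrative): with pa == 0x12345000, the hint bits
 * LOCK|SAFEDMA|VALID == 0x07 give a low word of 0x12345007; the entry
 * is then or'ed with ((uint64_t)ci >> 12) << 48 for the virtual index
 * and, for a 40-bit pa, with PPN[0:3] in the top nibble.
 */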

static void
uturn_iommu_enter(struct uturn_softc *sc, bus_addr_t iova, pa_space_t sp,
    vaddr_t va, paddr_t pa)
{
	uint64_t pdir_entry;
	uint64_t *pdirp;
	uint32_t ci; /* coherent index */

	pdirp = &sc->sc_pdir[UTURN_IOVA_INDEX(iova)];

	DPRINTF(("%s: iova %lx pdir %p pdirp %p pa %lx", __func__, iova,
	    sc->sc_pdir, pdirp, pa));

	ci = lci(HPPA_SID_KERNEL, va);

	/* setup hints, etc */
	pdir_entry = (UTURN_PENTRY_LOCK | UTURN_PENTRY_SAFEDMA |
	     UTURN_PENTRY_VALID);

	/*
	 * bottom 36 bits of pa map directly into entry to form PPN[4:39]
	 * leaving last 12 bits for hints, etc.
	 */
	pdir_entry |= (pa & ~PAGE_MASK);

	/* mask off top PPN bits */
	pdir_entry &= 0x0000ffffffffffffUL;

	/* insert the virtual index bits */
	pdir_entry |= (((uint64_t)ci >> 12) << 48);

	/* PPN[0:3] of the 40bit PPN go in entry[0:3] */
	pdir_entry |= ((((uint64_t)pa & 0x000f000000000000UL) >> 48) << 60);

	*pdirp = pdir_entry;

	DPRINTF((": pdir_entry %llx\n", pdir_entry));

	/*
	 * We could use PDC_MODEL_CAPABILITIES here
	 */
	fdcache(HPPA_SID_KERNEL, (vaddr_t)pdirp, sizeof(uint64_t));
}


static void
uturn_iommu_remove(struct uturn_softc *sc, bus_addr_t iova, bus_size_t size)
{
	uint32_t chain_size = 1 << sc->sc_chainid_shift;
	bus_addr_t purge_iova = iova;
	bus_size_t len;

	KASSERT((iova & PAGE_MASK) == 0);
	KASSERT((size & PAGE_MASK) == 0);

	DPRINTF(("%s: sc %p iova %lx size %lx\n", __func__, sc, iova, size));
	len = size;
	while (len != 0) {
		uint64_t *pdirp = &sc->sc_pdir[UTURN_IOVA_INDEX(iova)];

		/* XXX Just the valid bit??? */
		*pdirp = 0;

		/*
		 * We could use PDC_MODEL_CAPABILITIES here
		 */
		fdcache(HPPA_SID_KERNEL, (vaddr_t)pdirp, sizeof(uint64_t));

		iova += PAGE_SIZE;
		len -= PAGE_SIZE;
	}

	/* Purge the I/O TLB over the original range, not past its end. */
	len = size + chain_size;
	while (len > chain_size) {
		sc->sc_regs->io_command = UTURN_CMD_TLB_PURGE | purge_iova;
		purge_iova += chain_size;
		len -= chain_size;
	}
}
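
/*
 * Purge example (illustrative): with chain_size == 1MB, removing one 4KB
 * page issues a single UTURN_CMD_TLB_PURGE for the chain containing it;
 * len counts down from size + chain_size, so at least one purge is always
 * issued.
 */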

int
uturn_dmamap_create(void *v, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct uturn_softc *sc = v;
	bus_dmamap_t map;
	struct uturn_map_state *ums;
	int error;

	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
	    boundary, flags, &map);
	if (error)
		return (error);

	ums = uturn_iomap_create(atop(round_page(size)));
	if (ums == NULL) {
		bus_dmamap_destroy(sc->sc_dmat, map);
		return (ENOMEM);
	}

	ums->ums_sc = sc;
	map->_dm_cookie = ums;
	*dmamap = map;

	return (0);
}
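
/*
 * Note: the _dm_cookie set above carries the per-map uturn_map_state
 * through uturn_dmamap_load()/uturn_dmamap_unload() and is freed in
 * uturn_dmamap_destroy().
 */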

void
uturn_dmamap_destroy(void *v, bus_dmamap_t map)
{
	struct uturn_softc *sc = v;

	/*
	 * The specification (man page) requires a loaded
	 * map to be unloaded before it is destroyed.
	 */
	if (map->dm_nsegs)
		uturn_dmamap_unload(sc, map);

	if (map->_dm_cookie)
		uturn_iomap_destroy(map->_dm_cookie);
	map->_dm_cookie = NULL;

	bus_dmamap_destroy(sc->sc_dmat, map);
}

static int
uturn_iomap_load_map(struct uturn_softc *sc, bus_dmamap_t map, int flags)
{
	struct uturn_map_state *ums = map->_dm_cookie;
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;
	int err, seg, s;
	paddr_t pa, paend;
	vaddr_t va;
	bus_size_t sgsize;
	bus_size_t align, boundary;
	u_long iovaddr;
	bus_addr_t iova;
	int i;

	/* XXX */
	boundary = map->_dm_boundary;
	align = PAGE_SIZE;

	uturn_iomap_clear_pages(ums);

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];

		paend = round_page(ds->ds_addr + ds->ds_len);
		for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
		     pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
			err = uturn_iomap_insert_page(ums, va, pa);
			if (err) {
				printf("iomap insert error: %d for "
				    "va 0x%lx pa 0x%lx\n", err, va, pa);
				bus_dmamap_unload(sc->sc_dmat, map);
				uturn_iomap_clear_pages(ums);
				return (err);
			}
		}
	}

	sgsize = ums->ums_map.upm_pagecnt * PAGE_SIZE;
	/* XXXNH */
	s = splhigh();
	err = extent_alloc(sc->sc_map, sgsize, align, boundary,
	    EX_NOWAIT | EX_BOUNDZERO, &iovaddr);
	splx(s);
	if (err)
		return (err);

	ums->ums_iovastart = iovaddr;
	ums->ums_iovasize = sgsize;

	iova = iovaddr;
	for (i = 0, e = upm->upm_map; i < upm->upm_pagecnt; ++i, ++e) {
		e->upe_iova = iova;
		uturn_iommu_enter(sc, e->upe_iova, HPPA_SID_KERNEL, e->upe_va,
		    e->upe_pa);
		iova += PAGE_SIZE;
	}

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
		ds->ds_addr = uturn_iomap_translate(ums, ds->ds_addr);
	}

	return (0);
}

int
uturn_dmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
    struct proc *p, int flags)
{
	struct uturn_softc *sc = v;
	int err;

	err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
	if (err)
		return (err);

	return uturn_iomap_load_map(sc, map, flags);
}

int
uturn_dmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
{
	struct uturn_softc *sc = v;
	int err;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
	if (err)
		return (err);

	return uturn_iomap_load_map(sc, map, flags);
}

int
uturn_dmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
{
	struct uturn_softc *sc = v;

	DPRINTF(("%s\n", __func__));

	/* XXX unlike load/load_mbuf, this doesn't call uturn_iomap_load_map */
	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
}

int
uturn_dmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct uturn_softc *sc = v;

	DPRINTF(("%s\n", __func__));

	/* XXX unlike load/load_mbuf, this doesn't call uturn_iomap_load_map */
	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size,
	    flags));
}

void
uturn_dmamap_unload(void *v, bus_dmamap_t map)
{
	struct uturn_softc *sc = v;
	struct uturn_map_state *ums = map->_dm_cookie;
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;
	int err, i, s;

	/* Remove the IOMMU entries. */
	for (i = 0, e = upm->upm_map; i < upm->upm_pagecnt; ++i, ++e)
		uturn_iommu_remove(sc, e->upe_iova, PAGE_SIZE);

	/* Clear the iomap. */
	uturn_iomap_clear_pages(ums);

	bus_dmamap_unload(sc->sc_dmat, map);

	s = splhigh();
	err = extent_free(sc->sc_map, ums->ums_iovastart,
	    ums->ums_iovasize, EX_NOWAIT);
	splx(s);
	if (err)
		printf("warning: %ld of IOVA space lost\n", ums->ums_iovasize);
	ums->ums_iovastart = 0;
	ums->ums_iovasize = 0;
}

void
uturn_dmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
    bus_size_t len, int ops)
{
	/* Nothing to do; DMA is cache-coherent. */
}

int
uturn_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	struct uturn_softc *sc = v;

	return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
	    segs, nsegs, rsegs, flags));
}

void
uturn_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
	struct uturn_softc *sc = v;

	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
}

int
uturn_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
    void **kvap, int flags)
{
	struct uturn_softc *sc = v;

	return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
}

void
uturn_dmamem_unmap(void *v, void *kva, size_t size)
{
	struct uturn_softc *sc = v;

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
}

paddr_t
uturn_dmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	struct uturn_softc *sc = v;

	return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
}

/*
 * Utility function used by splay tree to order page entries by pa.
 */
static inline int
upe_compare(struct uturn_page_entry *a, struct uturn_page_entry *b)
{
	return ((a->upe_pa > b->upe_pa) ? 1 :
		(a->upe_pa < b->upe_pa) ? -1 : 0);
}

SPLAY_PROTOTYPE(uturn_page_tree, uturn_page_entry, upe_node, upe_compare);

SPLAY_GENERATE(uturn_page_tree, uturn_page_entry, upe_node, upe_compare);

/*
 * Create a new iomap.
 */
struct uturn_map_state *
uturn_iomap_create(int n)
{
	struct uturn_map_state *ums;

	/* Safety for heavily fragmented data, such as mbufs */
	n += 4;
	if (n < 16)
		n = 16;

	ums = malloc(sizeof(*ums) + (n - 1) * sizeof(ums->ums_map.upm_map[0]),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ums == NULL)
		return (NULL);

	/* Initialize the map. */
	ums->ums_map.upm_maxpage = n;
	SPLAY_INIT(&ums->ums_map.upm_tree);

	return (ums);
}

/*
 * Destroy an iomap.
 */
void
uturn_iomap_destroy(struct uturn_map_state *ums)
{
	KASSERT(ums->ums_map.upm_pagecnt == 0);

	free(ums, M_DEVBUF);
}

/*
 * Insert a pa entry in the iomap.
 */
int
uturn_iomap_insert_page(struct uturn_map_state *ums, vaddr_t va, paddr_t pa)
{
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;

	if (upm->upm_pagecnt >= upm->upm_maxpage) {
		struct uturn_page_entry upe;

		upe.upe_pa = pa;
		if (SPLAY_FIND(uturn_page_tree, &upm->upm_tree, &upe))
			return (0);

		return (ENOMEM);
	}

	e = &upm->upm_map[upm->upm_pagecnt];

	e->upe_pa = pa;
	e->upe_va = va;
	e->upe_iova = 0;

	e = SPLAY_INSERT(uturn_page_tree, &upm->upm_tree, e);

	/* Duplicates are okay, but only count them once. */
	if (e)
		return (0);

	++upm->upm_pagecnt;

	return (0);
}

/*
 * Translate a physical address (pa) into an IOVA address.
 */
bus_addr_t
uturn_iomap_translate(struct uturn_map_state *ums, paddr_t pa)
{
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;
	struct uturn_page_entry pe;
	paddr_t offset = pa & PAGE_MASK;

	pe.upe_pa = trunc_page(pa);

	e = SPLAY_FIND(uturn_page_tree, &upm->upm_tree, &pe);

	if (e == NULL)
		panic("%s: couldn't find pa %lx", __func__, pa);

	return (e->upe_iova | offset);
}

/*
 * Clear the iomap table and tree.
 */
void
uturn_iomap_clear_pages(struct uturn_map_state *ums)
{
	ums->ums_map.upm_pagecnt = 0;
	SPLAY_INIT(&ums->ums_map.upm_tree);
}