/*	$NetBSD: uturn.c,v 1.4 2021/04/24 23:36:39 thorpej Exp $	*/

/*	$OpenBSD: uturn.c,v 1.6 2007/12/29 01:26:14 kettenis Exp $	*/

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2007 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * References:
 * 1. Hardware Cache Coherent Input/Output. Hewlett-Packard Journal, February
 *    1996.
 * 2. PA-RISC 1.1 Architecture and Instruction Set Reference Manual,
 *    Hewlett-Packard, February 1994, Third Edition
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/malloc.h>
#include <sys/extent.h>
#include <sys/mbuf.h>
#include <sys/tree.h>

#include <uvm/uvm.h>

#include <sys/bus.h>
#include <machine/iomod.h>
#include <machine/autoconf.h>

#include <hppa/dev/cpudevs.h>

#define UTURNDEBUG
#ifdef UTURNDEBUG

#define	DPRINTF(s)	do {	\
	if (uturndebug)		\
		printf s;	\
} while(0)

int uturndebug = 0;
#else
#define	DPRINTF(s)	/* */
#endif

struct uturn_regs {
	/* Runway Supervisory Set */
	int32_t		unused1[12];
	uint32_t	io_command;		/* Offset 12 */
#define	UTURN_CMD_TLB_PURGE		33	/* Purge I/O TLB entry */
#define	UTURN_CMD_TLB_DIRECT_WRITE	35	/* I/O TLB Writes */

	uint32_t	io_status;		/* Offset 13 */
	uint32_t	io_control;		/* Offset 14 */
#define	UTURN_IOCTRL_TLB_REAL		0x00000000
#define	UTURN_IOCTRL_TLB_ERROR		0x00010000
#define	UTURN_IOCTRL_TLB_NORMAL		0x00020000

#define	UTURN_IOCTRL_MODE_OFF		0x00000000
#define	UTURN_IOCTRL_MODE_INCLUDE	0x00000080
#define	UTURN_IOCTRL_MODE_PEEK		0x00000180

#define	UTURN_VIRTUAL_MODE	\
	(UTURN_IOCTRL_TLB_NORMAL | UTURN_IOCTRL_MODE_INCLUDE)

#define	UTURN_REAL_MODE		\
	UTURN_IOCTRL_MODE_INCLUDE

	int32_t		unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t	io_err_resp;		/* Offset  0 */
	uint32_t	io_err_info;		/* Offset  1 */
	uint32_t	io_err_req;		/* Offset  2 */
	uint32_t	io_err_resp_hi;		/* Offset  3 */
	uint32_t	io_tlb_entry_m;		/* Offset  4 */
	uint32_t	io_tlb_entry_l;		/* Offset  5 */
	uint32_t	unused3[1];
	uint32_t	io_pdir_base;		/* Offset  7 */
	uint32_t	io_io_low_hv;		/* Offset  8 */
	uint32_t	io_io_high_hv;		/* Offset  9 */
	uint32_t	unused4[1];
	uint32_t	io_chain_id_mask;	/* Offset 11 */
	uint32_t	unused5[2];
	uint32_t	io_io_low;		/* Offset 14 */
	uint32_t	io_io_high;		/* Offset 15 */
};


/* UTurn supports 256 TLB entries */
#define	UTURN_CHAINID_SHIFT	8
#define	UTURN_CHAINID_MASK	0xff
#define	UTURN_TLB_ENTRIES	(1 << UTURN_CHAINID_SHIFT)

#define	UTURN_IOVP_SIZE		PAGE_SIZE
#define	UTURN_IOVP_SHIFT	PAGE_SHIFT
#define	UTURN_IOVP_MASK		PAGE_MASK

#define	UTURN_IOVA(iovp, off)	((iovp) | (off))
#define	UTURN_IOVP(iova)	((iova) & UTURN_IOVP_MASK)
#define	UTURN_IOVA_INDEX(iova)	((iova) >> UTURN_IOVP_SHIFT)
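
/*
 * For example, with the hppa 4KB base page size (UTURN_IOVP_SHIFT == 12),
 * UTURN_IOVA_INDEX(0x00123456) is 0x123 (the pdir slot for that address)
 * and UTURN_IOVA(0x00123000, 0x456) reassembles the full I/O virtual
 * address.  (Illustrative values only.)
 */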

struct uturn_softc {
	device_t sc_dv;

	bus_dma_tag_t sc_dmat;
	struct uturn_regs volatile *sc_regs;
	uint64_t *sc_pdir;
	uint32_t sc_chainid_shift;

	char sc_mapname[20];
	struct extent *sc_map;

	struct hppa_bus_dma_tag sc_dmatag;
};

/*
 * per-map IOVA page table
 */
struct uturn_page_entry {
	SPLAY_ENTRY(uturn_page_entry) upe_node;
	paddr_t	upe_pa;
	vaddr_t	upe_va;
	bus_addr_t upe_iova;
};

struct uturn_page_map {
	SPLAY_HEAD(uturn_page_tree, uturn_page_entry) upm_tree;
	int upm_maxpage;	/* Size of allocated page map */
	int upm_pagecnt;	/* Number of entries in use */
	struct uturn_page_entry	upm_map[1];
};
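
/*
 * upm_map[1] is the pre-C99 "struct hack": uturn_iomap_create() below
 * over-allocates the structure so the array really holds upm_maxpage
 * entries.
 */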

/*
 * per-map UTURN state
 */
struct uturn_map_state {
	struct uturn_softc *ums_sc;
	bus_addr_t ums_iovastart;
	bus_size_t ums_iovasize;
	struct uturn_page_map ums_map;	/* map must be last (array at end) */
};

int	uturnmatch(device_t, cfdata_t, void *);
void	uturnattach(device_t, device_t, void *);
static device_t uturn_callback(device_t, struct confargs *);

CFATTACH_DECL_NEW(uturn, sizeof(struct uturn_softc),
    uturnmatch, uturnattach, NULL, NULL);

extern struct cfdriver uturn_cd;

int uturn_dmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t, int,
    bus_dmamap_t *);
void uturn_dmamap_destroy(void *, bus_dmamap_t);
int uturn_dmamap_load(void *, bus_dmamap_t, void *, bus_size_t, struct proc *,
    int);
int uturn_dmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
int uturn_dmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
int uturn_dmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *, int,
    bus_size_t, int);
void uturn_dmamap_unload(void *, bus_dmamap_t);
void uturn_dmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int uturn_dmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
    bus_dma_segment_t *, int, int *, int);
void uturn_dmamem_free(void *, bus_dma_segment_t *, int);
int uturn_dmamem_map(void *, bus_dma_segment_t *, int, size_t, void **, int);
void uturn_dmamem_unmap(void *, void *, size_t);
paddr_t uturn_dmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);

static void uturn_iommu_enter(struct uturn_softc *, bus_addr_t, pa_space_t,
    vaddr_t, paddr_t);
static void uturn_iommu_remove(struct uturn_softc *, bus_addr_t, bus_size_t);

struct uturn_map_state *uturn_iomap_create(int);
void	uturn_iomap_destroy(struct uturn_map_state *);
int	uturn_iomap_insert_page(struct uturn_map_state *, vaddr_t, paddr_t);
bus_addr_t uturn_iomap_translate(struct uturn_map_state *, paddr_t);
void	uturn_iomap_clear_pages(struct uturn_map_state *);

static int uturn_iomap_load_map(struct uturn_softc *, bus_dmamap_t, int);

const struct hppa_bus_dma_tag uturn_dmat = {
	NULL,
	uturn_dmamap_create, uturn_dmamap_destroy,
	uturn_dmamap_load, uturn_dmamap_load_mbuf,
	uturn_dmamap_load_uio, uturn_dmamap_load_raw,
	uturn_dmamap_unload, uturn_dmamap_sync,

	uturn_dmamem_alloc, uturn_dmamem_free, uturn_dmamem_map,
	uturn_dmamem_unmap, uturn_dmamem_mmap
};
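
/*
 * uturnattach() hands child devices a copy of this tag (with _cookie
 * pointing at our softc) via ca_dmatag, so all of their bus_dma(9) calls
 * are routed through the wrappers above and hence through the IOMMU.
 */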

int
uturnmatch(device_t parent, cfdata_t cf, void *aux)
{
	struct confargs *ca = aux;

	/* there will be only one */
	if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
	    ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
		return 0;

	if (ca->ca_type.iodc_model == 0x58 &&
	    ca->ca_type.iodc_revision >= 0x20)
		return 0;

	return 1;
}

void
uturnattach(device_t parent, device_t self, void *aux)
{
	struct confargs *ca = aux, nca;
	struct uturn_softc *sc = device_private(self);
	bus_space_handle_t ioh;
	volatile struct uturn_regs *r;
	struct pglist pglist;
	int iova_bits;
	vaddr_t va;
	psize_t size;
	int i;

	if (bus_space_map(ca->ca_iot, ca->ca_hpa, IOMOD_HPASIZE, 0, &ioh)) {
		aprint_error(": can't map IO space\n");
		return;
	}

	sc->sc_dv = self;
	sc->sc_dmat = ca->ca_dmatag;
	sc->sc_regs = r = bus_space_vaddr(ca->ca_iot, ioh);

	aprint_normal(": %x-%x", r->io_io_low << 16, r->io_io_high << 16);
	aprint_normal(": %x-%x", r->io_io_low_hv << 16, r->io_io_high_hv << 16);

	aprint_normal(": %s rev %d\n",
	    ca->ca_type.iodc_revision < 0x10 ? "U2" : "UTurn",
	    ca->ca_type.iodc_revision & 0xf);

	/*
	 * Set up the IOMMU.
	 */

	/*
	 * XXX 28 bits gives us 256MB of IOVA space; this should really be
	 * calculated as a percentage of RAM.
	 */
	iova_bits = 28;

	/*
	 * size is the size of the pdir in bytes: one 64-bit entry per
	 * IOVA page.
	 */
	size = (1 << (iova_bits - UTURN_IOVP_SHIFT)) * sizeof(uint64_t);
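	/*
	 * E.g. with iova_bits == 28 and the hppa 4KB base page size:
	 * 2^(28 - 12) == 65536 entries of 8 bytes each, i.e. a 512KB pdir.
	 */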

	/*
	 * The chain id is the uppermost bits of an IOVP and determines
	 * which TLB entry an IOVP will use.
	 */
	sc->sc_chainid_shift = iova_bits - UTURN_CHAINID_SHIFT;
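	/*
	 * E.g. with iova_bits == 28 the shift is 20, so IOVA bits [27:20]
	 * select one of the 256 TLB entries and each entry covers a 1MB
	 * chain of IOVA space.
	 */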

	/*
	 * Allocate memory for the I/O pagetables.  They need to be
	 * physically contiguous.
	 */

	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &pglist, 1, 0) != 0)
		panic("%s: no memory", __func__);

	va = (vaddr_t)VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	sc->sc_pdir = (uint64_t *)va;

	memset(sc->sc_pdir, 0, size);

	r->io_chain_id_mask = UTURN_CHAINID_MASK << sc->sc_chainid_shift;
	r->io_pdir_base = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));

	r->io_tlb_entry_m = 0;
	r->io_tlb_entry_l = 0;

	for (i = 0; i < UTURN_TLB_ENTRIES; i++) {
		r->io_command =
		    UTURN_CMD_TLB_DIRECT_WRITE | (i << sc->sc_chainid_shift);
	}

	/*
	 * Go to "Virtual Mode"
	 */
	r->io_control = UTURN_VIRTUAL_MODE;

	snprintf(sc->sc_mapname, sizeof(sc->sc_mapname), "%s_map",
	    device_xname(sc->sc_dv));
	sc->sc_map = extent_create(sc->sc_mapname, 0, (1 << iova_bits),
	    0, 0, EX_WAITOK);

	sc->sc_dmatag = uturn_dmat;
	sc->sc_dmatag._cookie = sc;

	/*
	 * U2/UTurn is actually a combination of an Upper Bus Converter (UBC)
	 * and a Lower Bus Converter (LBC).  This driver attaches to the UBC;
	 * the LBC isn't very interesting, so we skip it.  This is easy, since
	 * it always is module 63, hence the MAXMODBUS - 1 below.
	 */
	nca = *ca;
	nca.ca_hpabase = r->io_io_low << 16;
	nca.ca_dmatag = &sc->sc_dmatag;
	nca.ca_nmodules = MAXMODBUS - 1;
	pdc_scanbus(self, &nca, uturn_callback);
}

static device_t
uturn_callback(device_t self, struct confargs *ca)
{

	return config_found(self, ca, mbprint,
	    CFARG_SUBMATCH, mbsubmatch,
	    CFARG_EOL);
}

/*
 * PDIR entry format (HP bit numbering, i.e. bit 0 is the MSB)
 *
 * +-------+----------------+----------------------------------------------+
 * |0     3|4             15|16                                          31|
 * | PPN   | Virtual Index  |         Physical Page Number (PPN)           |
 * | [0:3] |    [0:11]      |                 [4:19]                       |
 * +-------+----------------+----------------------------------------------+
 *
 * +--------------+--------+--------+--------+-------+------+-------+-------+
 * |0           19|20    24|   25   |   26   | 27 28 |  29  |  30   |  31   |
 * |     PPN      |  Rsvd  | PH     | Update | Rsvd  | Lock | Safe  | Valid |
 * |   [20:39]    |        | Enable | Enable |       |Enable| DMA   |       |
 * +--------------+--------+--------+--------+-------+------+-------+-------+
 *
 */

#define UTURN_PENTRY_PREFETCH	0x40
#define UTURN_PENTRY_UPDATE	0x20
#define UTURN_PENTRY_LOCK	0x04	/* eisa devices only */
#define UTURN_PENTRY_SAFEDMA	0x02	/* use safe dma - for subcacheline */
#define UTURN_PENTRY_VALID	0x01
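
/*
 * Illustrative example (made-up values): for pa 0x12345000 and a coherent
 * index ci of 0xabc000, uturn_iommu_enter() below builds the entry as
 *
 *	hints (LOCK | SAFEDMA | VALID)	0x0000000000000007
 *	| (pa & ~PAGE_MASK)		0x0000000012345007
 *	| ((ci >> 12) << 48)		0x0abc000012345007
 *
 * PPN[0:3] stays zero because this pa fits in 32 bits.
 */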

static void
uturn_iommu_enter(struct uturn_softc *sc, bus_addr_t iova, pa_space_t sp,
    vaddr_t va, paddr_t pa)
{
	uint64_t pdir_entry;
	uint64_t *pdirp;
	uint32_t ci; /* coherent index */

	pdirp = &sc->sc_pdir[UTURN_IOVA_INDEX(iova)];

	DPRINTF(("%s: iova %lx pdir %p pdirp %p pa %lx", __func__, iova,
	    sc->sc_pdir, pdirp, pa));

	ci = lci(HPPA_SID_KERNEL, va);

	/* setup hints, etc */
	pdir_entry = (UTURN_PENTRY_LOCK | UTURN_PENTRY_SAFEDMA |
	     UTURN_PENTRY_VALID);

	/*
	 * bottom 36 bits of pa map directly into entry to form PPN[4:39]
	 * leaving last 12 bits for hints, etc.
	 */
	pdir_entry |= (pa & ~PAGE_MASK);

	/* mask off top PPN bits */
	pdir_entry &= 0x0000ffffffffffffUL;

	/* insert the virtual index bits */
	pdir_entry |= (((uint64_t)ci >> 12) << 48);

	/* PPN[0:3] of the 40-bit PPN go in entry[0:3] */
	pdir_entry |= ((((uint64_t)pa & 0x000f000000000000UL) >> 48) << 60);

	*pdirp = pdir_entry;

	DPRINTF((": pdir_entry %llx\n", pdir_entry));

	/*
	 * We could use PDC_MODEL_CAPABILITIES here
	 */
	fdcache(HPPA_SID_KERNEL, (vaddr_t)pdirp, sizeof(uint64_t));
}


static void
uturn_iommu_remove(struct uturn_softc *sc, bus_addr_t iova, bus_size_t size)
{
	uint32_t chain_size = 1 << sc->sc_chainid_shift;
	bus_addr_t purge_iova = iova;
	bus_size_t len;

	KASSERT((iova & PAGE_MASK) == 0);
	KASSERT((size & PAGE_MASK) == 0);

	DPRINTF(("%s: sc %p iova %lx size %lx\n", __func__, sc, iova, size));
	len = size;
	while (len != 0) {
		uint64_t *pdirp = &sc->sc_pdir[UTURN_IOVA_INDEX(iova)];

		/* XXX Just the valid bit??? */
		*pdirp = 0;

		/*
		 * We could use PDC_MODEL_CAPABILITIES here
		 */
		fdcache(HPPA_SID_KERNEL, (vaddr_t)pdirp, sizeof(uint64_t));

		iova += PAGE_SIZE;
		len -= PAGE_SIZE;
	}

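	/*
	 * Purge the I/O TLB entry for each chain-sized block covering the
	 * range just unmapped; starting len at size + chain_size makes the
	 * loop below run ceil(size / chain_size) times (once for the usual
	 * single-page case).
	 */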
	len = size + chain_size;

	while (len > chain_size) {
		sc->sc_regs->io_command = UTURN_CMD_TLB_PURGE | purge_iova;
		purge_iova += chain_size;
		len -= chain_size;
	}
}

int
uturn_dmamap_create(void *v, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct uturn_softc *sc = v;
	bus_dmamap_t map;
	struct uturn_map_state *ums;
	int error;

	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
	    boundary, flags, &map);
	if (error)
		return (error);

	ums = uturn_iomap_create(atop(round_page(size)));
	if (ums == NULL) {
		bus_dmamap_destroy(sc->sc_dmat, map);
		return (ENOMEM);
	}

	ums->ums_sc = sc;
	map->_dm_cookie = ums;
	*dmamap = map;

	return (0);
}

void
uturn_dmamap_destroy(void *v, bus_dmamap_t map)
{
	struct uturn_softc *sc = v;

	/*
	 * The specification (man page) requires a loaded
	 * map to be unloaded before it is destroyed.
	 */
	if (map->dm_nsegs)
		uturn_dmamap_unload(sc, map);

	if (map->_dm_cookie)
		uturn_iomap_destroy(map->_dm_cookie);
	map->_dm_cookie = NULL;

	bus_dmamap_destroy(sc->sc_dmat, map);
}

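/*
 * Load the IOVA map for a dmamap whose physical segments have already been
 * set up: gather each physical page into the per-map splay tree, allocate
 * one contiguous IOVA range for them all, enter every page into the IOMMU
 * pdir, then rewrite the segment addresses to IOVAs.
 */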
static int
uturn_iomap_load_map(struct uturn_softc *sc, bus_dmamap_t map, int flags)
{
	struct uturn_map_state *ums = map->_dm_cookie;
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;
	int err, seg, s;
	paddr_t pa, paend;
	vaddr_t va;
	bus_size_t sgsize;
	bus_size_t align, boundary;
	u_long iovaddr;
	bus_addr_t iova;
	int i;

	/* XXX */
	boundary = map->_dm_boundary;
	align = PAGE_SIZE;

	uturn_iomap_clear_pages(ums);

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];

		paend = round_page(ds->ds_addr + ds->ds_len);
		for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
		     pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
			err = uturn_iomap_insert_page(ums, va, pa);
			if (err) {
				printf("iomap insert error: %d for "
				    "va 0x%lx pa 0x%lx\n", err, va, pa);
				bus_dmamap_unload(sc->sc_dmat, map);
				uturn_iomap_clear_pages(ums);
				return (err);
			}
		}
	}

	sgsize = ums->ums_map.upm_pagecnt * PAGE_SIZE;
	/* XXXNH */
	s = splhigh();
	err = extent_alloc(sc->sc_map, sgsize, align, boundary,
	    EX_NOWAIT | EX_BOUNDZERO, &iovaddr);
	splx(s);
	if (err)
		return (err);

	ums->ums_iovastart = iovaddr;
	ums->ums_iovasize = sgsize;

	iova = iovaddr;
	for (i = 0, e = upm->upm_map; i < upm->upm_pagecnt; ++i, ++e) {
		e->upe_iova = iova;
		uturn_iommu_enter(sc, e->upe_iova, HPPA_SID_KERNEL, e->upe_va,
		    e->upe_pa);
		iova += PAGE_SIZE;
	}

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
		ds->ds_addr = uturn_iomap_translate(ums, ds->ds_addr);
	}

	return (0);
}

int
uturn_dmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
    struct proc *p, int flags)
{
	struct uturn_softc *sc = v;
	int err;

	err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
	if (err)
		return (err);

	return uturn_iomap_load_map(sc, map, flags);
}

int
uturn_dmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
{
	struct uturn_softc *sc = v;
	int err;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
	if (err)
		return (err);

	return uturn_iomap_load_map(sc, map, flags);
}

int
uturn_dmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
{
	struct uturn_softc *sc = v;

	printf("load_uio\n");

	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
}

int
uturn_dmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct uturn_softc *sc = v;

	printf("load_raw\n");

	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags));
}

void
uturn_dmamap_unload(void *v, bus_dmamap_t map)
{
	struct uturn_softc *sc = v;
	struct uturn_map_state *ums = map->_dm_cookie;
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;
	int err, i, s;

	/* Remove the IOMMU entries. */
	for (i = 0, e = upm->upm_map; i < upm->upm_pagecnt; ++i, ++e)
		uturn_iommu_remove(sc, e->upe_iova, PAGE_SIZE);

	/* Clear the iomap. */
	uturn_iomap_clear_pages(ums);

	bus_dmamap_unload(sc->sc_dmat, map);

	s = splhigh();
	err = extent_free(sc->sc_map, ums->ums_iovastart,
	    ums->ums_iovasize, EX_NOWAIT);
	splx(s);
	if (err)
		printf("warning: %ld of IOVA space lost\n", ums->ums_iovasize);
	ums->ums_iovastart = 0;
	ums->ums_iovasize = 0;
}

void
uturn_dmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
    bus_size_t len, int ops)
{
	/* Nothing to do; DMA is cache-coherent. */
}

int
uturn_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	struct uturn_softc *sc = v;

	return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
	    segs, nsegs, rsegs, flags));
}

void
uturn_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
	struct uturn_softc *sc = v;

	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
}

int
uturn_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
    void **kvap, int flags)
{
	struct uturn_softc *sc = v;

	return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
}

void
uturn_dmamem_unmap(void *v, void *kva, size_t size)
{
	struct uturn_softc *sc = v;

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
}

paddr_t
uturn_dmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	struct uturn_softc *sc = v;

	return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
}

/*
 * Utility function used by splay tree to order page entries by pa.
 */
static inline int
upe_compare(struct uturn_page_entry *a, struct uturn_page_entry *b)
{
	return ((a->upe_pa > b->upe_pa) ? 1 :
		(a->upe_pa < b->upe_pa) ? -1 : 0);
}

SPLAY_PROTOTYPE(uturn_page_tree, uturn_page_entry, upe_node, upe_compare);

SPLAY_GENERATE(uturn_page_tree, uturn_page_entry, upe_node, upe_compare);

/*
 * Create a new iomap.
 */
struct uturn_map_state *
uturn_iomap_create(int n)
{
	struct uturn_map_state *ums;

	/* Safety for heavily fragmented data, such as mbufs */
	n += 4;
	if (n < 16)
		n = 16;

	ums = malloc(sizeof(*ums) + (n - 1) * sizeof(ums->ums_map.upm_map[0]),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ums == NULL)
		return (NULL);

	/* Initialize the map. */
	ums->ums_map.upm_maxpage = n;
	SPLAY_INIT(&ums->ums_map.upm_tree);

	return (ums);
}

/*
 * Destroy an iomap.
 */
void
uturn_iomap_destroy(struct uturn_map_state *ums)
{
	KASSERT(ums->ums_map.upm_pagecnt == 0);

	free(ums, M_DEVBUF);
}

/*
 * Insert a pa entry in the iomap.
 */
int
uturn_iomap_insert_page(struct uturn_map_state *ums, vaddr_t va, paddr_t pa)
{
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;

	if (upm->upm_pagecnt >= upm->upm_maxpage) {
		struct uturn_page_entry upe;

		upe.upe_pa = pa;
		if (SPLAY_FIND(uturn_page_tree, &upm->upm_tree, &upe))
			return (0);

		return (ENOMEM);
	}

	e = &upm->upm_map[upm->upm_pagecnt];

	e->upe_pa = pa;
	e->upe_va = va;
	e->upe_iova = 0;

	e = SPLAY_INSERT(uturn_page_tree, &upm->upm_tree, e);

	/* Duplicates are okay, but only count them once. */
	if (e)
		return (0);

	++upm->upm_pagecnt;

	return (0);
}

/*
 * Translate a physical address (pa) into an IOVA address.
 */
bus_addr_t
uturn_iomap_translate(struct uturn_map_state *ums, paddr_t pa)
{
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;
	struct uturn_page_entry pe;
	paddr_t offset = pa & PAGE_MASK;

	pe.upe_pa = trunc_page(pa);

	e = SPLAY_FIND(uturn_page_tree, &upm->upm_tree, &pe);

	if (e == NULL) {
		panic("%s: couldn't find pa %lx", __func__, pa);
		return 0;
	}

	return (e->upe_iova | offset);
}

/*
 * Clear the iomap table and tree.
 */
void
uturn_iomap_clear_pages(struct uturn_map_state *ums)
{
	ums->ums_map.upm_pagecnt = 0;
	SPLAY_INIT(&ums->ums_map.upm_tree);
}