xref: /netbsd-src/sys/arch/dreamcast/dev/g2/gapspci_dma.c (revision 82d56013d7b633d116a93943de88e08335357a7c)
/*	$NetBSD: gapspci_dma.c,v 1.20 2012/01/27 18:52:53 para Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Bus DMA implementation for the SEGA GAPS PCI bridge.
 *
 * NOTE: We only implement a small subset of what the bus_dma(9)
 * API specifies.  Right now, the GAPS PCI bridge is only used for
 * the Dreamcast Broadband Adapter, so we only provide what the
 * pci(4) and rtk(4) drivers need.
 */
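
/*
 * Illustrative sketch (not part of the driver): a client such as rtk(4)
 * only reaches the routines below through the machine-independent
 * bus_dma(9) wrappers, roughly in this order for a transmit packet:
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
 *	    0, BUS_DMA_NOWAIT, &map);		   -> gaps_dmamap_create()
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
 *	    BUS_DMA_NOWAIT);			   -> gaps_dmamap_load_mbuf()
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREWRITE);		   -> gaps_dmamap_sync()
 *	... the device DMAs out of the bridge's SRAM bounce buffer ...
 *	bus_dmamap_unload(sc->sc_dmat, map);	   -> gaps_dmamap_unload()
 *
 * The sizes, flags, and variable names above (MCLBYTES, one segment,
 * BUS_DMA_NOWAIT, sc->sc_dmat) are assumptions chosen for the example;
 * the real values come from the attached driver, not from this file.
 */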

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: gapspci_dma.c,v 1.20 2012/01/27 18:52:53 para Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/bus.h>

#include <machine/cpu.h>

#include <dev/pci/pcivar.h>

#include <dreamcast/dev/g2/gapspcivar.h>

#include <uvm/uvm.h>

int	gaps_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void	gaps_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	gaps_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	gaps_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int);
int	gaps_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
int	gaps_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
	    int, bus_size_t, int);
void	gaps_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	gaps_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);

int	gaps_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
	    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
	    int nsegs, int *rsegs, int flags);
void	gaps_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs);
int	gaps_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
	    size_t size, void **kvap, int flags);
void	gaps_dmamem_unmap(bus_dma_tag_t tag, void *kva, size_t size);
paddr_t	gaps_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
	    off_t off, int prot, int flags);

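/*
 * Set up the GAPS bus_dma tag: point each bus_dma(9) entry point at
 * the functions below, create the extent map that parcels out the
 * bridge's SRAM DMA buffer, and map that SRAM so the sync routines
 * can copy to and from it.
 */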
void
gaps_dma_init(struct gaps_softc *sc)
{
	bus_dma_tag_t t = &sc->sc_dmat;

	memset(t, 0, sizeof(*t));

	t->_cookie = sc;
	t->_dmamap_create = gaps_dmamap_create;
	t->_dmamap_destroy = gaps_dmamap_destroy;
	t->_dmamap_load = gaps_dmamap_load;
	t->_dmamap_load_mbuf = gaps_dmamap_load_mbuf;
	t->_dmamap_load_uio = gaps_dmamap_load_uio;
	t->_dmamap_load_raw = gaps_dmamap_load_raw;
	t->_dmamap_unload = gaps_dmamap_unload;
	t->_dmamap_sync = gaps_dmamap_sync;

	t->_dmamem_alloc = gaps_dmamem_alloc;
	t->_dmamem_free = gaps_dmamem_free;
	t->_dmamem_map = gaps_dmamem_map;
	t->_dmamem_unmap = gaps_dmamem_unmap;
	t->_dmamem_mmap = gaps_dmamem_mmap;

	/*
	 * The GAPS PCI bridge has 32k of DMA memory.  We manage it
	 * with an extent map.
	 */
	sc->sc_dma_ex = extent_create("gaps dma",
	    sc->sc_dmabase, sc->sc_dmabase + (sc->sc_dmasize - 1),
	    NULL, 0, EX_WAITOK | EX_NOCOALESCE);

	if (bus_space_map(sc->sc_memt, sc->sc_dmabase, sc->sc_dmasize,
	    0, &sc->sc_dma_memh) != 0)
		panic("gaps_dma_init: can't map SRAM buffer");
}

/*
 * A GAPS DMA map -- has the standard DMA map, plus some extra
 * housekeeping data.
 */
struct gaps_dmamap {
	struct dreamcast_bus_dmamap gd_dmamap;
	void *gd_origbuf;
	int gd_buftype;
};

#define	GAPS_DMA_BUFTYPE_INVALID	0
#define	GAPS_DMA_BUFTYPE_LINEAR		1
#define	GAPS_DMA_BUFTYPE_MBUF		2

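/*
 * Create a single-segment DMA map.  With BUS_DMA_ALLOCNOW the SRAM
 * for the transfer is reserved from the extent map right away;
 * otherwise it is reserved when the map is loaded.
 */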
int
gaps_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap;
	bus_dmamap_t map;

	/*
	 * Allocate and initialize the DMA map.  The end of the map is
	 * a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.  Since the DMA map always includes
	 * one segment, and we only support one segment, this is really
	 * easy.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources
	 * and they are not to be freed.
	 */

	gmap = malloc(sizeof(*gmap), M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	if (gmap == NULL)
		return ENOMEM;

	memset(gmap, 0, sizeof(*gmap));

	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;

	map = &gmap->gd_dmamap;

	map->_dm_size = size;
	map->_dm_segcnt = 1;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;

	if (flags & BUS_DMA_ALLOCNOW) {
		u_long res;
		int error;

		error = extent_alloc(sc->sc_dma_ex, size, 1024 /* XXX */,
		    map->_dm_boundary,
		    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
		if (error) {
			free(gmap, M_DMAMAP);
			return error;
		}

		map->dm_segs[0].ds_addr = res;
		map->dm_segs[0].ds_len = size;

		map->dm_mapsize = size;
		map->dm_nsegs = 1;
	} else {
		map->dm_mapsize = 0;		/* no valid mappings */
		map->dm_nsegs = 0;
	}

	*dmamap = map;

	return 0;
}

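/*
 * Destroy a DMA map, releasing any SRAM that was reserved with
 * BUS_DMA_ALLOCNOW.
 */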
void
gaps_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct gaps_softc *sc = t->_cookie;

	if (map->_dm_flags & BUS_DMA_ALLOCNOW) {
		(void) extent_free(sc->sc_dma_ex,
		    map->dm_segs[0].ds_addr,
		    map->dm_mapsize, EX_NOWAIT);
	}
	free(map, M_DMAMAP);
}

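/*
 * Load a linear kernel buffer: reserve enough bridge SRAM for the
 * transfer and remember the original buffer so gaps_dmamap_sync()
 * can bounce data between the two.  DMA to process (user) space is
 * not supported.
 */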
int
gaps_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *addr,
    bus_size_t size, struct proc *p, int flags)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	u_long res;
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/*
		 * Make sure that on error condition we return
		 * "no valid mappings".
		 */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	}

	/* XXX Don't support DMA to process space right now. */
	if (p != NULL)
		return EINVAL;

	if (size > map->_dm_size)
		return EINVAL;

	error = extent_alloc(sc->sc_dma_ex, size, 1024 /* XXX */,
	    map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
	if (error)
		return error;

	map->dm_segs[0].ds_addr = res;
	map->dm_segs[0].ds_len = size;

	gmap->gd_origbuf = addr;
	gmap->gd_buftype = GAPS_DMA_BUFTYPE_LINEAR;

	map->dm_mapsize = size;
	map->dm_nsegs = 1;

	return 0;
}

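/*
 * Load an mbuf chain (which must have a packet header): reserve SRAM
 * for the whole packet and remember the chain so gaps_dmamap_sync()
 * can copy it in and out one mbuf at a time.
 */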
int
gaps_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	u_long res;
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/*
		 * Make sure that on error condition we return
		 * "no valid mappings".
		 */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	}

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gaps_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	error = extent_alloc(sc->sc_dma_ex, m0->m_pkthdr.len, 1024 /* XXX */,
	    map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
	if (error)
		return error;

	map->dm_segs[0].ds_addr = res;
	map->dm_segs[0].ds_len = m0->m_pkthdr.len;

	gmap->gd_origbuf = m0;
	gmap->gd_buftype = GAPS_DMA_BUFTYPE_MBUF;

	map->dm_mapsize = m0->m_pkthdr.len;
	map->dm_nsegs = 1;

	return 0;
}

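/* Not needed by the drivers this bridge supports. */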
int
gaps_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	printf("gaps_dmamap_load_uio: not implemented\n");
	return EINVAL;
}

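/* Not needed by the drivers this bridge supports. */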
int
gaps_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	printf("gaps_dmamap_load_raw: not implemented\n");
	return EINVAL;
}

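/*
 * Unload a DMA map: return the SRAM reservation to the extent map
 * (unless it was made with BUS_DMA_ALLOCNOW, in which case it is kept
 * until gaps_dmamap_destroy()) and mark the map as not loaded.
 */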
void
gaps_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;

	if (gmap->gd_buftype == GAPS_DMA_BUFTYPE_INVALID) {
		printf("gaps_dmamap_unload: DMA map not loaded!\n");
		return;
	}

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		(void) extent_free(sc->sc_dma_ex,
		    map->dm_segs[0].ds_addr,
		    map->dm_mapsize, EX_NOWAIT);

		map->dm_maxsegsz = map->_dm_maxmaxsegsz;
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
	}

	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;
}

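/*
 * Sync a DMA map.  All DMA goes through the bridge's SRAM bounce
 * buffer, so PREWRITE copies the caller's buffer (linear or mbuf
 * chain) into SRAM and POSTREAD copies it back out; PREREAD and
 * POSTWRITE require no work.
 */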
void
gaps_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	bus_addr_t dmaoff = map->dm_segs[0].ds_addr - sc->sc_dmabase;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("gaps_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize) {
			printf("offset 0x%lx mapsize 0x%lx\n",
			    offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad offset");
		}
		if (len == 0 || (offset + len) > map->dm_mapsize) {
			printf("len 0x%lx offset 0x%lx mapsize 0x%lx\n",
			    len, offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad length");
		}
	}
#endif

	switch (gmap->gd_buftype) {
	case GAPS_DMA_BUFTYPE_INVALID:
		printf("gaps_dmamap_sync: DMA map is not loaded!\n");
		return;

	case GAPS_DMA_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the SRAM buffer.
			 */
			bus_space_write_region_1(sc->sc_memt,
			    sc->sc_dma_memh,
			    dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer to the caller's buffer.
			 */
			bus_space_read_region_1(sc->sc_memt,
			    sc->sc_dma_memh,
			    dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case GAPS_DMA_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = gmap->gd_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer into the SRAM buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				bus_space_write_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer into the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				bus_space_read_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	default:
		printf("unknown buffer type %d\n", gmap->gd_buftype);
		panic("gaps_dmamap_sync");
	}
}

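/*
 * Allocate DMA-safe memory: get pages from UVM and coalesce
 * physically contiguous ones into segments.
 */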
int
gaps_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;	/* from pmap.c */

	struct pglist mlist;
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}

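/* Free memory previously allocated with gaps_dmamem_alloc(). */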
void
gaps_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist mlist;
	struct vm_page *m;
	bus_addr_t addr;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

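/*
 * Map allocated memory into kernel virtual address space.  A single
 * segment is returned through the untranslated P2 segment, so no TLB
 * entries are consumed; multi-segment allocations get KVA from
 * uvm_km_alloc() and are entered with pmap_kenter_pa().
 */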
int
gaps_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use P2SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (void *)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("gaps_dmamem_map: size botch");
			pmap_kenter_pa(va, addr,
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}

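/*
 * Undo gaps_dmamem_map().  P2SEG mappings need no teardown; anything
 * else is removed from the pmap and its KVA returned to the kernel map.
 */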
void
gaps_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long) kva & PAGE_MASK)
		panic("gaps_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with P2SEG.
	 */
	if (kva >= (void *)SH3_P2SEG_BASE &&
	    kva <= (void *)SH3_P2SEG_END)
		return;

	size = round_page(size);
	pmap_kremove((vaddr_t) kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t) kva, size, UVM_KMF_VAONLY);
}

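/* Mapping DMA memory into user space via mmap(2) is not supported. */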
paddr_t
gaps_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	/* Not implemented. */
	return -1;
}