/*	$NetBSD: gapspci_dma.c,v 1.21 2023/12/02 22:42:02 thorpej Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Bus DMA implementation for the SEGA GAPS PCI bridge.
 *
 * NOTE: We only implement a small subset of what the bus_dma(9)
 * API specifies.  Right now, the GAPS PCI bridge is only used for
 * the Dreamcast Broadband Adapter, so we only provide what the
 * pci(4) and rtk(4) drivers need.
 */
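
/*
 * For orientation, a hedged sketch (not lifted from rtk(4) itself) of
 * how a driver consumes this tag through the standard bus_dma(9)
 * entry points; the sizes and flags below are illustrative only:
 *
 *	bus_dma_tag_t t = &sc->sc_dmat;
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(t, MCLBYTES, 1, MCLBYTES, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load_mbuf(t, map, m0, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_PREWRITE);
 *	... program the device with map->dm_segs[0].ds_addr ...
 */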

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: gapspci_dma.c,v 1.21 2023/12/02 22:42:02 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/vmem.h>
#include <sys/malloc.h>
#include <sys/bus.h>

#include <machine/cpu.h>

#include <dev/pci/pcivar.h>

#include <dreamcast/dev/g2/gapspcivar.h>

#include <uvm/uvm.h>

int	gaps_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void	gaps_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	gaps_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	gaps_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int);
int	gaps_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
int	gaps_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
	    int, bus_size_t, int);
void	gaps_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	gaps_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);

int	gaps_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
	    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
	    int nsegs, int *rsegs, int flags);
void	gaps_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs);
int	gaps_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
	    size_t size, void **kvap, int flags);
void	gaps_dmamem_unmap(bus_dma_tag_t tag, void *kva, size_t size);
paddr_t	gaps_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
	    off_t off, int prot, int flags);

void
gaps_dma_init(struct gaps_softc *sc)
{
	bus_dma_tag_t t = &sc->sc_dmat;

	memset(t, 0, sizeof(*t));

	t->_cookie = sc;
	t->_dmamap_create = gaps_dmamap_create;
	t->_dmamap_destroy = gaps_dmamap_destroy;
	t->_dmamap_load = gaps_dmamap_load;
	t->_dmamap_load_mbuf = gaps_dmamap_load_mbuf;
	t->_dmamap_load_uio = gaps_dmamap_load_uio;
	t->_dmamap_load_raw = gaps_dmamap_load_raw;
	t->_dmamap_unload = gaps_dmamap_unload;
	t->_dmamap_sync = gaps_dmamap_sync;

	t->_dmamem_alloc = gaps_dmamem_alloc;
	t->_dmamem_free = gaps_dmamem_free;
	t->_dmamem_map = gaps_dmamem_map;
	t->_dmamem_unmap = gaps_dmamem_unmap;
	t->_dmamem_mmap = gaps_dmamem_mmap;

	/*
	 * The GAPS PCI bridge has 32k of DMA memory.  We manage it
	 * with a vmem arena.
	 */
	sc->sc_dma_arena = vmem_create("gaps dma",
				       sc->sc_dmabase,
				       sc->sc_dmasize,
				       1024 /* XXX */,	/* quantum */
				       NULL,		/* allocfn */
				       NULL,		/* freefn */
				       NULL,		/* arg */
				       0,		/* qcache_max */
				       VM_SLEEP,
				       IPL_VM);
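
	/*
	 * The arena spans the bus addresses [sc_dmabase, sc_dmabase +
	 * sc_dmasize), so vmem hands back addresses that are directly
	 * usable as DMA addresses.  With a quantum of 1024, every
	 * allocation is rounded up to a 1k multiple, so the 32k of
	 * SRAM can cover at most 32 outstanding allocations.
	 */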

	if (bus_space_map(sc->sc_memt, sc->sc_dmabase, sc->sc_dmasize,
	    0, &sc->sc_dma_memh) != 0)
		panic("gaps_dma_init: can't map SRAM buffer");
}

/*
 * A GAPS DMA map -- has the standard DMA map, plus some extra
 * housekeeping data.
 */
struct gaps_dmamap {
	struct dreamcast_bus_dmamap gd_dmamap;
	void *gd_origbuf;
	int gd_buftype;
};

#define	GAPS_DMA_BUFTYPE_INVALID	0
#define	GAPS_DMA_BUFTYPE_LINEAR		1
#define	GAPS_DMA_BUFTYPE_MBUF		2
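
/*
 * The GAPS bridge can only DMA to and from its private SRAM window,
 * so this is in effect a bounce-buffer implementation: the load
 * routines only reserve a chunk of SRAM and remember the caller's
 * buffer in gd_origbuf; no data moves until gaps_dmamap_sync(), which
 * copies between gd_origbuf and the SRAM window with bus_space
 * accesses.
 */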

int
gaps_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap;
	bus_dmamap_t map;

	/*
	 * Allocate and initialize the DMA map.  The end of the map is
	 * a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.  Since the DMA map always includes
	 * one segment, and we only support one segment, this is really
	 * easy.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources
	 * and they are not to be freed.
	 */

	gmap = malloc(sizeof(*gmap), M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	if (gmap == NULL)
		return ENOMEM;

	memset(gmap, 0, sizeof(*gmap));

	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;

	map = &gmap->gd_dmamap;

	map->_dm_size = size;
	map->_dm_segcnt = 1;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;

	if (flags & BUS_DMA_ALLOCNOW) {
		vmem_addr_t res;
		int error;

		const vm_flag_t vmflags = VM_BESTFIT |
		    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

		error = vmem_xalloc(sc->sc_dma_arena, size,
				    0,			/* alignment */
				    0,			/* phase */
				    0,			/* nocross */
				    VMEM_ADDR_MIN,	/* minaddr */
				    VMEM_ADDR_MAX,	/* maxaddr */
				    vmflags,
				    &res);
		if (error) {
			free(gmap, M_DMAMAP);	/* allocated with M_DMAMAP above */
			return error;
		}

		map->dm_segs[0].ds_addr = res;
		map->dm_segs[0].ds_len = size;

		map->dm_mapsize = size;
		map->dm_nsegs = 1;
	} else {
		map->dm_mapsize = 0;		/* no valid mappings */
		map->dm_nsegs = 0;
	}

	*dmamap = map;

	return 0;
}
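
/*
 * Note the BUS_DMA_ALLOCNOW asymmetry that follows from the above: a
 * map created with ALLOCNOW holds its SRAM reservation for its entire
 * lifetime and releases it in gaps_dmamap_destroy(), while any other
 * map acquires SRAM in the load routines and releases it in
 * gaps_dmamap_unload().
 */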

void
gaps_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct gaps_softc *sc = t->_cookie;

	if (map->_dm_flags & BUS_DMA_ALLOCNOW) {
		vmem_xfree(sc->sc_dma_arena, map->dm_segs[0].ds_addr,
		    map->dm_mapsize);
	}
	free(map, M_DMAMAP);
}

int
gaps_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *addr,
    bus_size_t size, struct proc *p, int flags)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	vmem_addr_t res;
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/*
		 * Make sure that on error condition we return
		 * "no valid mappings".
		 */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	}

	/* XXX Don't support DMA to process space right now. */
	if (p != NULL)
		return EINVAL;

	if (size > map->_dm_size)
		return EINVAL;

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	error = vmem_xalloc(sc->sc_dma_arena, size,
			    0,			/* alignment */
			    0,			/* phase */
			    map->_dm_boundary,	/* nocross */
			    VMEM_ADDR_MIN,	/* minaddr */
			    VMEM_ADDR_MAX,	/* maxaddr */
			    vmflags,
			    &res);
	if (error)
		return error;

	map->dm_segs[0].ds_addr = res;
	map->dm_segs[0].ds_len = size;

	gmap->gd_origbuf = addr;
	gmap->gd_buftype = GAPS_DMA_BUFTYPE_LINEAR;

	map->dm_mapsize = size;
	map->dm_nsegs = 1;

	return 0;
}

int
gaps_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	vmem_addr_t res;
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/*
		 * Make sure that on error condition we return
		 * "no valid mappings".
		 */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	}

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gaps_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	error = vmem_xalloc(sc->sc_dma_arena, m0->m_pkthdr.len,
			    0,			/* alignment */
			    0,			/* phase */
			    map->_dm_boundary,	/* nocross */
			    VMEM_ADDR_MIN,	/* minaddr */
			    VMEM_ADDR_MAX,	/* maxaddr */
			    vmflags,
			    &res);
	if (error)
		return error;

	map->dm_segs[0].ds_addr = res;
	map->dm_segs[0].ds_len = m0->m_pkthdr.len;

	gmap->gd_origbuf = m0;
	gmap->gd_buftype = GAPS_DMA_BUFTYPE_MBUF;

	map->dm_mapsize = m0->m_pkthdr.len;
	map->dm_nsegs = 1;

	return 0;
}

int
gaps_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	printf("gaps_dmamap_load_uio: not implemented\n");
	return EINVAL;
}

int
gaps_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	printf("gaps_dmamap_load_raw: not implemented\n");
	return EINVAL;
}

void
gaps_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;

	if (gmap->gd_buftype == GAPS_DMA_BUFTYPE_INVALID) {
		printf("gaps_dmamap_unload: DMA map not loaded!\n");
		return;
	}

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		vmem_xfree(sc->sc_dma_arena, map->dm_segs[0].ds_addr,
		    map->dm_mapsize);

		map->dm_maxsegsz = map->_dm_maxmaxsegsz;
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
	}

	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;
}

void
gaps_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	bus_addr_t dmaoff = map->dm_segs[0].ds_addr - sc->sc_dmabase;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("gaps_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize) {
			printf("offset 0x%lx mapsize 0x%lx\n",
			    offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad offset");
		}
		if (len == 0 || (offset + len) > map->dm_mapsize) {
			printf("len 0x%lx offset 0x%lx mapsize 0x%lx\n",
			    len, offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad length");
		}
	}
#endif

	switch (gmap->gd_buftype) {
	case GAPS_DMA_BUFTYPE_INVALID:
		printf("gaps_dmamap_sync: DMA map is not loaded!\n");
		return;

	case GAPS_DMA_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the SRAM buffer.
			 */
			bus_space_write_region_1(sc->sc_memt,
			    sc->sc_dma_memh,
			    dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer to the caller's buffer.
			 */
			bus_space_read_region_1(sc->sc_memt,
			    sc->sc_dma_memh,
			    dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case GAPS_DMA_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = gmap->gd_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer into the SRAM buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				bus_space_write_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer into the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				bus_space_read_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	default:
		printf("unknown buffer type %d\n", gmap->gd_buftype);
		panic("gaps_dmamap_sync");
	}
}
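
/*
 * A worked example of the mbuf walk above: with a chain of two mbufs
 * of lengths 4 and 8, a sync at offset 2 for length 6 first skips to
 * moff = 2 in the first mbuf and copies min(6, 4 - 2) = 2 bytes, then
 * copies the remaining 4 bytes from the start of the second mbuf.
 * 'offset' tracks the running position in the (contiguous) SRAM
 * region while 'moff' tracks the position within the current mbuf.
 */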

int
gaps_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;	/* from pmap.c */

	struct pglist mlist;
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
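	/*
	 * Physically contiguous pages are coalesced into a single
	 * segment.  For example (PAGE_SIZE 0x1000), pages at 0x1000,
	 * 0x2000 and 0x4000 come back as two segments:
	 * { 0x1000, 0x2000 } and { 0x4000, 0x1000 }.
	 */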
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}

void
gaps_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist mlist;
	struct vm_page *m;
	bus_addr_t addr;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

int
gaps_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use P2SEG, to avoid
	 * TLB thrashing.
	 */
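	/*
	 * (P2SEG is the SuperH fixed, untranslated and uncached
	 * window onto physical memory, so a single physically
	 * contiguous segment can be addressed through it without
	 * consuming TLB entries or requiring cache synchronization.)
	 */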
	if (nsegs == 1) {
		*kvap = (void *)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("gaps_dmamem_map: size botch");
			pmap_kenter_pa(va, addr,
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}

void
gaps_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long) kva & PAGE_MASK)
		panic("gaps_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with P2SEG.
	 */
	if (kva >= (void *)SH3_P2SEG_BASE &&
	    kva <= (void *)SH3_P2SEG_END)
		return;

	size = round_page(size);
	pmap_kremove((vaddr_t) kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t) kva, size, UVM_KMF_VAONLY);
}

paddr_t
gaps_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	/* Not implemented. */
	return -1;
}