/* $NetBSD: sgmap_common.c,v 1.29 2021/07/18 05:12:27 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: sgmap_common.c,v 1.29 2021/07/18 05:12:27 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#define	_ALPHA_BUS_DMA_PRIVATE
#include <sys/bus.h>

#include <alpha/common/sgmapvar.h>

/*
 * Some systems will prefetch the next page during a memory -> device DMA.
 * If there is no spill page mapped after the last page of the DMA, such a
 * prefetch can hit an invalid SGMAP PTE and cause a machine check.
 */
vaddr_t		alpha_sgmap_prefetch_spill_page_va;
bus_addr_t	alpha_sgmap_prefetch_spill_page_pa;

void
alpha_sgmap_init(bus_dma_tag_t t, struct alpha_sgmap *sgmap, const char *name,
    bus_addr_t wbase, bus_addr_t sgvabase, bus_size_t sgvasize, size_t ptesize,
    void *ptva, bus_size_t minptalign)
{
	bus_dma_segment_t seg;
	size_t ptsize;
	int rseg;

	if (sgvasize & PGOFSET) {
		printf("size botch for sgmap `%s'\n", name);
		goto die;
	}

	/*
	 * If we don't yet have a minimum SGVA alignment, default
	 * to the system page size.
	 */
	if (t->_sgmap_minalign < PAGE_SIZE) {
		t->_sgmap_minalign = PAGE_SIZE;
	}

	sgmap->aps_wbase = wbase;
	sgmap->aps_sgvabase = sgvabase;
	sgmap->aps_sgvasize = sgvasize;

	if (ptva != NULL) {
		/*
		 * We already have a page table; this may be a system
		 * where the page table resides in bridge-resident SRAM.
		 */
		sgmap->aps_pt = ptva;
		sgmap->aps_ptpa = 0;
	} else {
		/*
		 * Compute the page table size and allocate it.  At minimum,
		 * this must be aligned to the page table size.  However,
		 * some platforms have stricter alignment requirements.
		 */
		ptsize = (sgvasize / PAGE_SIZE) * ptesize;
		if (minptalign != 0) {
			if (minptalign < ptsize)
				minptalign = ptsize;
		} else
			minptalign = ptsize;
		if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT)) {
			panic("unable to allocate page table for sgmap `%s'",
			    name);
			goto die;
		}
		sgmap->aps_ptpa = seg.ds_addr;
		sgmap->aps_pt = (void *)ALPHA_PHYS_TO_K0SEG(sgmap->aps_ptpa);
	}

	/*
	 * Create the arena used to manage the virtual address
	 * space.
	 *
	 * XXX Consider using a quantum cache up to MAXPHYS+PAGE_SIZE
	 * XXX (extra page to handle the spill page).  For now, we don't,
	 * XXX because we are using constrained allocations everywhere.
	 */
	sgmap->aps_arena = vmem_create(name, sgvabase, sgvasize,
				       PAGE_SIZE,	/* quantum */
				       NULL,		/* importfn */
				       NULL,		/* releasefn */
				       NULL,		/* source */
				       0,		/* qcache_max */
				       VM_SLEEP,
				       IPL_VM);
	KASSERT(sgmap->aps_arena != NULL);
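
	/*
	 * Descriptive note (an assumption added for clarity, not from the
	 * original file): the chipset-specific SGMAP load routines are
	 * expected to carve SGVA space out of this arena with constrained
	 * allocations, which is why no quantum cache is configured above.
	 */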

	/*
	 * Allocate a spill page if that hasn't already been done.
	 */
	if (alpha_sgmap_prefetch_spill_page_va == 0) {
		if (bus_dmamem_alloc(t, PAGE_SIZE, 0, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT)) {
			printf("unable to allocate spill page for sgmap `%s'\n",
			    name);
			goto die;
		}
		alpha_sgmap_prefetch_spill_page_pa = seg.ds_addr;
		alpha_sgmap_prefetch_spill_page_va =
		    ALPHA_PHYS_TO_K0SEG(alpha_sgmap_prefetch_spill_page_pa);
		memset((void *)alpha_sgmap_prefetch_spill_page_va, 0,
		    PAGE_SIZE);
	}

	return;
 die:
	panic("alpha_sgmap_init");
}
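
/*
 * Illustrative sketch, not part of the upstream file: a chipset front end
 * would typically call alpha_sgmap_init() at attach time to set up its
 * scatter/gather DMA window.  The function name, window base, SGVA range,
 * and PTE size below are hypothetical example values, not those of any
 * real chipset.
 */
#if 0	/* example only, never compiled */
static void
example_chipset_sgmap_setup(bus_dma_tag_t t, struct alpha_sgmap *sgmap)
{

	alpha_sgmap_init(t, sgmap, "example_sgmap",
	    0x800000UL,			/* wbase: bus address of the window */
	    0x0,			/* sgvabase: start of SGVA space */
	    8UL * 1024 * 1024,		/* sgvasize: 8MB, page aligned */
	    sizeof(uint64_t),		/* ptesize: 8-byte SGMAP PTEs */
	    NULL,			/* ptva: allocate the page table */
	    0);				/* minptalign: default alignment */
}
#endif /* example only */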

int
alpha_sgmap_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	int error;

	error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
	    boundary, flags, &map);
	if (error)
		return (error);

	/* XXX BUS_DMA_ALLOCNOW */

	if (error == 0)
		*dmamp = map;
	else
		alpha_sgmap_dmamap_destroy(t, map);

	return (error);
}

void
alpha_sgmap_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	KASSERT(map->dm_mapsize == 0);

	_bus_dmamap_destroy(t, map);
}
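
/*
 * Usage note (an assumption about typical use, not stated in this file):
 * SGMAP-capable chipset bus_dma front ends generally install the two
 * functions above as the map-management hooks in their DMA tag, e.g.:
 *
 *	t->_dmamap_create  = alpha_sgmap_dmamap_create;
 *	t->_dmamap_destroy = alpha_sgmap_dmamap_destroy;
 *
 * so that maps targeting the scatter/gather-mapped window are created and
 * destroyed through this common code.
 */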