/* $OpenBSD: sgmap_typedep.c,v 1.15 2017/04/30 16:45:45 mpi Exp $ */
/* $NetBSD: sgmap_typedep.c,v 1.17 2001/07/19 04:27:37 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

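/*
 * Note that this file is not compiled standalone: it is a template,
 * #included by a wrapper that defines SGMAP_TYPE, SGMAP_PTE_TYPE,
 * SGPTE_PGADDR_SHIFT, SGMAP_ADDR_PTEIDX_SHIFT and SGMAP_PTE_SPACING
 * for a particular SG map PTE format (likely the pci_sgmap_pte32/pte64
 * sources on this platform); __C() pastes SGMAP_TYPE into each function
 * name so that every expansion gets unique symbols.
 */
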
#ifdef SGMAP_DEBUG
int			__C(SGMAP_TYPE,_debug) = 0;
#endif

SGMAP_PTE_TYPE		__C(SGMAP_TYPE,_prefetch_spill_page_pte);

int			__C(SGMAP_TYPE,_load_buffer)(bus_dma_tag_t,
			    bus_dmamap_t, void *buf, size_t buflen,
			    struct proc *, int, int, struct alpha_sgmap *);

void
__C(SGMAP_TYPE,_init_spill_page_pte)(void)
{

	__C(SGMAP_TYPE,_prefetch_spill_page_pte) =
	    (alpha_sgmap_prefetch_spill_page_pa >>
	     SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
}

int
__C(SGMAP_TYPE,_load_buffer)(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, struct proc *p, int flags, int seg,
    struct alpha_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	pmap_t pmap;
	bus_addr_t dmaoffset, sgva;
	bus_size_t sgvalen, boundary, alignment;
	struct extent_region *regions = map->_dm_cookie;
	SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
	int pteidx, error, spill;

	if (seg >= map->_dm_segcnt)
		return (EFBIG);

	/* Initialize the spill page PTE if it hasn't been already. */
	if (__C(SGMAP_TYPE,_prefetch_spill_page_pte) == 0)
		__C(SGMAP_TYPE,_init_spill_page_pte)();

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & PGOFSET;

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug)) {
		printf("sgmap_load: ----- buf = %p -----\n", buf);
		printf("sgmap_load: dmaoffset = 0x%lx, buflen = 0x%lx\n",
		    dmaoffset, buflen);
	}
#endif
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */

	/*
	 * XXX Always allocate a spill page for now.  Note
	 * the spill page is not needed for an in-bound-only
	 * transfer.
	 */
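	/*
	 * (The spill page exists because, on an outbound memory-to-
	 * device transfer, the chipset's DMA engine may prefetch past
	 * the last mapped page; the extra PTE added below gives that
	 * prefetch a harmless page to land on.  An inbound-only
	 * transfer -- BUS_DMA_READ, device-to-memory -- never
	 * prefetches data from memory, so it could skip the spill
	 * page.)
	 */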
	if ((flags & BUS_DMA_READ) == 0)
		spill = 1;
	else
		spill = 0;

	endva = round_page(va + buflen);
	va = trunc_page(va);

	boundary = map->_dm_boundary;
	alignment = PAGE_SIZE;

	sgvalen = (endva - va);
	if (spill) {
		sgvalen += PAGE_SIZE;

		/*
		 * ARGH!  If the addition of the spill page bumped us
		 * over our boundary, we have to 2x the boundary limit.
		 */
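		/*
		 * For example, with boundary = 0x2000 and sgvalen =
		 * 0x4000 after adding the spill page, no allocation
		 * of that size can avoid crossing a 0x2000 line, so
		 * the limit is doubled (to 0x4000 here) until sgvalen
		 * fits.  Aligning the start to the original boundary
		 * keeps the data pages in place; only the trailing
		 * spill page may cross the old boundary.
		 */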
		if (boundary && boundary < sgvalen) {
			alignment = boundary;
			do {
				boundary <<= 1;
			} while (boundary < sgvalen);
		}
	}

#if 0
	printf("len 0x%lx -> 0x%lx, boundary 0x%lx -> 0x%lx -> ",
	    (endva - va), sgvalen, map->_dm_boundary, boundary);
#endif

	mtx_enter(&sgmap->aps_mtx);
	error = extent_alloc_with_descr(sgmap->aps_ex, sgvalen, alignment,
	    0, boundary, EX_NOWAIT, &regions[seg], &sgva);
	mtx_leave(&sgmap->aps_mtx);
	if (error)
		return (error);

#if 0
	printf("error %d sgva 0x%lx\n", error, sgva);
#endif

	pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
	pte = &page_table[pteidx * SGMAP_PTE_SPACING];

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug))
		printf("sgmap_load: sgva = 0x%lx, pteidx = %d, "
		    "pte = %p (pt = %p)\n", sgva, pteidx, pte,
		    page_table);
#endif

	/* Generate the DMA address. */
	map->dm_segs[seg].ds_addr = sgmap->aps_wbase | sgva | dmaoffset;
	map->dm_segs[seg].ds_len = buflen;
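
	/*
	 * The DMA address decomposes as: window base (aps_wbase, high
	 * bits) | SG map virtual address (sgva, which selects the
	 * PTEs) | offset into the first page (dmaoffset, the low
	 * PGOFSET bits).  Each PAGE_SIZE step of sgva corresponds to
	 * one PTE filled in below.
	 */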

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug))
		printf("sgmap_load: wbase = 0x%lx, vpage = 0x%x, "
		    "dma addr = 0x%lx\n", sgmap->aps_wbase, sgva,
		    map->dm_segs[seg].ds_addr);
#endif

	for (; va < endva; va += PAGE_SIZE, pteidx++,
	     pte = &page_table[pteidx * SGMAP_PTE_SPACING]) {
		/* Get the physical address for this segment. */
		(void)pmap_extract(pmap, va, &pa);

		/* Load the current PTE with this page. */
		*pte = (pa >> SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
#ifdef SGMAP_DEBUG
		if (__C(SGMAP_TYPE,_debug))
			printf("sgmap_load:     pa = 0x%lx, pte = %p, "
			    "*pte = 0x%lx\n", pa, pte, (u_long)(*pte));
#endif
	}

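	/*
	 * At loop exit, pte points one slot past the last mapped
	 * page -- exactly where the spill PTE belongs.
	 */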
	if (spill) {
		/* ...and the prefetch-spill page. */
		*pte = __C(SGMAP_TYPE,_prefetch_spill_page_pte);
#ifdef SGMAP_DEBUG
		if (__C(SGMAP_TYPE,_debug)) {
			printf("sgmap_load:     spill page, pte = %p, "
			    "*pte = 0x%lx\n", pte, *pte);
			printf("sgmap_load:     pte count = %d\n",
			    map->_dm_ptecnt);
		}
#endif
	}

	return (0);
}

int
__C(SGMAP_TYPE,_load)(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, struct alpha_sgmap *sgmap)
{
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

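	/*
	 * BUS_DMA_READ means device-to-memory, BUS_DMA_WRITE
	 * memory-to-device.  A map may be loaded for at most one
	 * direction at a time, and must not already be loaded.
	 */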
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);

	seg = 0;
	error = __C(SGMAP_TYPE,_load_buffer)(t, map, buf, buflen, p,
	    flags, seg, sgmap);

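	/*
	 * alpha_mb() is a memory barrier; it ensures the PTE stores
	 * above are visible to the chipset before the caller can hand
	 * the DMA address to the device.
	 */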
	alpha_mb();

#if defined(SGMAP_DEBUG) && defined(DDB)
	if (__C(SGMAP_TYPE,_debug) > 1)
		db_enter();
#endif

	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->_dm_window = t;
	} else {
		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
		if (t->_next_window != NULL) {
			/* Give the next window a chance. */
			error = bus_dmamap_load(t->_next_window, map, buf,
			    buflen, p, flags);
		}
	}
	return (error);
}

int
__C(SGMAP_TYPE,_load_mbuf)(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags, struct alpha_sgmap *sgmap)
{
	struct mbuf *m;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic(__S(__C(SGMAP_TYPE,_load_mbuf)) ": no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);

	seg = 0;
	error = 0;
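	/*
	 * Each non-empty mbuf in the chain gets its own SG map
	 * segment; _load_buffer fails with EFBIG once the map's
	 * segment count is exhausted.
	 */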
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = __C(SGMAP_TYPE,_load_buffer)(t, map,
		    m->m_data, m->m_len, NULL, flags, seg, sgmap);
		seg++;
	}

	alpha_mb();

#if defined(SGMAP_DEBUG) && defined(DDB)
	if (__C(SGMAP_TYPE,_debug) > 1)
		db_enter();
#endif

	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg;
		map->_dm_window = t;
	} else {
		/* Need to back out what we've done so far. */
		map->dm_nsegs = seg - 1;
		__C(SGMAP_TYPE,_unload)(t, map, sgmap);
		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
		if (t->_next_window != NULL) {
			/* Give the next window a chance. */
			error = bus_dmamap_load_mbuf(t->_next_window, map,
			    m0, flags);
		}
	}

	return (error);
}

int
__C(SGMAP_TYPE,_load_uio)(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags, struct alpha_sgmap *sgmap)
{
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;
	int i, seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic(__S(__C(SGMAP_TYPE,_load_uio))
			    ": USERSPACE but no proc");
#endif
	}

	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0;
	     i++, seg++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = __C(SGMAP_TYPE,_load_buffer)(t, map,
		    addr, minlen, p, flags, seg, sgmap);

		resid -= minlen;
	}

	alpha_mb();

#if defined(SGMAP_DEBUG) && defined(DDB)
	if (__C(SGMAP_TYPE,_debug) > 1)
		db_enter();
#endif

	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg;
		map->_dm_window = t;
	} else {
		/* Need to back out what we've done so far. */
		map->dm_nsegs = seg - 1;
		__C(SGMAP_TYPE,_unload)(t, map, sgmap);
		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
		if (t->_next_window != NULL) {
			/* Give the next window a chance. */
			error = bus_dmamap_load_uio(t->_next_window, map,
			    uio, flags);
		}
	}

	return (error);
}

int
__C(SGMAP_TYPE,_load_raw)(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags,
    struct alpha_sgmap *sgmap)
{
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	panic(__S(__C(SGMAP_TYPE,_load_raw)) ": not implemented");
}

void
__C(SGMAP_TYPE,_unload)(bus_dma_tag_t t, bus_dmamap_t map,
    struct alpha_sgmap *sgmap)
{
	SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
	bus_addr_t osgva, sgva, esgva;
	int error, spill, seg, pteidx;

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		/*
		 * XXX Always allocate a spill page for now.  Note
		 * the spill page is not needed for an in-bound-only
		 * transfer.
		 */
		if ((map->_dm_flags & BUS_DMA_READ) == 0)
			spill = 1;
		else
			spill = 0;

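		/*
		 * Recover the SG map VA by masking off the window
		 * base -- the inverse of the OR in _load_buffer; the
		 * in-page offset is stripped by trunc_page below.
		 */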
		sgva = map->dm_segs[seg].ds_addr & ~sgmap->aps_wbase;

		esgva = round_page(sgva + map->dm_segs[seg].ds_len);
		osgva = sgva = trunc_page(sgva);

		if (spill)
			esgva += PAGE_SIZE;

		/* Invalidate the PTEs for the mapping. */
		for (pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
		     sgva < esgva; sgva += PAGE_SIZE, pteidx++) {
			pte = &page_table[pteidx * SGMAP_PTE_SPACING];
#ifdef SGMAP_DEBUG
			if (__C(SGMAP_TYPE,_debug))
				printf("sgmap_unload:     pte = %p, "
				    "*pte = 0x%lx\n", pte, (u_long)(*pte));
#endif
			*pte = 0;
		}

		alpha_mb();

		/* Free the virtual address space used by the mapping. */
		mtx_enter(&sgmap->aps_mtx);
		error = extent_free(sgmap->aps_ex, osgva, (esgva - osgva),
		    EX_NOWAIT);
		mtx_leave(&sgmap->aps_mtx);
		if (error != 0)
			panic(__S(__C(SGMAP_TYPE,_unload)));
	}

	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);

	/* Mark the mapping invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
}
451