/* $OpenBSD: bus_dma.c,v 1.35 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: bus_dma.c,v 1.40 2000/07/17 04:47:56 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ALPHA_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer_direct(bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
	    paddr_t *, int *, int);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
		return (ENOMEM);

	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;
	map->_dm_window = NULL;

	*dmamp = map;
	return (0);
}
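
/*
 * Example (illustrative sketch, not part of the original file): how a
 * driver would typically reach the function above through the MI
 * bus_dmamap_create()/bus_dmamap_destroy() entry points.  The softc
 * ("sc") and its DMA tag member ("sc_dmat") are hypothetical names.
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	if (error)
 *		return (error);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */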

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DEVBUF, 0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t buflen, struct proc *p, int flags,
    paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	pmap_t pmap;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		pmap_extract(pmap, vaddr, &curaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that so the caller can fall back on a chained
		 * (SGMAP) window.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		if (map->_dm_maxsegsz < sgsize)
			sgsize = map->_dm_maxsegsz;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
			    curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
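
/*
 * Worked example of the boundary clipping above (illustrative numbers,
 * not from the original file): with _dm_boundary = 0x10000 (64KB),
 * bmask = ~(0x10000 - 1) = 0xffffffffffff0000.  For curaddr = 0x1fc00,
 * baddr = (0x1fc00 + 0x10000) & bmask = 0x20000, so sgsize is clipped
 * to 0x20000 - 0x1fc00 = 0x400 bytes and the segment ends exactly on
 * the 64KB boundary.
 */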

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}
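
/*
 * Example (hypothetical driver fragment, not part of this file): the
 * usual load/sync sequence once a map exists.  bus_dmamap_load()
 * dispatches here for a direct-mapped window; "sc_dmat", "buf" and
 * "len" are assumed names.
 *
 *	if (bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) != 0)
 *		return (EAGAIN);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... program the device with map->dm_segs[0..dm_nsegs-1] ...
 */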

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer_direct(t, map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}
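
/*
 * Example (illustrative, not part of this file): a network driver
 * loading an outgoing packet chain.  On EFBIG the chain is commonly
 * compacted with m_defrag() and the load retried once.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
 *	if (error == EFBIG && m_defrag(m0, M_DONTWAIT) == 0)
 *		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
 *		    BUS_DMA_NOWAIT);
 */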

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio_direct: "
			    "USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct(t, map,
		    addr, minlen, p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}
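
/*
 * Example (illustrative): the bracketing a driver performs around a
 * device-to-memory transfer; on alpha every sync operation reduces to
 * the memory barrier above.
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... device DMAs into the buffer ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 */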

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)-1));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned nonsensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
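
/*
 * Example (hypothetical fragment, not part of this file): the usual
 * allocate/map pairing for a DMA-safe descriptor ring; the names and
 * the one-page size are assumptions.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	caddr_t kva;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
 *		return (ENOMEM);
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, &kva,
 *	    BUS_DMA_NOWAIT) != 0) {
 *		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 *		return (ENOMEM);
 *	}
 */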

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, error;
	const struct kmem_dyn_mode *kd;

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			error = pmap_enter(pmap_kernel(), va, addr,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
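
/*
 * Note/example (illustrative): in the single-segment case above the
 * "mapping" is free; the returned pointer is just the K0SEG direct
 * mapping of the physical address,
 *
 *	*kvap == (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr)
 *
 * so no pmap entries are created and nothing needs to be torn down in
 * _bus_dmamem_unmap() beyond the K0SEG range check below.
 */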

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
	    kva <= (caddr_t)ALPHA_K0SEG_END)
		return;

	km_free(kva, round_page(size), &kv_any, &kp_none);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (segs[i].ds_addr + off);
	}

	/* Page not found. */
	return (-1);
}