/*	$OpenBSD: bus_dma.c,v 1.2 2020/06/17 17:54:05 kettenis Exp $ */

/*
 * Copyright (c) 2003-2004 Opsycon AB  (www.opsycon.se / www.opsycon.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct machine_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct machine_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
		return (ENOMEM);

	map = (struct machine_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);

	*dmamp = map;
	return (0);
}
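
/*
 * Example (sketch): drivers normally reach this routine through the
 * tag's bus_dmamap_create() entry point rather than calling it
 * directly.  The names sc->sc_dmat and map below are hypothetical:
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK, &map);
 *	if (error)
 *		return (error);
 */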

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	size_t mapsize;

	mapsize = sizeof(struct machine_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	free(map, M_DEVBUF, mapsize);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = (*t->_dmamap_load_buffer)(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_nsegs = seg + 1;
		map->dm_mapsize = buflen;
	}

	return (error);
}
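
/*
 * Example (sketch): a typical transfer loads a buffer, syncs before
 * handing it to the device, and unloads when the transfer completes.
 * The names sc->sc_dmat, map, buf and len are hypothetical:
 *
 *	if (bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) != 0)
 *		return (EAGAIN);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the transfer and wait for it to finish ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 */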

/*
 * Like _dmamap_load(), but for mbufs.
 */
int
_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = (*t->_dmamap_load_buffer)(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_nsegs = seg + 1;
		map->dm_mapsize = m0->m_pkthdr.len;
	}

	return (error);
}
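
/*
 * Example (sketch): a network driver's start routine could map an
 * outgoing packet chain this way (hypothetical names):
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
 *	    BUS_DMA_NOWAIT);
 *	if (error) {
 *		m_freem(m0);
 *		return;
 *	}
 */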

/*
 * Like _dmamap_load(), but for uios.
 */
int
_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = (*t->_dmamap_load_buffer)(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_nsegs = seg + 1;
		map->dm_mapsize = uio->uio_resid;
	}

	return (error);
}
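
/*
 * Example (sketch): a character device's read/write path can map the
 * caller's iovecs directly.  A uio that describes user space must
 * carry its process in uio_procp (hypothetical names):
 *
 *	error = bus_dmamap_load_uio(sc->sc_dmat, map, uio, BUS_DMA_NOWAIT);
 */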

/*
 * Like _dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	bus_addr_t paddr, baddr, bmask, lastaddr = 0;
	bus_size_t plen, sgsize, mapsize;
	vaddr_t vaddr;
	int first = 1;
	int i, seg = 0;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (nsegs > map->_dm_segcnt || size > map->_dm_size)
		return (EINVAL);

	mapsize = size;
	bmask = ~(map->_dm_boundary - 1);

	for (i = 0; i < nsegs && size > 0; i++) {
		paddr = segs[i].ds_addr;
		vaddr = segs[i]._ds_vaddr;
		plen = MIN(segs[i].ds_len, size);

		while (plen > 0) {
			/*
			 * Compute the segment size, and adjust counts.
			 */
			sgsize = PAGE_SIZE - ((u_long)paddr & PGOFSET);
			if (plen < sgsize)
				sgsize = plen;

			/*
			 * Make sure we don't cross any boundaries.
			 */
			if (map->_dm_boundary > 0) {
				baddr = (paddr + map->_dm_boundary) & bmask;
				if (sgsize > (baddr - paddr))
					sgsize = (baddr - paddr);
			}

			/*
			 * Insert chunk into a segment, coalescing with
			 * previous segment if possible.
			 */
			if (first) {
				map->dm_segs[seg].ds_addr = paddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_paddr = paddr;
				map->dm_segs[seg]._ds_vaddr = vaddr;
				first = 0;
			} else {
				if (paddr == lastaddr &&
				    (map->dm_segs[seg].ds_len + sgsize) <=
				     map->_dm_maxsegsz &&
				     (map->_dm_boundary == 0 ||
				     (map->dm_segs[seg].ds_addr & bmask) ==
				     (paddr & bmask)))
					map->dm_segs[seg].ds_len += sgsize;
				else {
					if (++seg >= map->_dm_segcnt)
						return (EINVAL);
					map->dm_segs[seg].ds_addr = paddr;
					map->dm_segs[seg].ds_len = sgsize;
					map->dm_segs[seg]._ds_paddr = paddr;
					map->dm_segs[seg]._ds_vaddr = vaddr;
				}
			}

			paddr += sgsize;
			vaddr += sgsize;
			plen -= sgsize;
			size -= sgsize;

			lastaddr = paddr;
		}
	}

	map->dm_mapsize = mapsize;
	map->dm_nsegs = seg + 1;
	return (0);
}
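
/*
 * Example (sketch): memory obtained from bus_dmamem_alloc() is loaded
 * through its segment array rather than through a kernel virtual
 * address (hypothetical names):
 *
 *	error = bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size,
 *	    BUS_DMA_NOWAIT);
 */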

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size, int op)
{
	membar_sync();
}
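
/*
 * Note (interpretation of the code above): this implementation relies
 * on cache-coherent DMA, so no cache flush or invalidate is issued;
 * the barrier only orders the CPU's loads and stores relative to the
 * device's accesses.
 */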

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	return _dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, dma_constraint.ucr_low,
	    dma_constraint.ucr_high);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	vm_page_t m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmap_flags = 0;
	const struct kmem_dyn_mode *kd;

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	if (flags & BUS_DMA_NOCACHE)
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg]._ds_vaddr = va;
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pmap_kenter_pa(va, addr,
			    PROT_READ | PROT_WRITE | pmap_flags);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
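
/*
 * Example (sketch): the usual sequence for a DMA-safe control
 * structure is alloc -> map -> create -> load (hypothetical names):
 *
 *	bus_dma_segment_t seg;
 *	int nsegs;
 *	caddr_t kva;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
 *		goto fail;
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, nsegs, PAGE_SIZE, &kva,
 *	    BUS_DMA_NOWAIT) != 0)
 *		goto fail_free;
 */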

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
	pmap_kremove((vaddr_t)kva, size);
	km_free(kva, round_page(size), &kv_any, &kp_none);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	int i, pmapflags = 0;

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return ((segs[i].ds_addr + off) | pmapflags);
	}

	/* Page not found. */
	return (-1);
}
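
/*
 * Example (sketch): a driver's d_mmap entry point can return this
 * value straight to the VM system (hypothetical names):
 *
 *	paddr_t
 *	mydev_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		struct mydev_softc *sc = mydev_lookup(dev);
 *
 *		return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs,
 *		    sc->sc_nsegs, off, prot, 0));
 *	}
 */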

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t lastaddr, baddr, bmask;
	paddr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask  = ~(map->_dm_boundary - 1);
	if (t->_dma_mask != 0)
		bmask &= t->_dma_mask;

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap_extract(pmap, vaddr, &curaddr) == FALSE)
			panic("_dmamap_load_buffer: pmap_extract(%p, %lx) failed!",
			    pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = ((bus_addr_t)curaddr + map->_dm_boundary) &
			    bmask;
			if (sgsize > (baddr - (bus_addr_t)curaddr))
				sgsize = (baddr - (bus_addr_t)curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_paddr = curaddr;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if ((bus_addr_t)curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			     (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     ((bus_addr_t)curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_paddr = curaddr;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = (bus_addr_t)curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}
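
/*
 * Worked example (illustrative numbers): loading an 8 KB buffer whose
 * two 4 KB pages are physically contiguous yields one 8 KB segment,
 * since each chunk after the first starts at lastaddr and the merged
 * length stays within _dm_maxsegsz.  If the pages are not contiguous,
 * the second chunk starts a new segment instead.
 */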

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
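/*
 * Example (sketch): a bus with a 32-bit DMA limit could call this
 * directly to keep all pages below 4 GB (hypothetical constraint):
 *
 *	error = _dmamem_alloc_range(t, size, alignment, boundary,
 *	    segs, nsegs, rsegs, flags, 0, (paddr_t)0xffffffff);
 */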