/*	$NetBSD: bus.c,v 1.29 2005/12/11 12:19:45 christos Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_space(9) and bus_dma(9) implementation for NetBSD/x68k.
 * These are default implementations; some buses may use their own.
 */
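
/*
 * A typical consumer goes through the usual bus_dma(9) lifecycle
 * (sketch only, see the manual page):
 *
 *	bus_dmamap_create -> bus_dmamap_load{,_mbuf,_uio,_raw} ->
 *	bus_dmamap_sync (PRE ops) -> DMA runs ->
 *	bus_dmamap_sync (POST ops) -> bus_dmamap_unload ->
 *	bus_dmamap_destroy
 */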

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.29 2005/12/11 12:19:45 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <m68k/cacheops.h>
#include <machine/bus.h>

#if defined(M68040) || defined(M68060)
static inline void dmasync_flush(bus_addr_t, bus_size_t);
static inline void dmasync_inval(bus_addr_t, bus_size_t);
#endif

/*
 * bus_space(9) allocation is not supported by this default
 * implementation; buses that need it provide their own methods.
 */
int
x68k_bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
    bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags,
    bus_addr_t *bpap, bus_space_handle_t *bshp)
{
	return (EINVAL);
}

/*
 * Since x68k_bus_space_alloc() never succeeds, nothing can
 * legitimately be freed here.
 */
void
x68k_bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	panic("bus_space_free: shouldn't be here");
}


extern paddr_t avail_end;

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
x68k_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct x68k_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct x68k_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct x68k_bus_dmamap *)mapstore;
	map->x68k_dm_size = size;
	map->x68k_dm_segcnt = nsegments;
	map->x68k_dm_maxmaxsegsz = maxsegsz;
	map->x68k_dm_boundary = boundary;
	map->x68k_dm_bounce_thresh = t->_bounce_thresh;
	map->x68k_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
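
/*
 * For example (illustrative arithmetic only): since struct
 * x68k_bus_dmamap already embeds one bus_dma_segment_t, a map created
 * with nsegments = 8 is allocated as
 *
 *	mapsize = sizeof(struct x68k_bus_dmamap)
 *	    + 7 * sizeof(bus_dma_segment_t);
 *
 * i.e. seven additional segments appended to the embedded one.
 */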

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
x68k_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
x68k_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->x68k_dm_maxmaxsegsz);

	if (buflen > map->x68k_dm_size)
		return (EINVAL);

	seg = 0;
	error = x68k_bus_dmamap_load_buffer(map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
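
/*
 * Note on the "p" argument (usage sketch, per bus_dma(9)): pass the
 * process owning a user-space buffer so that its pmap is consulted;
 * pass NULL for kernel buffers, e.g.
 *
 *	error = bus_dmamap_load(t, map, kbuf, len, NULL, BUS_DMA_NOWAIT);
 */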

/*
 * Like x68k_bus_dmamap_load(), but for mbufs.
 */
int
x68k_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->x68k_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("x68k_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->x68k_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = x68k_bus_dmamap_load_buffer(map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
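
/*
 * A network driver's transmit path would use this roughly as follows
 * (hypothetical softc fields, sketch only):
 *
 *	error = bus_dmamap_load_mbuf(t, sc->sc_tx_dmam, m0, BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		bus_dmamap_sync(t, sc->sc_tx_dmam, 0,
 *		    sc->sc_tx_dmam->dm_mapsize, BUS_DMASYNC_PREWRITE);
 *		(program the controller from dm_segs[0 .. dm_nsegs - 1])
 *	}
 */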

/*
 * Like x68k_bus_dmamap_load(), but for uios.
 */
int
x68k_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
#if 0
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->x68k_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_lwp ? uio->uio_lwp->l_proc : NULL;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = x68k_bus_dmamap_load_buffer(map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
#else
	panic("x68k_bus_dmamap_load_uio: not implemented");
#endif
}

/*
 * Like x68k_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
x68k_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("x68k_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
x68k_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.  Note that dm_maxsegsz is reset to the maximum
	 * specified at map creation time.
	 */
	map->dm_maxsegsz = map->x68k_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
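
/*
 * Teardown ordering (sketch): complete the POST sync before unloading,
 * and unload before destroying, e.g.
 *
 *	bus_dmamap_sync(t, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */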

#if defined(M68040) || defined(M68060)
/*
 * Push (and invalidate) dirty data cache lines covering [addr, addr+len).
 * For short regions it is cheaper to walk 16-byte cache lines; for
 * longer ones, whole pages are flushed.
 */
static inline void
dmasync_flush(bus_addr_t addr, bus_size_t len)
{
	bus_addr_t end = addr + len;

	if (len <= 1024) {
		addr = addr & ~0xF;

		do {
			DCFL(addr);
			addr += 16;
		} while (addr < end);
	} else {
		addr = m68k_trunc_page(addr);

		do {
			DCFP(addr);
			addr += PAGE_SIZE;
		} while (addr < end);
	}
}

/*
 * Invalidate (purge, without pushing) data and instruction cache
 * lines covering [addr, addr+len), so that stale lines are not
 * written back over freshly DMA'd memory.
 */
static inline void
dmasync_inval(bus_addr_t addr, bus_size_t len)
{
	bus_addr_t end = addr + len;

	if (len <= 1024) {
		addr = addr & ~0xF;

		do {
			DCPL(addr);
			ICPL(addr);
			addr += 16;
		} while (addr < end);
	} else {
		addr = m68k_trunc_page(addr);

		do {
			DCPP(addr);
			ICPP(addr);
			addr += PAGE_SIZE;
		} while (addr < end);
	}
}
#endif

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
x68k_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#if defined(M68040) || defined(M68060)
	bus_dma_segment_t *ds = map->dm_segs;
	bus_addr_t seg;
	int i;

	/*
	 * Only two operations require cache work here: push dirty
	 * lines before the device reads memory (PREWRITE), and
	 * invalidate stale lines after the device has written
	 * memory (POSTREAD).
	 */
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) == 0)
		return;
#if defined(M68020) || defined(M68030)
	if (mmutype != MMU_68040) {
		if ((ops & BUS_DMASYNC_POSTREAD) == 0)
			return;	/* no copyback cache */
		ICIA();		/* no per-page/per-line control */
		DCIA();
		return;
	}
#endif
	if (offset >= map->dm_mapsize)
		return;	/* driver bug; warn it? */
	if (offset + len > map->dm_mapsize)
		len = map->dm_mapsize - offset; /* driver bug; warn it? */

	/*
	 * Skip over the segments that lie entirely before the offset.
	 */
	i = 0;
	while (ds[i].ds_len <= offset)
		offset -= ds[i++].ds_len;

	/*
	 * Sync each (partially) covered segment.
	 */
	while (len > 0) {
		seg = ds[i].ds_len - offset;
		if (seg > len)
			seg = len;
		if (mmutype == MMU_68040 && (ops & BUS_DMASYNC_PREWRITE))
			dmasync_flush(ds[i].ds_addr + offset, seg);
		if (ops & BUS_DMASYNC_POSTREAD)
			dmasync_inval(ds[i].ds_addr + offset, seg);
		offset = 0;
		len -= seg;
		i++;
	}
#else  /* no 040/060 */
	if ((ops & BUS_DMASYNC_POSTREAD)) {
		ICIA();		/* no per-page/per-line control */
		DCIA();
	}
#endif
}
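
/*
 * Sync ordering in a driver (sketch): PREWRITE after the CPU has
 * filled the buffer and before the device reads it; POSTREAD after
 * the device has written and before the CPU looks at the result,
 * e.g. in a receive interrupt handler:
 *
 *	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
 *	(only now inspect the received data)
 */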

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
x68k_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return (x68k_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
x68k_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
x68k_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("x68k_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
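
/*
 * The whole DMA-safe allocation pipeline, as a driver would use it
 * (sketch; single-segment case):
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	caddr_t kva;
 *
 *	error = bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT);
 *	error = bus_dmamem_map(t, &seg, rseg, size, &kva, BUS_DMA_NOWAIT);
 *	(use kva; tear down with bus_dmamem_unmap() and bus_dmamem_free())
 */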

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
x68k_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
#ifdef DIAGNOSTIC
	if (m68k_page_offset(kva))
		panic("x68k_bus_dmamem_unmap");
#endif

	size = round_page(size);

	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
x68k_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (m68k_page_offset(off))
			panic("x68k_bus_dmamem_mmap: offset unaligned");
		if (m68k_page_offset(segs[i].ds_addr))
			panic("x68k_bus_dmamem_mmap: segment unaligned");
		if (m68k_page_offset(segs[i].ds_len))
			panic("x68k_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (m68k_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}
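
/*
 * A character device's d_mmap entry could delegate here (hypothetical
 * "foo" driver, sketch only):
 *
 *	paddr_t
 *	foo_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		struct foo_softc *sc = foo_lookup(dev);
 *
 *		return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs,
 *		    sc->sc_nsegs, off, prot, 0));
 *	}
 */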


/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
x68k_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
     struct proc *p, int flags, paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->x68k_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, vaddr, &curaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->x68k_dm_bounce_thresh != 0 &&
		    curaddr >= map->x68k_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - m68k_page_offset(vaddr);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->x68k_dm_boundary > 0) {
			baddr = (curaddr + map->x68k_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->x68k_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->x68k_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
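
/*
 * Boundary arithmetic, by example (illustrative numbers): with
 * x68k_dm_boundary = 0x10000, bmask = ~0xFFFF.  For curaddr = 0x1ff00,
 *
 *	baddr = (0x1ff00 + 0x10000) & ~0xFFFF = 0x20000
 *
 * so sgsize is clipped to 0x100 and the chunk stops exactly at the
 * 64 KB boundary; the following chunk then starts a fresh segment.
 */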

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
x68k_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("x68k_bus_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
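
/*
 * Coalescing, by example (illustrative numbers): if uvm_pglistalloc()
 * returns pages at 0x100000, 0x101000 and 0x103000, the loop above
 * produces two segments,
 *
 *	segs[0] = { ds_addr 0x100000, ds_len 0x2000 }
 *	segs[1] = { ds_addr 0x103000, ds_len 0x1000 }
 *
 * and *rsegs = 2.
 */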
694