/*	$OpenBSD: bus_dma.c,v 1.5 2018/01/11 15:49:34 visa Exp $	*/
/*	$NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/extent.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

int	_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int, bus_addr_t *, int *, int);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct powerpc_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct powerpc_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
		return (ENOMEM);

	map = (struct powerpc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_nsegs = 0;		/* no valid mappings */
	map->dm_mapsize = 0;

	*dmamp = map;
	return (0);
}
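
/*
 * Example (illustrative sketch only, not part of the original file):
 * drivers reach this routine through the bus_dmamap_create(9) front-end
 * on their DMA tag.  A hypothetical softc "sc" with tag sc_dmat would
 * set up a single-segment map like this:
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */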

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	size_t mapsize;

	mapsize = sizeof(struct powerpc_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	free(map, M_DEVBUF, mapsize);
}

int
_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, bus_addr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	pmap_t pmap;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap_extract(pmap, vaddr, (paddr_t *)&curaddr) != TRUE) {
			panic("dmamap_load_buffer pmap %p vaddr %lx "
			    "pmap_extract failed", pmap, vaddr);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with the
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			    map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			    (map->dm_segs[seg].ds_addr & bmask) ==
			    (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}
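
/*
 * Worked example (illustrative, with assumed addresses): loading a
 * buffer at kernel va 0x1000 of length 3 * PAGE_SIZE whose pages map to
 * physically contiguous pages yields a single dm_segs[0] entry of
 * length 3 * PAGE_SIZE, since each page's curaddr equals lastaddr and
 * the coalescing test above extends ds_len instead of opening a new
 * segment.  A physically discontiguous page, or one that would push
 * ds_len past _dm_maxsegsz or across _dm_boundary, starts dm_segs[1]
 * instead.
 */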

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags)
{
	bus_addr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
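
/*
 * Example (illustrative sketch only): the usual driver life cycle
 * around this routine, via the bus_dmamap_load(9) front-end, for a
 * kernel buffer "buf" of "len" bytes on a previously created "map":
 *
 *	if (bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) != 0)
 *		return (EAGAIN);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the transfer, wait for completion ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 */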

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	bus_addr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
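
/*
 * Example (illustrative sketch only): a network driver's transmit path
 * typically hands a packet header mbuf chain straight to the
 * bus_dmamap_load_mbuf(9) front-end; EFBIG from the per-mbuf walk above
 * is commonly handled by defragmenting the chain and retrying:
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
 *	    BUS_DMA_NOWAIT);
 *	if (error == EFBIG && m_defrag(m0, M_DONTWAIT) == 0)
 *		error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
 *		    BUS_DMA_NOWAIT);
 */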

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags)
{
	bus_addr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (resid > map->_dm_size)
		return (EINVAL);

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
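
/*
 * Example (illustrative sketch only): character-device read/write
 * routines that want to DMA directly to or from the caller's iovecs can
 * pass the struct uio they were handed straight through the
 * bus_dmamap_load_uio(9) front-end:
 *
 *	error = bus_dmamap_load_uio(sc->sc_dmat, map, uio, BUS_DMA_NOWAIT);
 *	if (error)
 *		return (error);
 */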

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	bus_addr_t paddr, baddr, bmask, lastaddr = 0;
	bus_size_t plen, sgsize, mapsize;
	int first = 1;
	int i, seg = 0;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (nsegs > map->_dm_segcnt || size > map->_dm_size)
		return (EINVAL);

	mapsize = size;
	bmask = ~(map->_dm_boundary - 1);

	for (i = 0; i < nsegs && size > 0; i++) {
		paddr = segs[i].ds_addr;
		plen = MIN(segs[i].ds_len, size);

		while (plen > 0) {
			/*
			 * Compute the segment size, and adjust counts.
			 */
			sgsize = PAGE_SIZE - ((u_long)paddr & PGOFSET);
			if (plen < sgsize)
				sgsize = plen;

			if (paddr > dma_constraint.ucr_high)
				panic("Non dma-reachable buffer at paddr %#lx(raw)",
				    paddr);

			/*
			 * Make sure we don't cross any boundaries.
			 */
			if (map->_dm_boundary > 0) {
				baddr = (paddr + map->_dm_boundary) & bmask;
				if (sgsize > (baddr - paddr))
					sgsize = (baddr - paddr);
			}

			/*
			 * Insert chunk into a segment, coalescing with
			 * previous segment if possible.
			 */
			if (first) {
				map->dm_segs[seg].ds_addr = paddr;
				map->dm_segs[seg].ds_len = sgsize;
				first = 0;
			} else {
				if (paddr == lastaddr &&
				    (map->dm_segs[seg].ds_len + sgsize) <=
				    map->_dm_maxsegsz &&
				    (map->_dm_boundary == 0 ||
				    (map->dm_segs[seg].ds_addr & bmask) ==
				    (paddr & bmask)))
					map->dm_segs[seg].ds_len += sgsize;
				else {
					if (++seg >= map->_dm_segcnt)
						return (EINVAL);
					map->dm_segs[seg].ds_addr = paddr;
					map->dm_segs[seg].ds_len = sgsize;
				}
			}

			paddr += sgsize;
			plen -= sgsize;
			size -= sgsize;

			lastaddr = paddr;
		}
	}

	map->dm_mapsize = mapsize;
	map->dm_nsegs = seg + 1;
	return (0);
}
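
/*
 * Example (illustrative sketch only): descriptor rings are usually
 * allocated with bus_dmamem_alloc(9) and then loaded here through the
 * bus_dmamap_load_raw(9) front-end, without ever taking a kernel va:
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0 ||
 *	    bus_dmamap_load_raw(sc->sc_dmat, map, &seg, rseg,
 *	    PAGE_SIZE, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 */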

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
	int i;
	bus_size_t minlen, wlen;
	bus_addr_t pa, addr;
	struct vm_page *pg;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i].ds_addr + offset;

		switch (op) {
		case BUS_DMASYNC_POSTWRITE:
			for (pa = trunc_page(addr), wlen = 0;
			    pa < round_page(addr + minlen);
			    pa += PAGE_SIZE) {
				pg = PHYS_TO_VM_PAGE(pa);
				if (pg != NULL)
					atomic_clearbits_int(&pg->pg_flags,
					    PG_PMAP_EXE);
			}
		}

	}
}
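
/*
 * Example (illustrative sketch only): for a device-to-memory transfer
 * the caller brackets the DMA with the read-direction sync ops.  On
 * this platform only the POSTWRITE case above does any work, but
 * portable drivers issue the full sequence anyway:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREREAD);
 *	... device DMAs into the buffer ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_POSTREAD);
 */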

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	return (_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, -1));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}
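
/*
 * Example (illustrative sketch only): the free side takes the same
 * segment array that bus_dmamem_alloc(9) filled in, together with the
 * segment count it returned via *rsegs:
 *
 *	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 */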

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmapflags = 0, error;
	const struct kmem_dyn_mode *kd;

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
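
/*
 * Example (illustrative sketch only): the full life cycle for DMA-safe
 * memory the CPU also needs to touch, via the bus_dmamem_*(9)
 * front-ends; adding BUS_DMA_NOCACHE to the map call requests an
 * uncached mapping, as handled above:
 *
 *	bus_dma_segment_t seg;
 *	caddr_t kva;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
 *	    &rseg, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, size, &kva,
 *	    BUS_DMA_NOWAIT) != 0) {
 *		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 *		return (ENOMEM);
 *	}
 *	... use kva; later: ...
 *	bus_dmamem_unmap(sc->sc_dmat, kva, size);
 *	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 */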

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	km_free(kva, round_page(size), &kv_any, &kp_none);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	int i, pmapflags = 0;

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return ((segs[i].ds_addr + off) | pmapflags);
	}

	/* Page not found. */
	return (-1);
}
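
/*
 * Example (illustrative sketch only, all foo_* names hypothetical): a
 * character device's mmap entry point can expose its DMA buffer to
 * userland by deferring to the bus_dmamem_mmap(9) front-end, one page
 * per call:
 *
 *	paddr_t
 *	foommap(dev_t dev, off_t off, int prot)
 *	{
 *		struct foo_softc *sc = foo_lookup(dev);
 *
 *		if (off < 0 || off >= sc->sc_bufsize)
 *			return (-1);
 *		return (bus_dmamem_mmap(sc->sc_dmat, &sc->sc_seg, 1,
 *		    off, prot, 0));
 *	}
 */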

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, bus_addr_t low, bus_addr_t high)
{
	vaddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high,
	    alignment, boundary, &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}