/*	$OpenBSD: bus_dma.c,v 1.17 2019/12/20 13:34:41 visa Exp $ */

/*
 * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <mips64/cache.h>
#include <machine/cpu.h>
#include <machine/autoconf.h>

#include <machine/bus.h>

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct machine_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct machine_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
		return (ENOMEM);

	map = (struct machine_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);

	*dmamp = map;
	return (0);
}
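
/*
 * Illustrative sketch, not part of the original file: drivers normally
 * reach this routine through the bus_dmamap_create(9) front-end of their
 * DMA tag.  The softc member and variable names below are hypothetical.
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &map);
 *	if (error)
 *		return (error);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */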

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	size_t mapsize;

	mapsize = sizeof(struct machine_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	free(map, M_DEVBUF, mapsize);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = (*t->_dmamap_load_buffer)(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_nsegs = seg + 1;
		map->dm_mapsize = buflen;
	}

	return (error);
}
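
/*
 * Illustrative sketch (hypothetical names, not part of the original
 * file): loading a linear kernel buffer via the bus_dmamap_load(9)
 * front-end, then synchronizing before the device reads from it.
 *
 *	error = bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		...start the transfer; POSTWRITE and unload when done...
 *	}
 */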

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = (*t->_dmamap_load_buffer)(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_nsegs = seg + 1;
		map->dm_mapsize = m0->m_pkthdr.len;
	}

	return (error);
}
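
/*
 * Illustrative sketch (hypothetical names): a network driver's transmit
 * path typically maps an outgoing packet this way, passing the packet
 * header mbuf and syncing the map before handing it to the hardware.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
 *	    BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
 *		    BUS_DMASYNC_PREWRITE);
 */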

/*
 * Like _dmamap_load(), but for uios.
 */
int
_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = (*t->_dmamap_load_buffer)(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_nsegs = seg + 1;
		map->dm_mapsize = uio->uio_resid;
	}

	return (error);
}
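
/*
 * Illustrative sketch (hypothetical names): mapping a uio, e.g. for a
 * character device doing DMA straight from a caller's buffer.  For a
 * UIO_USERSPACE uio, uio_procp must identify the owning process, as the
 * DIAGNOSTIC check above enforces.
 *
 *	error = bus_dmamap_load_uio(sc->sc_dmat, map, uio, BUS_DMA_NOWAIT);
 */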

/*
 * Like _dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	bus_addr_t paddr, baddr, bmask, lastaddr = 0;
	bus_size_t plen, sgsize, mapsize;
	int first = 1;
	int i, seg = 0;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (nsegs > map->_dm_segcnt || size > map->_dm_size)
		return (EINVAL);

	mapsize = size;
	bmask = ~(map->_dm_boundary - 1);

	for (i = 0; i < nsegs && size > 0; i++) {
		paddr = segs[i].ds_addr;
		plen = MIN(segs[i].ds_len, size);

		while (plen > 0) {
			/*
			 * Compute the segment size, and adjust counts.
			 */
			sgsize = PAGE_SIZE - ((u_long)paddr & PGOFSET);
			if (plen < sgsize)
				sgsize = plen;

			if (paddr > dma_constraint.ucr_high)
				panic("Non dma-reachable buffer at "
				    "paddr %#lx(raw)", paddr);

			/*
			 * Make sure we don't cross any boundaries.
			 */
			if (map->_dm_boundary > 0) {
				baddr = (paddr + map->_dm_boundary) & bmask;
				if (sgsize > (baddr - paddr))
					sgsize = (baddr - paddr);
			}

			/*
			 * Insert chunk into a segment, coalescing with
			 * previous segment if possible.
			 */
			if (first) {
				map->dm_segs[seg].ds_addr = paddr;
				map->dm_segs[seg].ds_len = sgsize;
				first = 0;
			} else {
				if (paddr == lastaddr &&
				    (map->dm_segs[seg].ds_len + sgsize) <=
				     map->_dm_maxsegsz &&
				    (map->_dm_boundary == 0 ||
				     (map->dm_segs[seg].ds_addr & bmask) ==
				     (paddr & bmask)))
					map->dm_segs[seg].ds_len += sgsize;
				else {
					if (++seg >= map->_dm_segcnt)
						return (EINVAL);
					map->dm_segs[seg].ds_addr = paddr;
					map->dm_segs[seg].ds_len = sgsize;
				}
			}

			paddr += sgsize;
			plen -= sgsize;
			size -= sgsize;

			lastaddr = paddr;
		}
	}

	map->dm_mapsize = mapsize;
	map->dm_nsegs = seg + 1;
	return (0);
}
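
/*
 * Illustrative sketch (hypothetical names): descriptor rings and other
 * device-visible structures are usually backed by bus_dmamem_alloc()
 * and then loaded into a map from the raw segments.
 *
 *	error = bus_dmamap_load_raw(sc->sc_dmat, ringmap, segs, nsegs,
 *	    ringsize, BUS_DMA_NOWAIT);
 */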

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size, int op)
{
	int nsegs;
	int curseg;
	struct cpu_info *ci = curcpu();

	nsegs = map->dm_nsegs;
	curseg = 0;

	while (size && nsegs) {
		paddr_t paddr;
		vaddr_t vaddr;
		bus_size_t ssize;

		ssize = map->dm_segs[curseg].ds_len;
		paddr = map->dm_segs[curseg]._ds_paddr;
		vaddr = map->dm_segs[curseg]._ds_vaddr;

		if (addr != 0) {
			if (addr >= ssize) {
				addr -= ssize;
				ssize = 0;
			} else {
				vaddr += addr;
				paddr += addr;
				ssize -= addr;
				addr = 0;
			}
		}
		if (ssize > size)
			ssize = size;

		if (IS_XKPHYS(vaddr) && XKPHYS_TO_CCA(vaddr) == CCA_NC) {
			size -= ssize;
			ssize = 0;
		}

		if (ssize != 0) {
			/*
			 * If only PREWRITE is requested, writeback.
			 * PREWRITE with PREREAD writebacks
			 * and invalidates (if noncoherent) *all* cache levels.
			 * Otherwise, just invalidate (if noncoherent).
			 */
			if (op & BUS_DMASYNC_PREWRITE) {
				if (op & BUS_DMASYNC_PREREAD)
					Mips_IOSyncDCache(ci, vaddr,
					    ssize, CACHE_SYNC_X);
				else
					Mips_IOSyncDCache(ci, vaddr,
					    ssize, CACHE_SYNC_W);
			} else
			if (op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD)) {
				Mips_IOSyncDCache(ci, vaddr,
				    ssize, CACHE_SYNC_R);
			}
			size -= ssize;
		}
		curseg++;
		nsegs--;
	}

	if (size != 0) {
		panic("_dmamap_sync: ran off map!");
	}
}
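
/*
 * Illustrative sketch (hypothetical names) of the synchronization pairs
 * that drive the cache operations above.  For a CPU-to-device transfer:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREWRITE);
 *	...device reads the buffer...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_POSTWRITE);
 *
 * For a device-to-CPU transfer:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREREAD);
 *	...device writes the buffer...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_POSTREAD);
 */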

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	return _dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)-1);
}
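
/*
 * Illustrative sketch (hypothetical names): the usual allocate/map
 * sequence for DMA-safe memory, here one physically contiguous segment
 * mapped coherently for the CPU.
 *
 *	bus_dma_segment_t seg;
 *	caddr_t kva;
 *	int rsegs, error;
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
 *	    &seg, 1, &rsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rsegs, size,
 *		    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 */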

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	vm_page_t m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE((*t->_device_to_pa)(addr));
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;
	const struct kmem_dyn_mode *kd;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | pmap_flags);
			if (error) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached.
			 * If there are no multiple mappings of that
			 * page, this amounts to a noop.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PGF_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
	if (IS_XKPHYS((vaddr_t)kva))
		return;

	km_free(kva, round_page(size), &kv_any, &kp_none);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return ((*t->_device_to_pa)(segs[i].ds_addr) + off);
	}

	/* Page not found. */
	return (-1);
}
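
/*
 * Illustrative sketch (hypothetical names): a driver's mmap entry point
 * can forward page offsets to this routine so user space can map
 * DMA-safe memory the driver allocated earlier.
 *
 *	paddr_t
 *	foo_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		struct foo_softc *sc = foo_lookup(dev);
 *
 *		return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs,
 *		    sc->sc_nsegs, off, prot, 0));
 *	}
 */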

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t lastaddr, baddr, bmask;
	paddr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);
	if (t->_dma_mask != 0)
		bmask &= t->_dma_mask;

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap_extract(pmap, vaddr, &curaddr) == 0)
			panic("_dmamap_load_buffer: pmap_extract(%p, %lx) failed!",
			    pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = ((bus_addr_t)curaddr + map->_dm_boundary) &
			    bmask;
			if (sgsize > (baddr - (bus_addr_t)curaddr))
				sgsize = (baddr - (bus_addr_t)curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr =
			    (*t->_pa_to_device)(curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_paddr = curaddr;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if ((bus_addr_t)curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     ((bus_addr_t)curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    (*t->_pa_to_device)(curaddr);
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_paddr = curaddr;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = (bus_addr_t)curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr =
	    (*t->_pa_to_device)(VM_PAGE_TO_PHYS(m));
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_dmamem_alloc_range");
		}
#endif
		curaddr = (*t->_pa_to_device)(curaddr);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}