/*	$NetBSD: bus_dma.c,v 1.36 2022/07/26 20:08:54 andvar Exp $	*/
/*	NetBSD: bus_dma.c,v 1.20 2000/01/10 03:24:36 simonb Exp	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.36 2022/07/26 20:08:54 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>

#include <mips/cache.h>

#define _ARC_BUS_DMA_PRIVATE
#include <sys/bus.h>

paddr_t	kvtophys(vaddr_t);	/* XXX */

static int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t,
		    void *, bus_size_t, struct vmspace *, int, paddr_t *,
		    int *, int);

void
_bus_dma_tag_init(bus_dma_tag_t t)
{

	t->dma_offset = 0;

	t->_dmamap_create = _bus_dmamap_create;
	t->_dmamap_destroy = _bus_dmamap_destroy;
	t->_dmamap_load = _bus_dmamap_load;
	t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf;
	t->_dmamap_load_uio = _bus_dmamap_load_uio;
	t->_dmamap_load_raw = _bus_dmamap_load_raw;
	t->_dmamap_unload = _bus_dmamap_unload;
	t->_dmamap_sync = _bus_dmamap_sync;
	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;
}

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct arc_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

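/*
 * For example, a map created with nsegments == 1 occupies exactly
 * sizeof(struct arc_bus_dmamap), since the structure already embeds
 * one bus_dma_segment_t; each additional segment adds another
 * sizeof(bus_dma_segment_t).
 */
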
/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arc_bus_dmamap *map;
	void *mapstore;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	map = (struct arc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_vmspace = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}

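/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file; "sc_dmat" and "sc_xfermap" are assumed softc members):
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_xfermap) != 0)
 *		return ENOMEM;
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_xfermap);
 */
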
/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t baddr, bmask;
	paddr_t curaddr, lastaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void)pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, &curaddr);
		else
			curaddr = kvtophys(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr + t->dma_offset;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			map->dm_segs[seg]._ds_paddr = curaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg]._ds_paddr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    curaddr + t->dma_offset;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
				map->dm_segs[seg]._ds_paddr = curaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX better return value here? */

	return 0;
}

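/*
 * Illustration of the boundary clipping above (comment only): with a
 * 2KB boundary (_dm_boundary == 0x800, bmask == ~0x7ff), a chunk
 * starting at physical address 0x1f400 is clipped to 0x400 bytes so
 * that it stops at the next boundary, 0x1f800; the remainder is
 * handled on the next trip through the loop and starts a new segment,
 * since it lies in a different boundary-aligned region.  When
 * _dm_boundary is 0, bmask is never consulted (both uses above are
 * guarded).
 */
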
/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= ARC_DMAMAP_COHERENT;
	}
	return error;
}

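/*
 * A minimal transfer sketch (hypothetical driver code, not part of
 * this file), writing a kernel buffer "buf" of "len" bytes to a
 * device; "sc_dmat" and "sc_xfermap" are assumed softc members:
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_xfermap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_xfermap, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		... start the DMA using sc->sc_xfermap->dm_segs[] ...
 *	}
 */
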
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
	}
	return error;
}

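/*
 * Typical network drivers call this on a transmit chain; when EFBIG is
 * returned because the chain has more fragments than the map allows,
 * the usual recovery (not shown here) is to copy or defragment the
 * chain into fewer mbufs and retry the load.
 */
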
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = uio->uio_vmspace;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~ARC_DMAMAP_COHERENT;
}

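/*
 * Note that unloading performs no cache synchronization; the caller is
 * expected to have finished the DMA and issued the appropriate
 * POSTREAD/POSTWRITE sync (no-ops in this implementation) beforehand.
 */
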
/*
 * Common function for DMA map synchronization.  May be called by
 * chipset-specific DMA map synchronization functions.
 *
 * This version works with the virtually-indexed, write-back cache
 * found in the MIPS-3 CPUs available in ARC machines.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	bus_size_t minlen;
	bus_addr_t addr, start, end, preboundary, firstboundary, lastboundary;
	int i, useindex;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & ARC_DMAMAP_COHERENT)
		return;

	/*
	 * If the mapping belongs to the kernel, or it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
	    map->_dm_vmspace == curproc->p_vmspace))
		useindex = 0;
	else
		useindex = 1;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(addr + offset, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			offset = 0;
			len -= minlen;
			continue;
		}

		start = addr + offset;
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(start, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
			end = start + minlen;
			preboundary = start & ~mci->mci_dcache_align_mask;
			firstboundary = (start + mci->mci_dcache_align_mask)
			    & ~mci->mci_dcache_align_mask;
			lastboundary = end & ~mci->mci_dcache_align_mask;
			if (preboundary < start && preboundary < lastboundary)
				mips_dcache_wbinv_range(preboundary,
				    mci->mci_dcache_align);
			if (firstboundary < lastboundary)
				mips_dcache_inv_range(firstboundary,
				    lastboundary - firstboundary);
			if (lastboundary < end)
				mips_dcache_wbinv_range(lastboundary,
				    mci->mci_dcache_align);
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(start, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}

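/*
 * Worked example of the PREREAD case above (illustration only): with a
 * 32-byte cache line, syncing 0xa4..0x143 gives preboundary 0xa0,
 * firstboundary 0xc0 and lastboundary 0x140.  The partial lines at
 * 0xa0 and 0x140 are written back and invalidated so that unrelated
 * data sharing those lines is not lost, while the fully covered range
 * 0xc0..0x13f is simply invalidated.
 */
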
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, pmap_limits.avail_start,
	    trunc_page(pmap_limits.avail_end));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

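	/*
	 * Note that the value computed here overrides the "high" argument
	 * supplied by the caller; allocation is always constrained to lie
	 * below the last page of managed physical memory.
	 */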
	high = pmap_limits.avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg]._ds_paddr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_addr = segs[curseg]._ds_paddr + t->dma_offset;
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < pmap_limits.avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%llx\n", (long long)curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr + t->dma_offset;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_paddr = curaddr;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}

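/*
 * Example of the segment folding above (illustration only): a 16KB
 * request satisfied with pages at 0x40000-0x42fff and 0x48000-0x48fff
 * is returned as two segments of ds_len 0x3000 and 0x1000, each
 * ds_addr offset by t->dma_offset for use by the device.
 */
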
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg]._ds_paddr;
		    addr < (segs[curseg]._ds_paddr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0]._ds_paddr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0]._ds_paddr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg]._ds_vaddr = va;
		for (addr = segs[curseg]._ds_paddr;
		    addr < (segs[curseg]._ds_paddr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* XXX Do something about COHERENT here. */
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}

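/*
 * A minimal allocation sketch (hypothetical driver code, not part of
 * this file) for a one-page descriptor area mapped uncached through
 * KSEG1; "sc_dmat" and "sc_desc" are assumed softc members, and
 * "bus_dma_segment_t seg; int rseg;" are assumed locals:
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
 *		    (void **)&sc->sc_desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 */
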
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (void *)MIPS_KSEG0_START &&
	    kva < (void *)MIPS_KSEG2_START)
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i]._ds_paddr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return mips_btop(segs[i]._ds_paddr + off);
	}

	/* Page not found. */
	return -1;
}