1 /*	$NetBSD: bus_dma.c,v 1.49 2024/10/21 06:47:10 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1998, 2001, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
34 
35 __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.49 2024/10/21 06:47:10 skrll Exp $");
36 
37 #define _MIPS_BUS_DMA_PRIVATE
38 
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/cpu.h>
42 #include <sys/device.h>
43 #include <sys/evcnt.h>
44 #include <sys/kernel.h>
45 #include <sys/kmem.h>
46 #include <sys/mbuf.h>
47 #include <sys/proc.h>
48 #include <sys/systm.h>
49 
50 #include <uvm/uvm.h>
51 
52 #include <mips/cache.h>
53 #ifdef _LP64
54 #include <mips/mips3_pte.h>
55 #endif
56 
57 #include <mips/locore.h>
58 
59 const struct mips_bus_dmamap_ops mips_bus_dmamap_ops =
60     _BUS_DMAMAP_OPS_INITIALIZER;
61 const struct mips_bus_dmamem_ops mips_bus_dmamem_ops =
62     _BUS_DMAMEM_OPS_INITIALIZER;
63 const struct mips_bus_dmatag_ops mips_bus_dmatag_ops =
64     _BUS_DMATAG_OPS_INITIALIZER;
65 
66 static struct evcnt bus_dma_creates =
67 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
68 static struct evcnt bus_dma_bounced_creates =
69 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced creates");
70 static struct evcnt bus_dma_loads =
71 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "loads");
72 static struct evcnt bus_dma_bounced_loads =
73 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced loads");
74 static struct evcnt bus_dma_read_bounces =
75 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "read bounces");
76 static struct evcnt bus_dma_write_bounces =
77 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "write bounces");
78 static struct evcnt bus_dma_bounced_unloads =
79 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced unloads");
80 static struct evcnt bus_dma_unloads =
81 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "unloads");
82 static struct evcnt bus_dma_bounced_destroys =
83 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
84 static struct evcnt bus_dma_destroys =
85 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");
86 
87 EVCNT_ATTACH_STATIC(bus_dma_creates);
88 EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
89 EVCNT_ATTACH_STATIC(bus_dma_loads);
90 EVCNT_ATTACH_STATIC(bus_dma_bounced_loads);
91 EVCNT_ATTACH_STATIC(bus_dma_read_bounces);
92 EVCNT_ATTACH_STATIC(bus_dma_write_bounces);
93 EVCNT_ATTACH_STATIC(bus_dma_unloads);
94 EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
95 EVCNT_ATTACH_STATIC(bus_dma_destroys);
96 EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);
97 
98 #define	STAT_INCR(x)	(bus_dma_ ## x.ev_count++)
99 
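/*
 * Illustrative only: a typical driver exercises these entry points in
 * roughly the following order (a sketch of the bus_dma(9) lifecycle,
 * not code taken from this file):
 *
 *	bus_dmamap_create(t, size, nseg, maxsegsz, 0, BUS_DMA_WAITOK, &map);
 *	bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_WAITOK);
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... device reads the buffer via DMA ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */
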
100 paddr_t kvtophys(vaddr_t);	/* XXX */
101 
102 /*
103  * Utility function to load a linear buffer.  segp contains the starting
104  * segment on entry, and the ending segment on exit.  first indicates
105  * whether this is the first invocation of this function.
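 *
 * Callers that load a buffer in multiple chunks (see the mbuf, uio and
 * raw loaders below) pass the same segp across calls, set first to
 * false after the first chunk, and pass the previous chunk's ending VA
 * as lastvaddr so contiguous chunks can be coalesced.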
106  */
107 static int
108 _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
109     void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
110     int *segp, vaddr_t lastvaddr, bool first)
111 {
112 	paddr_t baddr, curaddr, lastaddr;
113 	vaddr_t vaddr = (vaddr_t)buf;
114 	bus_dma_segment_t *ds = &map->dm_segs[*segp];
115 	bus_dma_segment_t * const eds = &map->dm_segs[map->_dm_segcnt];
116 	const bus_addr_t bmask = ~(map->_dm_boundary - 1);
117 	const bool d_cache_coherent =
118 	    (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) != 0;
119 
120 	lastaddr = ds->ds_addr + ds->ds_len;
121 
122 	while (buflen > 0) {
123 		/*
124 		 * Get the physical address for this segment.
125 		 */
126 		if (!VMSPACE_IS_KERNEL_P(vm))
127 			(void) pmap_extract(vm_map_pmap(&vm->vm_map), vaddr,
128 			    &curaddr);
129 		else
130 			curaddr = kvtophys(vaddr);
131 
132 		/*
133 		 * If we're beyond the current DMA window, indicate
134 		 * that and try to fall back onto something else.
135 		 */
136 		if (curaddr < t->_bounce_alloc_lo
137 		    || (t->_bounce_alloc_hi != 0
138 			&& curaddr >= t->_bounce_alloc_hi))
139 			return EINVAL;
140 #ifdef BUS_DMA_DEBUG
141 		printf("dma: addr %#"PRIxPADDR" -> %#"PRIxPADDR"\n", curaddr,
142 		    (curaddr - t->_bounce_alloc_lo) + t->_wbase);
143 #endif
144 		curaddr = (curaddr - t->_bounce_alloc_lo) + t->_wbase;
145 
146 		/*
147 		 * Compute the segment size, and adjust counts.
148 		 */
149 		bus_size_t sgsize = PAGE_SIZE - ((uintptr_t)vaddr & PGOFSET);
150 		if (sgsize > buflen) {
151 			sgsize = buflen;
152 		}
153 		if (sgsize > map->dm_maxsegsz) {
154 			sgsize = map->dm_maxsegsz;
155 		}
156 
157 		/*
158 		 * Make sure we don't cross any boundaries.
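		 * E.g. with _dm_boundary = 0x10000 (bmask = ~0xffff),
		 * curaddr = 0x1f800 gives baddr = 0x20000 and sgsize is
		 * clipped to 0x800, ending the segment at the boundary.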
159 		 */
160 		if (map->_dm_boundary > 0) {
161 			baddr = (curaddr + map->_dm_boundary) & bmask;
162 			if (sgsize > baddr - curaddr) {
163 				sgsize = baddr - curaddr;
164 			}
165 		}
166 
167 		/*
168 		 * Insert chunk into a segment, coalescing with
169 		 * the previous segment if possible.
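		 * Coalescing requires physical contiguity with the previous
		 * chunk, virtual contiguity unless the D-cache is coherent
		 * or free of virtual aliases, room within dm_maxsegsz, and
		 * no boundary crossing.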
170 		 */
171 		if (!first
172 		    && curaddr == lastaddr
173 		    && (d_cache_coherent
174 #ifndef __mips_o32
175 			|| !MIPS_CACHE_VIRTUAL_ALIAS
176 #endif
177 			|| vaddr == lastvaddr)
178 		    && (ds->ds_len + sgsize) <= map->dm_maxsegsz
179 		    && (map->_dm_boundary == 0
180 			|| ((ds->ds_addr ^ curaddr) & bmask) == 0)) {
181 			ds->ds_len += sgsize;
182 		} else {
183 			if (!first && ++ds >= eds)
184 				break;
185 			ds->ds_addr = curaddr;
186 			ds->ds_len = sgsize;
187 			ds->_ds_vaddr = (intptr_t)vaddr;
188 			first = false;
189 			/*
190 			 * If this segment uses the correct color, see whether
191 			 * we can use a direct-mapped VA for the segment.
192 			 */
193 			if (!mips_cache_badalias(curaddr, vaddr)) {
194 #ifdef __mips_o32
195 				if (MIPS_KSEG0_P(curaddr + sgsize - 1)) {
196 					ds->_ds_vaddr =
197 					    MIPS_PHYS_TO_KSEG0(curaddr);
198 				}
199 #else
200 				/*
201 				 * All physical addresses can be accessed
202 				 * via XKPHYS.
203 				 */
204 				ds->_ds_vaddr =
205 				    MIPS_PHYS_TO_XKPHYS_CACHED(curaddr);
206 #endif
207 			}
208 			/* Make sure this is a valid kernel address */
209 			KASSERTMSG(ds->_ds_vaddr < 0,
210 			    "_ds_vaddr %#"PRIxREGISTER, ds->_ds_vaddr);
211 		}
212 
213 		lastaddr = curaddr + sgsize;
214 		vaddr += sgsize;
215 		buflen -= sgsize;
216 		lastvaddr = vaddr;
217 	}
218 
219 	*segp = ds - map->dm_segs;
220 
221 	/*
222 	 * Did we fit?
223 	 */
224 	if (buflen != 0) {
225 		/*
226 		 * If there is a chained window, we will automatically
227 		 * fall back to it.
228 		 */
229 		return EFBIG;		/* XXX better return value here? */
230 	}
231 
232 	return 0;
233 }
234 
235 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
236 static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
237     bus_size_t size, int flags);
238 static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
239 static int _bus_dma_uiomove(void *buf, struct uio *uio, size_t n,
240     int direction);
241 
242 static int
243 _bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
244     size_t buflen, int buftype, int flags)
245 {
246 	struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
247 	struct vmspace * const vm = vmspace_kernel();
248 	int seg, error;
249 
250 	KASSERT(cookie != NULL);
251 	KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);
252 
253 	/*
254 	 * Allocate bounce pages, if necessary.
255 	 */
256 	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
257 		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
258 		if (error)
259 			return error;
260 	}
261 
262 	/*
263 	 * Cache a pointer to the caller's buffer and load the DMA map
264 	 * with the bounce buffer.
265 	 */
266 	cookie->id_origbuf = buf;
267 	cookie->id_origbuflen = buflen;
268 	cookie->id_buftype = buftype;
269 	seg = 0;
270 	error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
271 	    buflen, vm, flags, &seg, 0, true);
272 	if (error)
273 		return error;
274 
275 	STAT_INCR(bounced_loads);
276 	map->dm_mapsize = buflen;
277 	map->dm_nsegs = seg + 1;
278 	map->_dm_vmspace = vm;
279 	/*
280 	 * If our cache is coherent, then the map must be coherent too.
281 	 */
282 	if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
283 		map->_dm_flags |= _BUS_DMAMAP_COHERENT;
284 
285 	/* ...so _bus_dmamap_sync() knows we're bouncing */
286 	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
287 	return 0;
288 }
289 #endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
290 
291 static size_t
292 _bus_dmamap_mapsize(int const nsegments)
293 {
294 	KASSERT(nsegments > 0);
295 	return sizeof(struct mips_bus_dmamap) +
296 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
297 }
298 
299 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
300 static size_t
301 _bus_dmamap_cookiesize(int const nsegments)
302 {
303 	KASSERT(nsegments > 0);
304 	return sizeof(struct mips_bus_dma_cookie) +
305 	    (sizeof(bus_dma_segment_t) * nsegments);
306 }
307 #endif
308 
309 /*
310  * Common function for DMA map creation.  May be called by bus-specific
311  * DMA map creation functions.
312  */
313 int
314 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
315     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
316 {
317 	struct mips_bus_dmamap *map;
318 	void *mapstore;
319 	const int allocflags =
320 	    ((flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);
321 
322 	int error = 0;
323 
324 	/*
325 	 * Allocate and initialize the DMA map.  The end of the map
326 	 * is a variable-sized array of segments, so we allocate enough
327 	 * room for them in one shot.
328 	 *
329 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
330 	 * of ALLOCNOW notifies others that we've reserved these resources,
331 	 * and they are not to be freed.
332 	 *
333 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
334 	 * the (nsegments - 1).
335 	 */
336 	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
337 	     allocflags)) == NULL)
338 		return ENOMEM;
339 
340 	map = mapstore;
341 	map->_dm_size = size;
342 	map->_dm_segcnt = nsegments;
343 	map->_dm_maxmaxsegsz = maxsegsz;
344 	map->_dm_boundary = boundary;
345 	map->_dm_bounce_thresh = t->_bounce_thresh;
346 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
347 	map->_dm_vmspace = NULL;
348 	map->dm_maxsegsz = maxsegsz;
349 	map->dm_mapsize = 0;		/* no valid mappings */
350 	map->dm_nsegs = 0;
351 
352 	*dmamp = map;
353 
354 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
355 	struct mips_bus_dma_cookie *cookie;
356 	int cookieflags;
357 	void *cookiestore;
358 
359 	if (t->_bounce_thresh == 0 || _BUS_AVAIL_END <= t->_bounce_thresh - 1)
360 		map->_dm_bounce_thresh = 0;
361 	cookieflags = 0;
362 
363 	if (t->_may_bounce != NULL) {
364 		error = (*t->_may_bounce)(t, map, flags, &cookieflags);
365 		if (error != 0)
366 			goto out;
367 	}
368 
369 	if (map->_dm_bounce_thresh != 0)
370 		cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;
371 
372 	if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
373 		STAT_INCR(creates);
374 		return 0;
375 	}
376 
377 	/*
378 	 * Allocate our cookie.
379 	 */
380 	if ((cookiestore = kmem_zalloc(_bus_dmamap_cookiesize(nsegments),
381 		    allocflags)) == NULL) {
382 		error = ENOMEM;
383 		goto out;
384 	}
385 	cookie = (struct mips_bus_dma_cookie *)cookiestore;
386 	cookie->id_flags = cookieflags;
387 	map->_dm_cookie = cookie;
388 	STAT_INCR(bounced_creates);
389 
390 	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
391  out:
392 	if (error)
393 		_bus_dmamap_destroy(t, map);
394 #else
395 	STAT_INCR(creates);
396 #endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
397 
398 	return error;
399 }
400 
401 /*
402  * Common function for DMA map destruction.  May be called by bus-specific
403  * DMA map destruction functions.
404  */
405 void
406 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
407 {
408 
409 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
410 	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
411 
412 	/*
413 	 * Free any bounce pages this map might hold.
414 	 */
415 	if (cookie != NULL) {
416 		if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
417 			STAT_INCR(bounced_unloads);
418 		map->dm_nsegs = 0;
419 		if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
420 			_bus_dma_free_bouncebuf(t, map);
421 		STAT_INCR(bounced_destroys);
422 		kmem_free(cookie, _bus_dmamap_cookiesize(map->_dm_segcnt));
423 	} else
424 #endif
425 	STAT_INCR(destroys);
426 	if (map->dm_nsegs > 0)
427 		STAT_INCR(unloads);
428 	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
429 }
430 
431 /*
432  * Common function for loading a direct-mapped DMA map with a linear
433  * buffer.  Called by bus-specific DMA map load functions with the
434  * OR value appropriate for indicating "direct-mapped" for that
435  * chipset.
436  */
437 int
438 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
439     bus_size_t buflen, struct proc *p, int flags)
440 {
441 	int seg, error;
442 	struct vmspace *vm;
443 
444 	if (map->dm_nsegs > 0) {
445 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
446 		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
447 		if (cookie != NULL) {
448 			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
449 				STAT_INCR(bounced_unloads);
450 				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
451 			}
452 			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
453 		} else
454 #endif
455 		STAT_INCR(unloads);
456 	}
457 	/*
458 	 * Make sure that on an error condition we return "no valid mappings".
459 	 */
460 	map->dm_mapsize = 0;
461 	map->dm_nsegs = 0;
462 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
463 
464 	if (buflen > map->_dm_size)
465 		return EINVAL;
466 
467 	if (p != NULL) {
468 		vm = p->p_vmspace;
469 	} else {
470 		vm = vmspace_kernel();
471 	}
472 
473 	seg = 0;
474 	error = _bus_dmamap_load_buffer(t, map, buf, buflen,
475 	    vm, flags, &seg, 0, true);
476 	if (error == 0) {
477 		map->dm_mapsize = buflen;
478 		map->dm_nsegs = seg + 1;
479 		map->_dm_vmspace = vm;
480 
481 		STAT_INCR(loads);
482 
483 		/*
484 		 * For linear buffers, we support marking the mapping
485 		 * as COHERENT.
486 		 *
487 		 * XXX Check TLB entries for cache-inhibit bits?
488 		 */
489 		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
490 			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
491 		else if (MIPS_KSEG1_P(buf))
492 			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
493 #ifdef _LP64
494 		else if (MIPS_XKPHYS_P((vaddr_t)buf) &&
495 		    (MIPS_XKPHYS_TO_CCA((vaddr_t)buf) ==
496 			MIPS3_PG_TO_CCA(MIPS3_PG_UNCACHED)))
497 			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
498 #endif
499 		return 0;
500 	}
501 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
502 	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
503 	if (cookie != NULL &&
504 	    (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
505 		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
506 		    _BUS_DMA_BUFTYPE_LINEAR, flags);
507 	}
508 #endif
509 	return error;
510 }
511 
512 /*
513  * Like _bus_dmamap_load(), but for mbufs.
514  */
515 int
516 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
517     struct mbuf *m0, int flags)
518 {
519 	int seg, error;
520 	struct mbuf *m;
521 	struct vmspace *vm = vmspace_kernel();
522 
523 	if (map->dm_nsegs > 0) {
524 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
525 		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
526 		if (cookie != NULL) {
527 			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
528 				STAT_INCR(bounced_unloads);
529 				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
530 			}
531 			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
532 		} else
533 #endif
534 		STAT_INCR(unloads);
535 	}
536 
537 	/*
538 	 * Make sure that on an error condition we return "no valid mappings".
539 	 */
540 	map->dm_mapsize = 0;
541 	map->dm_nsegs = 0;
542 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
543 
544 	KASSERT(m0->m_flags & M_PKTHDR);
545 
546 	if (m0->m_pkthdr.len > map->_dm_size)
547 		return EINVAL;
548 
549 	vaddr_t lastvaddr = 0;
550 	bool first = true;
551 	seg = 0;
552 	error = 0;
553 	for (m = m0; m != NULL && error == 0; m = m->m_next) {
554 		if (m->m_len == 0)
555 			continue;
556 		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
557 		    vm, flags, &seg, lastvaddr, first);
558 		first = false;
559 		lastvaddr = (vaddr_t)m->m_data + m->m_len;
560 	}
561 	if (error == 0) {
562 		map->dm_mapsize = m0->m_pkthdr.len;
563 		map->dm_nsegs = seg + 1;
564 		map->_dm_vmspace = vm;		/* always kernel */
565 		/*
566 		 * If our cache is coherent, then the map must be coherent too.
567 		 */
568 		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
569 			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
570 		return 0;
571 	}
572 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
573 	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
574 	if (cookie != NULL &&
575 	    (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
576 		error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
577 		    _BUS_DMA_BUFTYPE_MBUF, flags);
578 	}
579 #endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
580 	return error;
581 }
582 
583 /*
584  * Like _bus_dmamap_load(), but for uios.
585  */
586 int
587 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
588     struct uio *uio, int flags)
589 {
590 	int seg, i, error;
591 	bus_size_t minlen, resid;
592 	struct iovec *iov;
593 	void *addr;
594 
595 	if (map->dm_nsegs > 0) {
596 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
597 		struct mips_bus_dma_cookie *const cookie = map->_dm_cookie;
598 		if (cookie != NULL) {
599 			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
600 				STAT_INCR(bounced_unloads);
601 				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
602 			}
603 			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
604 		} else
605 #endif
606 		STAT_INCR(unloads);
607 	}
608 	/*
609 	 * Make sure that on an error condition we return "no valid mappings".
610 	 */
611 	map->dm_mapsize = 0;
612 	map->dm_nsegs = 0;
613 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
614 
615 	resid = uio->uio_resid;
616 	iov = uio->uio_iov;
617 
618 	vaddr_t lastvaddr = 0;
619 	bool first = true;
620 	seg = 0;
621 	error = 0;
622 	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
623 		/*
624 		 * Now at the first iovec to load.  Load each iovec
625 		 * until we have exhausted the residual count.
626 		 */
627 		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
628 		addr = (void *)iov[i].iov_base;
629 
630 		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
631 		    uio->uio_vmspace, flags, &seg, lastvaddr, first);
632 		first = false;
633 		lastvaddr = (vaddr_t)addr + minlen;
634 
635 		resid -= minlen;
636 	}
637 	if (error == 0) {
638 		map->dm_mapsize = uio->uio_resid;
639 		map->dm_nsegs = seg + 1;
640 		map->_dm_vmspace = uio->uio_vmspace;
641 		/*
642 		 * If our cache is coherent, then the map must be coherent too.
643 		 */
644 		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
645 			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
646 		return 0;
647 	}
648 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
649 	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
650 	if (cookie != NULL &&
651 	    (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
652 		error = _bus_dma_load_bouncebuf(t, map, uio, uio->uio_resid,
653 		    _BUS_DMA_BUFTYPE_UIO, flags);
654 	}
655 #endif
656 	return error;
657 }
658 
659 /*
660  * Like _bus_dmamap_load(), but for raw memory.
661  */
662 int
663 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
664     bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
665 {
666 
667 	struct vmspace *const vm = vmspace_kernel();
668 	const bool coherent_p =
669 	    (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT);
670 	const bool cached_p = coherent_p || (flags & BUS_DMA_COHERENT) == 0;
671 	bus_size_t mapsize = 0;
672 	vaddr_t lastvaddr = 0;
673 	bool first = true;
674 	int curseg = 0;
675 	int error = 0;
676 
677 	for (; error == 0 && nsegs-- > 0; segs++) {
678 		void *kva;
679 #ifdef _LP64
680 		if (cached_p) {
681 			kva = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(
682 			    segs->ds_addr);
683 		} else {
684 			kva = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(
685 			    segs->ds_addr);
686 		}
687 #else
688 		if (segs->ds_addr >= MIPS_PHYS_MASK)
689 			return EFBIG;
690 		if (cached_p) {
691 			kva = (void *)MIPS_PHYS_TO_KSEG0(segs->ds_addr);
692 		} else {
693 			kva = (void *)MIPS_PHYS_TO_KSEG1(segs->ds_addr);
694 		}
695 #endif	/* _LP64 */
696 		mapsize += segs->ds_len;
697 		error = _bus_dmamap_load_buffer(t, map, kva, segs->ds_len,
698 		    vm, flags, &curseg, lastvaddr, first);
699 		first = false;
700 		lastvaddr = (vaddr_t)kva + segs->ds_len;
701 	}
702 	if (error == 0) {
703 		map->dm_mapsize = mapsize;
704 		map->dm_nsegs = curseg + 1;
705 		map->_dm_vmspace = vm;		/* always kernel */
706 		/*
707 		 * If our cache is coherent, then the map must be coherent too.
708 		 */
709 		if (coherent_p)
710 			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
711 		return 0;
712 	}
713 	/*
714 	 * bus_dmamem_alloc should never return memory that needs bouncing;
715 	 * if it did, that is a bug which we will not work around.
716 	 */
717 	return error;
718 }
719 
720 /*
721  * Common function for unloading a DMA map.  May be called by
722  * chipset-specific DMA map unload functions.
723  */
724 void
725 _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
726 {
727 	if (map->dm_nsegs > 0) {
728 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
729 		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
730 		if (cookie != NULL) {
731 			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
732 				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
733 				STAT_INCR(bounced_unloads);
734 			}
735 			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
736 		} else
737 #endif
738 
739 		STAT_INCR(unloads);
740 	}
741 	/*
742 	 * No resources to free; just mark the mappings as
743 	 * invalid.
744 	 */
745 	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
746 	map->dm_mapsize = 0;
747 	map->dm_nsegs = 0;
748 	map->_dm_flags &= ~_BUS_DMAMAP_COHERENT;
749 }
750 
751 /*
752  * Common function for DMA map synchronization.  May be called
753  * by chipset-specific DMA map synchronization functions.
754  *
755  * This version works with the virtually-indexed, write-back cache
756  * found in the MIPS-3/MIPS-4 CPUs available for the Algorithmics.
757  */
758 void
759 _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
760     bus_size_t len, int ops)
761 {
762 	bus_size_t minlen;
763 
764 #ifdef DIAGNOSTIC
765 	/*
766 	 * Mixing PRE and POST operations is not allowed.
767 	 */
768 	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
769 	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
770 		panic("_bus_dmamap_sync: mix PRE and POST");
771 
772 	if (offset >= map->dm_mapsize)
773 		panic("%s: bad offset 0x%jx >= 0x%jx", __func__,
774 		    (intmax_t)offset, (intmax_t)map->dm_mapsize);
775 	if ((offset + len) > map->dm_mapsize)
776 		panic("%s: bad length 0x%jx + 0x%jx > 0x%jx", __func__,
777 		    (intmax_t)offset, (intmax_t)len,
778 		    (intmax_t)map->dm_mapsize);
779 #endif
780 
781 	/*
782 	 * Since we're dealing with a virtually-indexed, write-back
783 	 * cache, we need to do the following things:
784 	 *
785 	 *	PREREAD -- Invalidate D-cache.  Note we might have
786 	 *	to also write-back here if we have to use an Index
787 	 *	op, or if the buffer start/end is not cache-line aligned.
788 	 *
789 	 *	PREWRITE -- Write-back the D-cache.  If we have to use
790 	 *	an Index op, we also have to invalidate.  Note that if
791 	 *	we are doing PREREAD|PREWRITE, we can collapse everything
792 	 *	into a single op.
793 	 *
794 	 *	POSTREAD -- Nothing.
795 	 *
796 	 *	POSTWRITE -- Nothing.
797 	 */
798 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
799 	struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
800 	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
801 	    && (ops & BUS_DMASYNC_PREWRITE) && len != 0) {
802 		STAT_INCR(write_bounces);
803 		/*
804 		 * Copy the caller's buffer to the bounce buffer.
805 		 */
806 		switch (cookie->id_buftype) {
807 		case _BUS_DMA_BUFTYPE_LINEAR:
808 			memcpy((char *)cookie->id_bouncebuf + offset,
809 			    cookie->id_origlinearbuf + offset, len);
810 			break;
811 		case _BUS_DMA_BUFTYPE_MBUF:
812 			m_copydata(cookie->id_origmbuf, offset, len,
813 			    (char *)cookie->id_bouncebuf + offset);
814 			break;
815 		case _BUS_DMA_BUFTYPE_UIO:
816 			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
817 			    cookie->id_origuio, len, UIO_WRITE);
818 			break;
819 #ifdef DIAGNOSTIC
820 		case _BUS_DMA_BUFTYPE_RAW:
821 			panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
822 			break;
823 
824 		case _BUS_DMA_BUFTYPE_INVALID:
825 			panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
826 			break;
827 
828 		default:
829 			panic("_bus_dmamap_sync: unknown buffer type %d",
830 			    cookie->id_buftype);
831 			break;
832 #endif /* DIAGNOSTIC */
833 		}
834 	}
835 #endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
836 
837 	/*
838 	 * Flush the write buffer.
839 	 * XXX Is this always necessary?
840 	 */
841 	wbflush();
842 
843 	/*
844 	 * If the mapping is of COHERENT DMA-safe memory or this isn't a
845 	 * PREREAD or PREWRITE, no cache flush is necessary.  Check to see
846 	 * if we need to bounce it.
847 	 */
848 	if ((map->_dm_flags & _BUS_DMAMAP_COHERENT) ||
849 	    (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
850 		goto bounce_it;
851 
852 #ifdef __mips_o32
853 	/*
854 	 * If the mapping belongs to the kernel, or it belongs
855 	 * to the currently-running process (XXX actually, vmspace),
856 	 * then we can use Hit ops.  Otherwise, Index ops.
857 	 *
858 	 * This should be true the vast majority of the time.
859 	 */
860 	const bool useindex = (!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
861 	    map->_dm_vmspace != curproc->p_vmspace);
862 #endif
863 
864 	bus_dma_segment_t *seg = map->dm_segs;
865 	bus_dma_segment_t *const lastseg = seg + map->dm_nsegs;
866 	/*
867 	 * Skip segments until the offset falls within a segment.
868 	 */
869 	for (; offset >= seg->ds_len; seg++) {
870 		offset -= seg->ds_len;
871 	}
872 
873 	for (; seg < lastseg && len != 0; seg++, offset = 0, len -= minlen) {
874 		/*
875 		 * Now at the first segment to sync; nail each segment until we
876 		 * have exhausted the length.
877 		 */
878 		register_t vaddr = seg->_ds_vaddr + offset;
879 		minlen = ulmin(len, seg->ds_len - offset);
880 
881 #ifdef BUS_DMA_DEBUG
882 		printf("bus_dmamap_sync(ops=%d: flushing segment %p "
883 		    "(0x%"PRIxREGISTER"+%"PRIxBUSADDR
884 		    ", 0x%"PRIxREGISTER"+0x%"PRIxBUSADDR
885 		    ") (olen = %"PRIxBUSADDR")...", ops, seg,
886 		    vaddr - offset, offset,
887 		    vaddr - offset, offset + minlen - 1, len);
888 #endif
889 
890 		/*
891 		 * If we are forced to use Index ops, it's always a
892 		 * Write-back,Invalidate, so just do one test.
893 		 */
894 #ifdef __mips_o32
895 		if (__predict_false(useindex || vaddr == 0)) {
896 			mips_dcache_wbinv_range_index(vaddr, minlen);
897 #ifdef BUS_DMA_DEBUG
898 			printf("\n");
899 #endif
900 			continue;
901 		}
902 #endif
903 
904 		switch (ops) {
905 		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
906 			mips_dcache_wbinv_range(vaddr, minlen);
907 			break;
908 
909 		case BUS_DMASYNC_PREREAD: {
910 			struct mips_cache_info *const mci = &mips_cache_info;
911 			register_t start = vaddr;
912 			register_t end = vaddr + minlen;
913 			register_t preboundary, firstboundary, lastboundary;
914 			register_t mask = mci->mci_dcache_align_mask;
915 
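			/*
			 * Partial cache lines at either end of the range may
			 * share a line with data outside the buffer, so
			 * write those back and invalidate (wbinv); only the
			 * fully covered lines are invalidated outright.
			 */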
916 			preboundary = start & ~mask;
917 			firstboundary = (start + mask) & ~mask;
918 			lastboundary = end & ~mask;
919 			if (preboundary < start && preboundary < lastboundary)
920 				mips_dcache_wbinv_range(preboundary,
921 				    mci->mci_dcache_align);
922 			if (firstboundary < lastboundary)
923 				mips_dcache_inv_range(firstboundary,
924 				    lastboundary - firstboundary);
925 			if (lastboundary < end)
926 				mips_dcache_wbinv_range(lastboundary,
927 				    mci->mci_dcache_align);
928 			break;
929 		}
930 
931 		case BUS_DMASYNC_PREWRITE:
932 			mips_dcache_wb_range(vaddr, minlen);
933 			break;
934 		}
935 #ifdef BUS_DMA_DEBUG
936 		printf("\n");
937 #endif
938 	}
939 
940   bounce_it:
941 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
942 	if ((ops & BUS_DMASYNC_POSTREAD) == 0 ||
943 	    cookie == NULL ||
944 	    (cookie->id_flags & _BUS_DMA_IS_BOUNCING) == 0 ||
945 	    len == 0)
946 		return;
947 
948 	STAT_INCR(read_bounces);
949 	/*
950 	 * Copy the bounce buffer to the caller's buffer.
951 	 */
952 	switch (cookie->id_buftype) {
953 	case _BUS_DMA_BUFTYPE_LINEAR:
954 		memcpy(cookie->id_origlinearbuf + offset,
955 		    (char *)cookie->id_bouncebuf + offset, len);
956 		break;
957 
958 	case _BUS_DMA_BUFTYPE_MBUF:
959 		m_copyback(cookie->id_origmbuf, offset, len,
960 		    (char *)cookie->id_bouncebuf + offset);
961 		break;
962 
963 	case _BUS_DMA_BUFTYPE_UIO:
964 		_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
965 		    cookie->id_origuio, len, UIO_READ);
966 		break;
967 #ifdef DIAGNOSTIC
968 	case _BUS_DMA_BUFTYPE_RAW:
969 		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
970 		break;
971 
972 	case _BUS_DMA_BUFTYPE_INVALID:
973 		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
974 		break;
975 
976 	default:
977 		panic("_bus_dmamap_sync: unknown buffer type %d",
978 		    cookie->id_buftype);
979 		break;
980 #endif
981 	}
982 #endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
983 	return;
984 }
985 
986 /*
987  * Common function for DMA-safe memory allocation.  May be called
988  * by bus-specific DMA memory allocation functions.
989  */
990 int
991 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
992     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
993     int flags)
994 {
995 	bus_addr_t high;
996 
997 	if (t->_bounce_alloc_hi != 0 &&
998 	    _BUS_AVAIL_END > t->_bounce_alloc_hi - 1)
999 		high = t->_bounce_alloc_hi - 1;
1000 	else
1001 		high = _BUS_AVAIL_END;
1002 
1003 	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
1004 	    segs, nsegs, rsegs, flags, t->_bounce_alloc_lo, high);
1005 }
1006 
1007 /*
1008  * Allocate physical memory from the given physical address range.
1009  * Called by DMA-safe memory allocation methods.
1010  */
1011 int
1012 _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
1013     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
1014     int flags, paddr_t low, paddr_t high)
1015 {
1016 	paddr_t curaddr, lastaddr;
1017 	struct vm_page *m;
1018 	struct pglist mlist;
1019 	int curseg, error;
1020 
1021 	/* Always round the size. */
1022 	size = round_page(size);
1023 
1024 	/*
1025 	 * Allocate pages from the VM system.
1026 	 */
1027 	error = uvm_pglistalloc(size, low, high, alignment, boundary,
1028 	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
1029 	if (error)
1030 		return error;
1031 
1032 	/*
1033 	 * Compute the location, size, and number of segments actually
1034 	 * returned by the VM code.
1035 	 */
1036 	m = TAILQ_FIRST(&mlist);
1037 	curseg = 0;
1038 	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
1039 	segs[curseg].ds_len = PAGE_SIZE;
1040 	m = TAILQ_NEXT(m, pageq.queue);
1041 
1042 	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
1043 		curaddr = VM_PAGE_TO_PHYS(m);
1044 #ifdef DIAGNOSTIC
1045 		if (curaddr < low || curaddr >= high) {
1046 			printf("uvm_pglistalloc returned nonsensical"
1047 			    " address 0x%"PRIxPADDR"\n", curaddr);
1048 			panic("_bus_dmamem_alloc");
1049 		}
1050 #endif
1051 		if (curaddr == (lastaddr + PAGE_SIZE))
1052 			segs[curseg].ds_len += PAGE_SIZE;
1053 		else {
1054 			curseg++;
1055 			segs[curseg].ds_addr = curaddr;
1056 			segs[curseg].ds_len = PAGE_SIZE;
1057 		}
1058 		lastaddr = curaddr;
1059 	}
1060 
1061 	*rsegs = curseg + 1;
1062 
1063 	return 0;
1064 }
1065 
1066 /*
1067  * Common function for freeing DMA-safe memory.  May be called by
1068  * bus-specific DMA memory free functions.
1069  */
1070 void
1071 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1072 {
1073 	struct vm_page *m;
1074 	bus_addr_t addr;
1075 	struct pglist mlist;
1076 	int curseg;
1077 
1078 	/*
1079 	 * Build a list of pages to free back to the VM system.
1080 	 */
1081 	TAILQ_INIT(&mlist);
1082 	for (curseg = 0; curseg < nsegs; curseg++) {
1083 		for (addr = segs[curseg].ds_addr;
1084 		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
1085 		    addr += PAGE_SIZE) {
1086 			m = PHYS_TO_VM_PAGE(addr);
1087 			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
1088 		}
1089 	}
1090 
1091 	uvm_pglistfree(&mlist);
1092 }
1093 
1094 /*
1095  * Common function for mapping DMA-safe memory.  May be called by
1096  * bus-specific DMA memory map functions.
1097  */
1098 int
1099 _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1100     size_t size, void **kvap, int flags)
1101 {
1102 	vaddr_t va;
1103 	bus_addr_t addr;
1104 	int curseg;
1105 	const uvm_flag_t kmflags =
1106 	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
1107 
1108 	/*
1109 	 * If we're only mapping 1 segment, use a direct-mapped address
1110 	 * (KSEG0/KSEG1 or XKPHYS) to avoid TLB thrashing.
1111 	 */
1112 #ifdef _LP64
1113 	if (nsegs == 1) {
1114 		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
1115 			== 0) &&
1116 		    (flags & BUS_DMA_COHERENT))
1117 			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(
1118 			    segs[0].ds_addr);
1119 		else
1120 			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(
1121 			    segs[0].ds_addr);
1122 		return 0;
1123 	}
1124 #else
1125 	if ((nsegs == 1) && (segs[0].ds_addr < MIPS_PHYS_MASK)) {
1126 		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
1127 			== 0) &&
1128 		    (flags & BUS_DMA_COHERENT))
1129 			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
1130 		else
1131 			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
1132 		return 0;
1133 	}
1134 #endif	/* _LP64 */
1135 
1136 	size = round_page(size);
1137 
1138 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
1139 
1140 	if (va == 0)
1141 		return ENOMEM;
1142 
1143 	*kvap = (void *)va;
1144 
1145 	for (curseg = 0; curseg < nsegs; curseg++) {
1146 		for (addr = trunc_page(segs[curseg].ds_addr);
1147 		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
1148 		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
1149 			if (size == 0)
1150 				panic("_bus_dmamem_map: size botch");
1151 			pmap_enter(pmap_kernel(), va, addr,
1152 			    VM_PROT_READ | VM_PROT_WRITE,
1153 			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
1154 		}
1155 	}
1156 	pmap_update(pmap_kernel());
1157 
1158 	return 0;
1159 }
1160 
1161 /*
1162  * Common function for unmapping DMA-safe memory.  May be called by
1163  * bus-specific DMA memory unmapping functions.
1164  */
1165 void
1166 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1167 {
1168 
1169 #ifdef DIAGNOSTIC
1170 	if ((uintptr_t)kva & PGOFSET)
1171 		panic("_bus_dmamem_unmap: bad alignment on %p", kva);
1172 #endif
1173 
1174 	/*
1175 	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
1176 	 * not in KSEG2 or XKSEG).
1177 	 */
1178 	if (MIPS_KSEG0_P(kva) || MIPS_KSEG1_P(kva))
1179 		return;
1180 #ifdef _LP64
1181 	if (MIPS_XKPHYS_P((vaddr_t)kva))
1182 		return;
1183 #endif
1184 
1185 	size = round_page(size);
1186 	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
1187 	pmap_update(pmap_kernel());
1188 	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1189 }
1190 
1191 /*
1192  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
1193  * bus-specific DMA mmap(2)'ing functions.
1194  */
1195 paddr_t
1196 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1197     off_t off, int prot, int flags)
1198 {
1199 	int i;
1200 	paddr_t pa;
1201 
1202 	for (i = 0; i < nsegs; i++) {
1203 #ifdef DIAGNOSTIC
1204 		if (off & PGOFSET)
1205 			panic("_bus_dmamem_mmap: offset unaligned");
1206 		if (segs[i].ds_addr & PGOFSET)
1207 			panic("_bus_dmamem_mmap: segment unaligned");
1208 		if (segs[i].ds_len & PGOFSET)
1209 			panic("_bus_dmamem_mmap: segment size not multiple"
1210 			    " of page size");
1211 #endif
1212 		if (off >= segs[i].ds_len) {
1213 			off -= segs[i].ds_len;
1214 			continue;
1215 		}
1216 
1217 		pa = (paddr_t)segs[i].ds_addr + off;
1218 
1219 /*
1220  * This is for machines which use normal RAM as video memory, so userland can
1221  * mmap() it and treat it like device memory, which is normally uncached.
1222  * Needed for X11 on the SGI O2 and likely on boards such as the CI20.
1223  */
1224 #if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
1225 		if (flags & BUS_DMA_PREFETCHABLE)
1226 			return mips_btop(pa | PGC_NOCACHE);
1227 		else
1228 			return mips_btop(pa);
1229 #else
1230 		return mips_btop(pa);
1231 #endif
1232 	}
1233 
1234 	/* Page not found. */
1235 	return -1;
1236 }
1237 
1238 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
1239 static int
1240 _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
1241     bus_size_t size, int flags)
1242 {
1243 	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
1244 	int error = 0;
1245 
1246 #ifdef DIAGNOSTIC
1247 	if (cookie == NULL)
1248 		panic("_bus_dma_alloc_bouncebuf: no cookie");
1249 #endif
1250 
1251 	cookie->id_bouncebuflen = round_page(size);
1252 	error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
1253 	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
1254 	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
1255 	if (error)
1256 		goto out;
1257 	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
1258 	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
1259 	    (void **)&cookie->id_bouncebuf, flags);
1260 
1261  out:
1262 	if (error) {
1263 		_bus_dmamem_free(t, cookie->id_bouncesegs,
1264 		    cookie->id_nbouncesegs);
1265 		cookie->id_bouncebuflen = 0;
1266 		cookie->id_nbouncesegs = 0;
1267 	} else {
1268 		cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
1269 	}
1270 
1271 	return error;
1272 }
1273 
1274 static void
1275 _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
1276 {
1277 	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
1278 
1279 #ifdef DIAGNOSTIC
1280 	if (cookie == NULL)
1281 		panic("_bus_dma_free_bouncebuf: no cookie");
1282 #endif
1283 
1284 	_bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
1285 	_bus_dmamem_free(t, cookie->id_bouncesegs, cookie->id_nbouncesegs);
1286 	cookie->id_bouncebuflen = 0;
1287 	cookie->id_nbouncesegs = 0;
1288 	cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
1289 }
1290 
1291 /*
1292  * This function does the same as uiomove, but takes an explicit
1293  * direction, and does not update the uio structure.
1294  */
1295 static int
1296 _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
1297 {
1298 	struct iovec *iov;
1299 	int error;
1300 	struct vmspace *vm;
1301 	char *cp;
1302 	size_t resid, cnt;
1303 	int i;
1304 
1305 	iov = uio->uio_iov;
1306 	vm = uio->uio_vmspace;
1307 	cp = buf;
1308 	resid = n;
1309 
1310 	for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
1311 		iov = &uio->uio_iov[i];
1312 		if (iov->iov_len == 0)
1313 			continue;
1314 		cnt = MIN(resid, iov->iov_len);
1315 
1316 		if (!VMSPACE_IS_KERNEL_P(vm)) {
1317 			preempt_point();
1318 		}
1319 		if (direction == UIO_READ) {
1320 			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
1321 		} else {
1322 			error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
1323 		}
1324 		if (error)
1325 			return error;
1326 		cp += cnt;
1327 		resid -= cnt;
1328 	}
1329 	return 0;
1330 }
1331 #endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
1332 
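/*
 * Create a tag for DMA restricted to [min_addr, max_addr].  If the
 * existing tag's window already lies within the requested range it is
 * reused (taking an extra reference when it is dynamically allocated);
 * otherwise a copy is made with tightened bounce and window parameters.
 */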
1333 int
1334 _bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
1335     bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
1336 {
1337 
1338 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
1339 	if (((tag->_bounce_thresh != 0 &&
1340 		    max_addr >= tag->_bounce_thresh - 1 &&
1341 		    tag->_bounce_alloc_hi != 0 &&
1342 		    max_addr >= tag->_bounce_alloc_hi - 1) ||
1343 		(tag->_bounce_alloc_hi == 0 && max_addr > _BUS_AVAIL_END)) &&
1344 	    (min_addr <= tag->_bounce_alloc_lo)) {
1345 		*newtag = tag;
1346 		/* if the tag must be freed, add a reference */
1347 		if (tag->_tag_needs_free)
1348 			tag->_tag_needs_free++;
1349 		return 0;
1350 	}
1351 
1352 	if ((*newtag = kmem_alloc(sizeof(struct mips_bus_dma_tag),
1353 		    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
1354 		return ENOMEM;
1355 
1356 	**newtag = *tag;
1357 	(*newtag)->_tag_needs_free = 1;
1358 
1359 	if (tag->_bounce_thresh == 0 || max_addr < tag->_bounce_thresh)
1360 		(*newtag)->_bounce_thresh = max_addr;
1361 	if (tag->_bounce_alloc_hi == 0 || max_addr < tag->_bounce_alloc_hi)
1362 		(*newtag)->_bounce_alloc_hi = max_addr;
1363 	if (min_addr > tag->_bounce_alloc_lo)
1364 		(*newtag)->_bounce_alloc_lo = min_addr;
1365 	(*newtag)->_wbase +=
1366 	    (*newtag)->_bounce_alloc_lo - tag->_bounce_alloc_lo;
1367 
1368 	return 0;
1369 #else
1370 	return EOPNOTSUPP;
1371 #endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
1372 }
1373 
1374 void
1375 _bus_dmatag_destroy(bus_dma_tag_t tag)
1376 {
1377 #ifdef _MIPS_NEED_BUS_DMA_BOUNCE
1378 	switch (tag->_tag_needs_free) {
1379 	case 0:
1380 		break;				/* not dynamically allocated */
1381 	case 1:
1382 		kmem_free(tag, sizeof(*tag));	/* last reference to tag */
1383 		break;
1384 	default:
1385 		tag->_tag_needs_free--;		/* one less reference */
1386 	}
1387 #endif
1388 }
1389