xref: /netbsd-src/sys/arch/sparc64/dev/iommu.c (revision 9fbd88883c38d0c0fbfcbe66d76fe6b0fab3f9de)
1 /*	$NetBSD: iommu.c,v 1.44 2001/10/17 18:43:04 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 1999, 2000 Matthew R. Green
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*-
32  * Copyright (c) 1998 The NetBSD Foundation, Inc.
33  * All rights reserved.
34  *
35  * This code is derived from software contributed to The NetBSD Foundation
36  * by Paul Kranenburg.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *        This product includes software developed by the NetBSD
49  *        Foundation, Inc. and its contributors.
50  * 4. Neither the name of The NetBSD Foundation nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
55  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64  * POSSIBILITY OF SUCH DAMAGE.
65  */
66 
67 /*
68  * Copyright (c) 1992, 1993
69  *	The Regents of the University of California.  All rights reserved.
70  *
71  * This software was developed by the Computer Systems Engineering group
72  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
73  * contributed to Berkeley.
74  *
75  * All advertising materials mentioning features or use of this software
76  * must display the following acknowledgement:
77  *	This product includes software developed by the University of
78  *	California, Lawrence Berkeley Laboratory.
79  *
80  * Redistribution and use in source and binary forms, with or without
81  * modification, are permitted provided that the following conditions
82  * are met:
83  * 1. Redistributions of source code must retain the above copyright
84  *    notice, this list of conditions and the following disclaimer.
85  * 2. Redistributions in binary form must reproduce the above copyright
86  *    notice, this list of conditions and the following disclaimer in the
87  *    documentation and/or other materials provided with the distribution.
88  * 3. All advertising materials mentioning features or use of this software
89  *    must display the following acknowledgement:
90  *	This product includes software developed by the University of
91  *	California, Berkeley and its contributors.
92  * 4. Neither the name of the University nor the names of its contributors
93  *    may be used to endorse or promote products derived from this software
94  *    without specific prior written permission.
95  *
96  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
97  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
98  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
99  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
100  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
101  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
102  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
103  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
104  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
105  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
106  * SUCH DAMAGE.
107  *
108  *	from: NetBSD: sbus.c,v 1.13 1999/05/23 07:24:02 mrg Exp
109  *	from: @(#)sbus.c	8.1 (Berkeley) 6/11/93
110  */
111 
112 /*
113  * UltraSPARC IOMMU support; used by both the sbus and pci code.
114  */
115 #include "opt_ddb.h"
116 
117 #include <sys/param.h>
118 #include <sys/extent.h>
119 #include <sys/malloc.h>
120 #include <sys/systm.h>
121 #include <sys/device.h>
122 #include <sys/proc.h>
123 
124 #include <uvm/uvm_extern.h>
125 
126 #include <machine/bus.h>
127 #include <sparc64/sparc64/cache.h>
128 #include <sparc64/dev/iommureg.h>
129 #include <sparc64/dev/iommuvar.h>
130 
131 #include <machine/autoconf.h>
132 #include <machine/cpu.h>
133 
134 #ifdef DEBUG
135 #define IDB_BUSDMA	0x1
136 #define IDB_IOMMU	0x2
137 #define IDB_INFO	0x4
138 #define	IDB_SYNC	0x8
139 int iommudebug = 0x0;
140 #define DPRINTF(l, s)   do { if (iommudebug & l) printf s; } while (0)
141 #else
142 #define DPRINTF(l, s)
143 #endif
144 
145 #define iommu_strbuf_flush(i,v) do {				\
146 	if ((i)->is_sb[0])					\
147 		bus_space_write_8((i)->is_bustag,		\
148 			(bus_space_handle_t)(u_long)		\
149 			&(i)->is_sb[0]->strbuf_pgflush,		\
150 			0, (v));				\
151 	if ((i)->is_sb[1])					\
152 		bus_space_write_8((i)->is_bustag,		\
153 			(bus_space_handle_t)(u_long)		\
154 			&(i)->is_sb[1]->strbuf_pgflush,		\
155 			0, (v));				\
156 	} while (0)
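
/*
 * Note: iommu_strbuf_flush() above queues a flush of the given DVMA
 * page in each streaming buffer that probed as present, by writing the
 * page address to that buffer's strbuf_pgflush register; completion is
 * awaited separately in iommu_strbuf_flush_done() below.
 */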
157 
158 static	int iommu_strbuf_flush_done __P((struct iommu_state *));
159 
160 /*
161  * initialise the UltraSPARC IOMMU (SBUS or PCI):
162 	 *	- allocate and set up the iotsb.
163  *	- enable the IOMMU
164  *	- initialise the streaming buffers (if they exist)
165  *	- create a private DVMA map.
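 *
 * Example call (a sketch; the real arguments come from the bus attach
 * code in sbus.c or the psycho PCI driver):
 *
 *	iommu_init("sbus iommu", is, 0, -1);
 *
 * builds a 1024-entry (8KB) TSB and, because iovabase is -1, derives
 * the DVMA base from IOTSB_VSTART(0).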
166  */
167 void
168 iommu_init(name, is, tsbsize, iovabase)
169 	char *name;
170 	struct iommu_state *is;
171 	int tsbsize;
172 	u_int32_t iovabase;
173 {
174 	psize_t size;
175 	vaddr_t va;
176 	paddr_t pa;
177 	struct vm_page *m;
178 	struct pglist mlist;
179 
180 	/*
181 	 * Setup the iommu.
182 	 *
183 	 * The sun4u iommu is part of the SBUS or PCI controller so we
184 	 * will deal with it here.
185 	 *
186 	 * The IOMMU address space always ends at 0xffffe000, but the starting
187 	 * address depends on the size of the map.  The map size is 1024 * 2 ^
188 	 * is->is_tsbsize entries, where each entry is 8 bytes.  The start of
189 	 * the map can be calculated by (0xffffe000 << (10 + is->is_tsbsize)).
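	 *
	 * For example (a sketch, assuming the IOTSB_VSTART() definition in
	 * iommureg.h): with is->is_tsbsize == 0 the TSB holds 1024 8-byte
	 * entries, each mapping one 8KB page, so the map covers 8MB and
	 * starts at (u_int)(0xffffe000 << 10) == 0xff800000.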
190 	 */
191 	is->is_cr = (tsbsize << 16) | IOMMUCR_EN;
192 	is->is_tsbsize = tsbsize;
193 	is->is_dvmabase = iovabase;
194 	if (iovabase == -1) is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);
195 
196 	/*
197 	 * Allocate memory for I/O pagetables.  They need to be physically
198 	 * contiguous.
199 	 */
200 
201 	size = NBPG<<(is->is_tsbsize);
202 	TAILQ_INIT(&mlist);
203 	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
204 		(paddr_t)NBPG, (paddr_t)0, &mlist, 1, 0) != 0)
205 		panic("iommu_init: no memory");
206 
207 	va = uvm_km_valloc(kernel_map, size);
208 	if (va == 0)
209 		panic("iommu_init: no memory");
210 	is->is_tsb = (int64_t *)va;
211 
212 	m = TAILQ_FIRST(&mlist);
213 	is->is_ptsb = VM_PAGE_TO_PHYS(m);
214 
215 	/* Map the pages */
216 	for (; m != NULL; m = TAILQ_NEXT(m,pageq)) {
217 		pa = VM_PAGE_TO_PHYS(m);
218 		pmap_enter(pmap_kernel(), va, pa | PMAP_NVC,
219 			VM_PROT_READ|VM_PROT_WRITE,
220 			VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
221 		va += NBPG;
222 	}
223 	pmap_update(pmap_kernel());
224 	bzero(is->is_tsb, size);
225 
226 #ifdef DEBUG
227 	if (iommudebug & IDB_INFO)
228 	{
229 		/* Probe the iommu */
230 		struct iommureg *regs = is->is_iommu;
231 
232 		printf("iommu regs at: cr=%lx tsb=%lx flush=%lx\n",
233 		    (u_long)&regs->iommu_cr,
234 		    (u_long)&regs->iommu_tsb,
235 		    (u_long)&regs->iommu_flush);
236 		printf("iommu cr=%llx tsb=%llx\n", (unsigned long long)regs->iommu_cr, (unsigned long long)regs->iommu_tsb);
237 		printf("TSB base %p phys %llx\n", (void *)is->is_tsb, (unsigned long long)is->is_ptsb);
238 		delay(1000000); /* 1 s */
239 	}
240 #endif
241 
242 	/*
243 	 * Initialize streaming buffer, if it is there.
244 	 */
245 	if (is->is_sb[0] || is->is_sb[1])
246 		(void)pmap_extract(pmap_kernel(), (vaddr_t)&is->is_flush[0],
247 		    (paddr_t *)&is->is_flushpa);
248 
249 	/*
250 	 * now actually start up the IOMMU
251 	 */
252 	iommu_reset(is);
253 
254 	/*
255 	 * Now that all the hardware is working, allocate the DVMA map.
256 	 */
257 	printf("DVMA map: %x to %x\n",
258 		(unsigned int)is->is_dvmabase,
259 		(unsigned int)(is->is_dvmabase+(size<<10)));
260 	is->is_dvmamap = extent_create(name,
261 				       is->is_dvmabase, (u_long)IOTSB_VEND,
262 				       M_DEVBUF, 0, 0, EX_NOWAIT);
263 }
264 
265 /*
266  * Streaming buffers don't exist on the UltraSPARC IIi; we should have
267  * detected that already and disabled them.  If not, we will notice that
268  * they aren't there when the STRBUF_EN bit does not remain.
269  */
270 void
271 iommu_reset(is)
272 	struct iommu_state *is;
273 {
274 
275 	/* Need to do 64-bit stores */
276 	bus_space_write_8(is->is_bustag,
277 		(bus_space_handle_t)(u_long)&is->is_iommu->iommu_tsb,
278 		0, is->is_ptsb);
279 	/* Enable IOMMU in diagnostic mode */
280 	bus_space_write_8(is->is_bustag,
281 		(bus_space_handle_t)(u_long)&is->is_iommu->iommu_cr,
282 		0, is->is_cr|IOMMUCR_DE);
283 
284 	if (is->is_sb[0]) {
285 
286 		/* Enable diagnostics mode? */
287 		bus_space_write_8(is->is_bustag,
288 			(bus_space_handle_t)(u_long)&is->is_sb[0]->strbuf_ctl,
289 			0, STRBUF_EN);
290 
291 		/* No streaming buffers? Disable them */
292 		if (bus_space_read_8(is->is_bustag,
293 			(bus_space_handle_t)(u_long)&is->is_sb[0]->strbuf_ctl,
294 			0) == 0)
295 			is->is_sb[0] = 0;
296 	}
297 
298 	if (is->is_sb[1]) {
299 
300 		/* Enable diagnostics mode? */
301 		bus_space_write_8(is->is_bustag,
302 			(bus_space_handle_t)(u_long)&is->is_sb[1]->strbuf_ctl,
303 			0, STRBUF_EN);
304 
305 		/* No streaming buffers? Disable them */
306 		if (bus_space_read_8(is->is_bustag,
307 			(bus_space_handle_t)(u_long)&is->is_sb[1]->strbuf_ctl,
308 			0) == 0)
309 			is->is_sb[1] = 0;
310 	}
311 }
312 
313 /*
314  * Here are the iommu control routines.
315  */
316 void
317 iommu_enter(is, va, pa, flags)
318 	struct iommu_state *is;
319 	vaddr_t va;
320 	int64_t pa;
321 	int flags;
322 {
323 	int64_t tte;
324 
325 #ifdef DIAGNOSTIC
326 	if (va < is->is_dvmabase)
327 		panic("iommu_enter: va %#lx not in DVMA space", va);
328 #endif
329 
330 	tte = MAKEIOTTE(pa, !(flags&BUS_DMA_NOWRITE), !(flags&BUS_DMA_NOCACHE),
331 			(flags&BUS_DMA_STREAMING));
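
	/*
	 * A sketch of what MAKEIOTTE() (from iommureg.h) builds here: the
	 * physical page number plus the valid bit, with the writable,
	 * cacheable and streaming bits derived from the absence of
	 * BUS_DMA_NOWRITE and BUS_DMA_NOCACHE and the presence of
	 * BUS_DMA_STREAMING, respectively.
	 */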
332 
333 	/* Is the streamcache flush really needed? */
334 	if (is->is_sb[0] || is->is_sb[1]) {
335 		iommu_strbuf_flush(is, va);
336 		iommu_strbuf_flush_done(is);
337 	}
338 	DPRINTF(IDB_IOMMU, ("iommu_enter: setting TSB slot %d for va %p\n",
339 		       (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va));
340 	is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = tte;
341 	bus_space_write_8(is->is_bustag, (bus_space_handle_t)(u_long)
342 			  &is->is_iommu->iommu_flush, 0, va);
343 	DPRINTF(IDB_IOMMU, ("iommu_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
344 		       va, (long)pa, (u_long)IOTSBSLOT(va,is->is_tsbsize),
345 		       (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
346 		       (u_long)tte));
347 }
348 
349 
350 /*
351  * Find the value of a DVMA address (debug routine).
352  */
353 paddr_t
354 iommu_extract(is, dva)
355 	struct iommu_state *is;
356 	vaddr_t dva;
357 {
358 	int64_t tte = 0;
359 
360 	if (dva >= is->is_dvmabase)
361 		tte = is->is_tsb[IOTSBSLOT(dva,is->is_tsbsize)];
362 
363 	if ((tte&IOTTE_V) == 0)
364 		return ((paddr_t)-1L);
365 	return (tte&IOTTE_PAMASK);
366 }
367 
368 /*
369  * iommu_remove: removes mappings created by iommu_enter
370  *
371  * Only demap from IOMMU if flag is set.
372  *
373  * XXX: this function needs better internal error checking.
374  */
375 void
376 iommu_remove(is, va, len)
377 	struct iommu_state *is;
378 	vaddr_t va;
379 	size_t len;
380 {
381 
382 #ifdef DIAGNOSTIC
383 	if (va < is->is_dvmabase)
384 		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
385 	if ((long)(va + len) < (long)va)
386 		panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
387 		      (long) va, (long) len);
388 	if (len & ~0xfffffff)
389 		panic("iommu_remove: ridiculous len 0x%lx", (u_long)len);
390 #endif
391 
392 	va = trunc_page(va);
393 	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
394 	    va, (u_long)IOTSBSLOT(va,is->is_tsbsize),
395 	    &is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)]));
396 	while (len > 0) {
397 		DPRINTF(IDB_IOMMU, ("iommu_remove: clearing TSB slot %d for va %p size %lx\n",
398 		    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va, (u_long)len));
399 		if (is->is_sb[0] || is->is_sb[1]) {
400 			DPRINTF(IDB_IOMMU, ("iommu_remove: flushing va %p TSB[%lx]@%p=%lx, %lu bytes left\n",
401 			       (void *)(u_long)va, (long)IOTSBSLOT(va,is->is_tsbsize),
402 			       (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
403 			       (long)(is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)]),
404 			       (u_long)len));
405 			iommu_strbuf_flush(is, va);
406 			if (len <= NBPG)
407 				iommu_strbuf_flush_done(is);
408 			DPRINTF(IDB_IOMMU, ("iommu_remove: flushed va %p TSB[%lx]@%p=%lx, %lu bytes left\n",
409 			       (void *)(u_long)va, (long)IOTSBSLOT(va,is->is_tsbsize),
410 			       (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
411 			       (long)(is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)]),
412 			       (u_long)len));
413 		}
414 
415 		if (len <= NBPG)
416 			len = 0;
417 		else
418 			len -= NBPG;
419 
420 		is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = 0;
421 		bus_space_write_8(is->is_bustag, (bus_space_handle_t)(u_long)
422 				  &is->is_iommu->iommu_flush, 0, va);
423 		va += NBPG;
424 	}
425 }
426 
427 static int
428 iommu_strbuf_flush_done(is)
429 	struct iommu_state *is;
430 {
431 	struct timeval cur, flushtimeout;
432 
433 #define BUMPTIME(t, usec) { \
434 	register volatile struct timeval *tp = (t); \
435 	register long us; \
436  \
437 	tp->tv_usec = us = tp->tv_usec + (usec); \
438 	if (us >= 1000000) { \
439 		tp->tv_usec = us - 1000000; \
440 		tp->tv_sec++; \
441 	} \
442 }
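
	/*
	 * E.g. BUMPTIME(&flushtimeout, 500000) below advances the timeval
	 * by half a second, carrying into tv_sec when tv_usec crosses
	 * 1000000.
	 */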
443 
444 	if (!is->is_sb[0] && !is->is_sb[1])
445 		return (0);
446 
447 	/*
448 	 * Streaming buffer flushes:
449 	 *
450 	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
451 	 *     we're not on a cache line boundary (64-bits):
452 	 *   2 Store 0 in flag
453 	 *   3 Store pointer to flag in flushsync
454 	 *   4 Wait until the flag becomes 0x1.
455 	 *
456 	 * If it takes more than .5 sec, something
457 	 * went wrong.
458 	 */
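
	/*
	 * The two flags live in adjacent 64-bit words: is_flush[0], at
	 * physical address is_flushpa, is used for streaming buffer 0, and
	 * is_flush[1], at is_flushpa + 8, for streaming buffer 1.  A buffer
	 * that is absent has its flag left pre-set to 1, so the wait loop
	 * below only polls the buffers that were actually kicked.
	 */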
459 
460 	is->is_flush[0] = 1;
461 	is->is_flush[1] = 1;
462 	if (is->is_sb[0]) {
463 		is->is_flush[0] = 0;
464 		bus_space_write_8(is->is_bustag, (bus_space_handle_t)(u_long)
465 			&is->is_sb[0]->strbuf_flushsync, 0, is->is_flushpa);
466 	}
467 	if (is->is_sb[1]) {
468 		is->is_flush[1] = 0;
469 		bus_space_write_8(is->is_bustag, (bus_space_handle_t)(u_long)
470 			&is->is_sb[1]->strbuf_flushsync, 0, is->is_flushpa + 8);
471 	}
472 
473 	microtime(&flushtimeout);
474 	cur = flushtimeout;
475 	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */
476 
477 	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flush = %lx,%lx "
478 		"at va = %lx pa = %lx now=%lx:%lx until = %lx:%lx\n",
479 		(long)is->is_flush[0], (long)is->is_flush[1],
480 		(long)&is->is_flush[0], (long)is->is_flushpa,
481 		cur.tv_sec, cur.tv_usec,
482 		flushtimeout.tv_sec, flushtimeout.tv_usec));
483 
484 	/* Bypass non-coherent D$ */
485 	while ((!ldxa(is->is_flushpa, ASI_PHYS_CACHED) ||
486 		!ldxa(is->is_flushpa + 8, ASI_PHYS_CACHED)) &&
487 		((cur.tv_sec < flushtimeout.tv_sec) ||
488 			((cur.tv_sec == flushtimeout.tv_sec) &&
			(cur.tv_usec <= flushtimeout.tv_usec))))
489 		microtime(&cur);
490 
491 #ifdef DIAGNOSTIC
492 	if (!ldxa(is->is_flushpa, ASI_PHYS_CACHED) ||
493 	    !ldxa(is->is_flushpa + 8, ASI_PHYS_CACHED)) {
494 		printf("iommu_strbuf_flush_done: flush timeout %p,%p at %p\n",
495 			(void *)(u_long)is->is_flush[0],
496 			(void *)(u_long)is->is_flush[1],
497 			(void *)(u_long)is->is_flushpa); /* panic? */
498 #ifdef DDB
499 		Debugger();
500 #endif
501 	}
502 #endif
503 	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flushed\n"));
504 	return (is->is_flush[0] && is->is_flush[1]);
505 }
506 
507 /*
508  * IOMMU DVMA operations, common to SBUS and PCI.
509  */
510 int
511 iommu_dvmamap_load(t, is, map, buf, buflen, p, flags)
512 	bus_dma_tag_t t;
513 	struct iommu_state *is;
514 	bus_dmamap_t map;
515 	void *buf;
516 	bus_size_t buflen;
517 	struct proc *p;
518 	int flags;
519 {
520 	int s;
521 	int err;
522 	bus_size_t sgsize;
523 	paddr_t curaddr;
524 	u_long dvmaddr, sgstart, sgend;
525 	bus_size_t align, boundary;
526 	vaddr_t vaddr = (vaddr_t)buf;
527 	int seg;
528 	pmap_t pmap;
529 
530 	if (map->dm_nsegs) {
531 		/* Already in use?? */
532 #ifdef DIAGNOSTIC
533 		printf("iommu_dvmamap_load: map still in use\n");
534 #endif
535 		bus_dmamap_unload(t, map);
536 	}
537 	/*
538 	 * Make sure that on error condition we return "no valid mappings".
539 	 */
540 	map->dm_nsegs = 0;
541 
542 	if (buflen > map->_dm_size) {
543 		DPRINTF(IDB_BUSDMA,
544 		    ("iommu_dvmamap_load(): error %d > %d -- "
545 		     "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
546 		return (EINVAL);
547 	}
548 
549 	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
550 
551 	/*
552 	 * A boundary presented to bus_dmamem_alloc() takes precedence
553 	 * over boundary in the map.
554 	 */
555 	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
556 		boundary = map->_dm_boundary;
557 	align = max(map->dm_segs[0]._ds_align, NBPG);
558 	s = splhigh();
559 	/*
560 	 * If our segment size is larger than the boundary we need to
561 	 * split the transfer up into little pieces ourselves.
562 	 */
563 	err = extent_alloc(is->is_dvmamap, sgsize, align,
564 		(sgsize > boundary) ? 0 : boundary,
565 		EX_NOWAIT|EX_BOUNDZERO, (u_long *)&dvmaddr);
566 	splx(s);
567 
568 #ifdef DEBUG
569 	if (err || (dvmaddr == (bus_addr_t)-1))
570 	{
571 		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
572 		    (int)sgsize, flags);
573 #ifdef DDB
574 		Debugger();
575 #endif
576 	}
577 #endif
578 	if (err != 0)
579 		return (err);
580 
581 	if (dvmaddr == (bus_addr_t)-1)
582 		return (ENOMEM);
583 
584 	/* Set the active DVMA map */
585 	map->_dm_dvmastart = dvmaddr;
586 	map->_dm_dvmasize = sgsize;
587 
588 	/*
589 	 * Now split the DVMA range into segments, not crossing
590 	 * the boundary.
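	 *
	 * Worked example (a sketch): with boundary 0x10000, a transfer at
	 * DVMA 0xff80e800 of length 0x2000 ends at 0xff8107ff and crosses
	 * 0xff810000, so it is split into a first segment of 0x10000 -
	 * 0xe800 = 0x1800 bytes and a second segment of 0x800 bytes
	 * starting at the boundary.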
591 	 */
592 	seg = 0;
593 	sgstart = dvmaddr + (vaddr & PGOFSET);
594 	sgend = sgstart + buflen - 1;
595 	map->dm_segs[seg].ds_addr = sgstart;
596 	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary-1 %lx "
597 		"~(boundary-1) %lx\n", boundary, (boundary-1), ~(boundary-1)));
598 	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
599 		/* Oops.  We crossed a boundary.  Split the xfer. */
600 		map->dm_segs[seg].ds_len =
			boundary - (sgstart & (boundary - 1));
601 		DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
602 			"seg %d start %lx size %lx\n", seg,
603 			map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len));
604 		if (++seg >= map->_dm_segcnt) {
605 			/* Too many segments.  Fail the operation. */
606 			DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
607 				"too many segments %d\n", seg));
608 			s = splhigh();
609 			/* How can this fail?  And if it does what can we do? */
610 			err = extent_free(is->is_dvmamap,
611 				dvmaddr, sgsize, EX_NOWAIT);
612 			map->_dm_dvmastart = 0;
613 			map->_dm_dvmasize = 0;
614 			splx(s);
615 			return (E2BIG);
616 		}
617 		sgstart = roundup(sgstart, boundary);
618 		map->dm_segs[seg].ds_addr = sgstart;
619 	}
620 	map->dm_segs[seg].ds_len = sgend - sgstart + 1;
621 	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
622 		"seg %d start %lx size %lx\n", seg,
623 		map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len));
624 	map->dm_nsegs = seg+1;
625 	map->dm_mapsize = buflen;
626 
627 	if (p != NULL)
628 		pmap = p->p_vmspace->vm_map.pmap;
629 	else
630 		pmap = pmap_kernel();
631 
632 	for (; buflen > 0; ) {
633 		/*
634 		 * Get the physical address for this page.
635 		 */
636 		if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
637 			bus_dmamap_unload(t, map);
638 			return (EFAULT);
639 		}
640 
641 		/*
642 		 * Compute the segment size, and adjust counts.
643 		 */
644 		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
645 		if (buflen < sgsize)
646 			sgsize = buflen;
647 
648 		DPRINTF(IDB_BUSDMA,
649 		    ("iommu_dvmamap_load: map %p loading va %p "
650 			    "dva %lx at pa %lx\n",
651 			    map, (void *)vaddr, (long)dvmaddr,
652 			    (long)(curaddr&~(NBPG-1))));
653 		iommu_enter(is, trunc_page(dvmaddr), trunc_page(curaddr),
654 		    flags);
655 
656 		dvmaddr += PAGE_SIZE;
657 		vaddr += sgsize;
658 		buflen -= sgsize;
659 	}
660 	return (0);
661 }
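
/*
 * A typical caller (a sketch): the sbus/psycho _dmamap_load hook hands
 * its iommu_state and the driver's buffer to iommu_dvmamap_load() above;
 * on success, map->dm_segs[] holds DVMA addresses the device can use
 * directly.
 */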
662 
663 
664 void
665 iommu_dvmamap_unload(t, is, map)
666 	bus_dma_tag_t t;
667 	struct iommu_state *is;
668 	bus_dmamap_t map;
669 {
670 	int error, s;
671 	bus_size_t sgsize;
672 
673 	/* Flush the iommu */
674 #ifdef DEBUG
675 	if (!map->_dm_dvmastart) {
676 		printf("iommu_dvmamap_unload: _dm_dvmastart is zero\n");
677 #ifdef DDB
678 		Debugger();
679 #endif
680 	}
681 #endif
682 	iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);
683 
684 	/* Flush the caches */
685 	bus_dmamap_unload(t->_parent, map);
686 
687 	/* Mark the mappings as invalid. */
688 	map->dm_mapsize = 0;
689 	map->dm_nsegs = 0;
690 
691 	s = splhigh();
	sgsize = map->_dm_dvmasize;
692 	error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
693 		map->_dm_dvmasize, EX_NOWAIT);
694 	map->_dm_dvmastart = 0;
695 	map->_dm_dvmasize = 0;
696 	splx(s);
697 	if (error != 0)
698 		printf("warning: %qd of DVMA space lost\n", (long long)sgsize);
699 
701 }
702 
703 
704 int
705 iommu_dvmamap_load_raw(t, is, map, segs, nsegs, flags, size)
706 	bus_dma_tag_t t;
707 	struct iommu_state *is;
708 	bus_dmamap_t map;
709 	bus_dma_segment_t *segs;
710 	int nsegs;
711 	int flags;
712 	bus_size_t size;
713 {
714 	struct vm_page *m;
715 	int i, j, s;
716 	int left;
717 	int err;
718 	bus_size_t sgsize;
719 	paddr_t pa;
720 	bus_size_t boundary, align;
721 	u_long dvmaddr, sgstart, sgend;
722 	struct pglist *mlist;
723 	int pagesz = PAGE_SIZE;
724 
725 	if (map->dm_nsegs) {
726 		/* Already in use?? */
727 #ifdef DIAGNOSTIC
728 		printf("iommu_dvmamap_load_raw: map still in use\n");
729 #endif
730 		bus_dmamap_unload(t, map);
731 	}
732 
733 	/*
734 	 * A boundary presented to bus_dmamem_alloc() takes precedence
735 	 * over boundary in the map.
736 	 */
737 	if ((boundary = segs[0]._ds_boundary) == 0)
738 		boundary = map->_dm_boundary;
739 
740 	align = max(segs[0]._ds_align, NBPG);
741 
742 	/*
743 	 * Make sure that on error condition we return "no valid mappings".
744 	 */
745 	map->dm_nsegs = 0;
746 	/* Count up the total number of pages we need */
747 	pa = segs[0].ds_addr;
748 	sgsize = 0;
749 	left = size;
750 	for (i=0; left && i<nsegs; i++) {
751 		if (round_page(pa) != round_page(segs[i].ds_addr))
752 			sgsize = round_page(sgsize);
753 		sgsize += min(left, segs[i].ds_len);
754 		left -= segs[i].ds_len;
755 		pa = segs[i].ds_addr + segs[i].ds_len;
756 	}
757 	sgsize = round_page(sgsize);
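
	/*
	 * E.g. (a sketch): two 0x100-byte segments on different physical
	 * pages each round up to their own page, so sgsize comes out as
	 * two pages even though round_page(0x200) alone would be one.
	 */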
758 
759 	s = splhigh();
760 	/*
761 	 * If our segment size is larger than the boundary we need to
762 	 * split the transfer up into little pieces ourselves.
763 	 */
764 	err = extent_alloc(is->is_dvmamap, sgsize, align,
765 		(sgsize > boundary) ? 0 : boundary,
766 		((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT) |
767 		EX_BOUNDZERO, (u_long *)&dvmaddr);
768 	splx(s);
769 
770 	if (err != 0)
771 		return (err);
772 
773 #ifdef DEBUG
774 	if (dvmaddr == (bus_addr_t)-1)
775 	{
776 		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) failed!\n",
777 		    (int)sgsize, flags);
#ifdef DDB
778 		Debugger();
#endif
779 	}
780 #endif
781 	if (dvmaddr == (bus_addr_t)-1)
782 		return (ENOMEM);
783 
784 	/* Set the active DVMA map */
785 	map->_dm_dvmastart = dvmaddr;
786 	map->_dm_dvmasize = sgsize;
787 
788 	if ((mlist = segs[0]._ds_mlist) == NULL) {
789 		u_long prev_va = 0;
790 		/*
791 		 * These segs are made up of individual physical pages,
792 		 * probably by _bus_dmamap_load_uio() or
793 		 * _bus_dmamap_load_mbuf().  Ignore the mlist and
794 		 * load each segment individually.
795 		 */
796 		map->dm_mapsize = size;
797 
798 		i = j = 0;
799 		pa = segs[i].ds_addr;
800 		dvmaddr += (pa & PGOFSET);
801 		left = min(size, segs[i].ds_len);
802 
803 		sgstart = dvmaddr;
804 		sgend = sgstart + left - 1;
805 
806 		map->dm_segs[j].ds_addr = dvmaddr;
807 		map->dm_segs[j].ds_len = left;
808 
809 		/* Set the size (which we will be destroying) */
810 		map->dm_mapsize = size;
811 
812 		while (size > 0) {
813 			int incr;
814 
815 			if (left <= 0) {
816 				u_long offset;
817 
818 				/*
819 				 * If the two segs are on different physical
820 				 * pages move to a new virtual page.
821 				 */
822 				if (trunc_page(pa) !=
823 					trunc_page(segs[++i].ds_addr))
824 					dvmaddr += NBPG;
825 
826 				pa = segs[i].ds_addr;
827 				left = min(size, segs[i].ds_len);
828 
829 				offset = (pa & PGOFSET);
830 				if (dvmaddr == trunc_page(dvmaddr) + offset) {
831 					/* We can combine segments */
832 					map->dm_segs[j].ds_len += left;
833 					sgend += left;
834 				} else {
835 					/* Need a new segment */
836 					dvmaddr = trunc_page(dvmaddr) + offset;
837 					DPRINTF(IDB_INFO,
838 						("iommu_dvmamap_load_raw: "
839 							"seg %d start %lx "
840 							"size %lx\n", j,
841 							map->dm_segs[j].ds_addr,
842 							map->dm_segs[j].
843 							ds_len));
844 					if (++j >= map->_dm_segcnt)
845 						goto fail;
846 					map->dm_segs[j].ds_addr = dvmaddr;
847 					map->dm_segs[j].ds_len = left;
848 
849 					sgstart = dvmaddr;
850 					sgend = sgstart + left - 1;
851 				}
852 
853 			}
854 
855 			/* Check for boundary issues */
856 			while ((sgstart & ~(boundary - 1)) !=
857 				(sgend & ~(boundary - 1))) {
858 				/* Need a new segment. */
859 				map->dm_segs[j].ds_len = boundary -
860 					(sgstart & (boundary - 1));
861 				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
862 					"seg %d start %lx size %lx\n", j,
863 					map->dm_segs[j].ds_addr,
864 					map->dm_segs[j].ds_len));
865 				if (++j >= map->_dm_segcnt) {
866 fail:
867 					iommu_dvmamap_unload(t, is, map);
868 					return (E2BIG);
869 				}
870 				sgstart = roundup(sgstart, boundary);
871 				map->dm_segs[j].ds_addr = sgstart;
872 				map->dm_segs[j].ds_len = sgend - sgstart + 1;
873 			}
874 
875 			if (sgsize == 0)
876 				panic("iommu_dvmamap_load_raw: size botch");
877 
878 			DPRINTF(IDB_BUSDMA,
879 				("iommu_dvmamap_load_raw: map %p loading va %lx at pa %lx\n",
880 					map, (long)dvmaddr, (long)(pa)));
881 			/* Enter it if we haven't before. */
882 			if (prev_va != trunc_page(dvmaddr))
883 				iommu_enter(is, prev_va = trunc_page(dvmaddr),
884 					trunc_page(pa), flags);
885 			incr = min(pagesz, left);
886 			dvmaddr += incr;
887 			pa += incr;
888 			left -= incr;
889 			size -= incr;
890 		}
891 		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
892 			"seg %d start %lx size %lx\n", j,
893 			map->dm_segs[j].ds_addr, map->dm_segs[j].ds_len));
894 		map->dm_nsegs = j+1;
895 		return (0);
896 	}
897 	/*
898 	 * This was allocated with bus_dmamem_alloc.
899 	 * The pages are on an `mlist'.
900 	 */
901 	map->dm_mapsize = size;
902 	i = 0;
903 	sgstart = dvmaddr;
904 	sgend = sgstart + size - 1;
905 	map->dm_segs[i].ds_addr = sgstart;
906 	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
907 		/* Oops.  We crossed a boundary.  Split the xfer. */
908 		map->dm_segs[i].ds_len = boundary - (sgstart & (boundary - 1));
909 		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
910 			"seg %d start %lx size %lx\n", i,
911 			map->dm_segs[i].ds_addr,
912 			map->dm_segs[i].ds_len));
913 		if (++i >= map->_dm_segcnt) {
914 			/* Too many segments.  Fail the operation. */
915 			s = splhigh();
916 			/* How can this fail?  And if it does what can we do? */
917 			err = extent_free(is->is_dvmamap,
918 				dvmaddr, sgsize, EX_NOWAIT);
919 			map->_dm_dvmastart = 0;
920 			map->_dm_dvmasize = 0;
921 			splx(s);
922 			return (E2BIG);
923 		}
924 		sgstart = roundup(sgstart, boundary);
925 		map->dm_segs[i].ds_addr = sgstart;
926 	}
927 	map->dm_segs[i].ds_len = sgend - sgstart + 1;
928 	DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
929 			"seg %d start %lx size %lx\n", i,
930 			map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len));
931 
932 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
933 		if (sgsize == 0)
934 			panic("iommu_dvmamap_load_raw: size botch");
935 		pa = VM_PAGE_TO_PHYS(m);
936 
937 		DPRINTF(IDB_BUSDMA,
938 		    ("iommu_dvmamap_load_raw: map %p loading va %lx at pa %lx\n",
939 		    map, (long)dvmaddr, (long)(pa)));
940 		iommu_enter(is, dvmaddr, pa, flags);
941 
942 		dvmaddr += pagesz;
943 		sgsize -= pagesz;
944 	}
945 	map->dm_mapsize = size;
946 	map->dm_nsegs = i+1;
947 	return (0);
948 }
949 
950 void
951 iommu_dvmamap_sync(t, is, map, offset, len, ops)
952 	bus_dma_tag_t t;
953 	struct iommu_state *is;
954 	bus_dmamap_t map;
955 	bus_addr_t offset;
956 	bus_size_t len;
957 	int ops;
958 {
959 	vaddr_t va = map->dm_segs[0].ds_addr + offset;
960 
961 	/*
962 	 * We only support one DMA segment; supporting more makes this code
963 	 * too unwieldy.
964 	 */
965 
966 	if (ops & BUS_DMASYNC_PREREAD) {
967 		DPRINTF(IDB_SYNC,
968 		    ("iommu_dvmamap_sync: syncing va %p len %lu "
969 		     "BUS_DMASYNC_PREREAD\n", (void *)(u_long)va, (u_long)len));
970 
971 		/* Nothing to do */;
972 	}
973 	if (ops & BUS_DMASYNC_POSTREAD) {
974 		DPRINTF(IDB_SYNC,
975 		    ("iommu_dvmamap_sync: syncing va %p len %lu "
976 		     "BUS_DMASYNC_POSTREAD\n", (void *)(u_long)va, (u_long)len));
977 		/* if we have a streaming buffer, flush it here first */
978 		if (is->is_sb[0] || is->is_sb[1])
979 			while (len > 0) {
980 				DPRINTF(IDB_BUSDMA,
981 				    ("iommu_dvmamap_sync: flushing va %p, %lu "
982 				     "bytes left\n", (void *)(u_long)va, (u_long)len));
983 				iommu_strbuf_flush(is, va);
984 				if (len <= NBPG) {
985 					iommu_strbuf_flush_done(is);
986 					len = 0;
987 				} else
988 					len -= NBPG;
989 				va += NBPG;
990 			}
991 	}
992 	if (ops & BUS_DMASYNC_PREWRITE) {
993 		DPRINTF(IDB_SYNC,
994 		    ("iommu_dvmamap_sync: syncing va %p len %lu "
995 		     "BUS_DMASYNC_PREWRITE\n", (void *)(u_long)va, (u_long)len));
996 		/* if we have a streaming buffer, flush it here first */
997 		if (is->is_sb[0] || is->is_sb[1])
998 			while (len > 0) {
999 				DPRINTF(IDB_BUSDMA,
1000 				    ("iommu_dvmamap_sync: flushing va %p, %lu "
1001 				     "bytes left\n", (void *)(u_long)va, (u_long)len));
1002 				iommu_strbuf_flush(is, va);
1003 				if (len <= NBPG) {
1004 					iommu_strbuf_flush_done(is);
1005 					len = 0;
1006 				} else
1007 					len -= NBPG;
1008 				va += NBPG;
1009 			}
1010 	}
1011 	if (ops & BUS_DMASYNC_POSTWRITE) {
1012 		DPRINTF(IDB_SYNC,
1013 		    ("iommu_dvmamap_sync: syncing va %p len %lu "
1014 		     "BUS_DMASYNC_POSTWRITE\n", (void *)(u_long)va, (u_long)len));
1015 		/* Nothing to do */;
1016 	}
1017 }
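
/*
 * For instance (a sketch): a driver calling bus_dmamap_sync(t, map, 0,
 * len, BUS_DMASYNC_POSTREAD) after a device-to-memory transfer lands
 * here via the sbus/psycho bus_dma tag, and on machines with streaming
 * buffers that triggers the page-by-page flush loop above.
 */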
1018 
1019 int
1020 iommu_dvmamem_alloc(t, is, size, alignment, boundary, segs, nsegs, rsegs, flags)
1021 	bus_dma_tag_t t;
1022 	struct iommu_state *is;
1023 	bus_size_t size, alignment, boundary;
1024 	bus_dma_segment_t *segs;
1025 	int nsegs;
1026 	int *rsegs;
1027 	int flags;
1028 {
1029 
1030 	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_alloc: sz %llx align %llx bound %llx "
1031 	   "segp %p flags %d\n", (unsigned long long)size,
1032 	   (unsigned long long)alignment, (unsigned long long)boundary,
1033 	   segs, flags));
1034 	return (bus_dmamem_alloc(t->_parent, size, alignment, boundary,
1035 	    segs, nsegs, rsegs, flags|BUS_DMA_DVMA));
1036 }
1037 
1038 void
1039 iommu_dvmamem_free(t, is, segs, nsegs)
1040 	bus_dma_tag_t t;
1041 	struct iommu_state *is;
1042 	bus_dma_segment_t *segs;
1043 	int nsegs;
1044 {
1045 
1046 	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_free: segp %p nsegs %d\n",
1047 	    segs, nsegs));
1048 	bus_dmamem_free(t->_parent, segs, nsegs);
1049 }
1050 
1051 /*
1052  * Map the DVMA mappings into the kernel pmap.
1053  * Check the flags to see whether we're streaming or coherent.
1054  */
1055 int
1056 iommu_dvmamem_map(t, is, segs, nsegs, size, kvap, flags)
1057 	bus_dma_tag_t t;
1058 	struct iommu_state *is;
1059 	bus_dma_segment_t *segs;
1060 	int nsegs;
1061 	size_t size;
1062 	caddr_t *kvap;
1063 	int flags;
1064 {
1065 	struct vm_page *m;
1066 	vaddr_t va;
1067 	bus_addr_t addr;
1068 	struct pglist *mlist;
1069 	int cbit;
1070 
1071 	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
1072 	    segs, nsegs, size));
1073 
1074 	/*
1075 	 * Allocate some space in the kernel map, and then map these pages
1076 	 * into this space.
1077 	 */
1078 	size = round_page(size);
1079 	va = uvm_km_valloc(kernel_map, size);
1080 	if (va == 0)
1081 		return (ENOMEM);
1082 
1083 	*kvap = (caddr_t)va;
1084 
1085 	/*
1086 	 * digest flags:
1087 	 */
1088 	cbit = 0;
1089 	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
1090 		cbit |= PMAP_NVC;
1091 	if (flags & BUS_DMA_NOCACHE)	/* side effects */
1092 		cbit |= PMAP_NC;
1093 
1094 	/*
1095 	 * Now take this and map it into the CPU.
1096 	 */
1097 	mlist = segs[0]._ds_mlist;
1098 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
1099 #ifdef DIAGNOSTIC
1100 		if (size == 0)
1101 			panic("iommu_dvmamem_map: size botch");
1102 #endif
1103 		addr = VM_PAGE_TO_PHYS(m);
1104 		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
1105 		    "mapping va %lx at %llx\n", va, (unsigned long long)addr | cbit));
1106 		pmap_enter(pmap_kernel(), va, addr | cbit,
1107 		    VM_PROT_READ | VM_PROT_WRITE,
1108 		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
1109 		va += PAGE_SIZE;
1110 		size -= PAGE_SIZE;
1111 	}
1112 	pmap_update(pmap_kernel());
1113 
1114 	return (0);
1115 }
1116 
1117 /*
1118  * Unmap DVMA mappings from kernel
1119  */
1120 void
1121 iommu_dvmamem_unmap(t, is, kva, size)
1122 	bus_dma_tag_t t;
1123 	struct iommu_state *is;
1124 	caddr_t kva;
1125 	size_t size;
1126 {
1127 
1128 	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
1129 	    kva, size));
1130 
1131 #ifdef DIAGNOSTIC
1132 	if ((u_long)kva & PGOFSET)
1133 		panic("iommu_dvmamem_unmap");
1134 #endif
1135 
1136 	size = round_page(size);
1137 	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
1138 	pmap_update(pmap_kernel());
1139 #if 0
1140 	/*
1141 	 * XXX ? is this necessary? i think so and i think other
1142 	 * implementations are missing it.
1143 	 */
1144 	uvm_km_free(kernel_map, (vaddr_t)kva, size);
1145 #endif
1146 }
1147