xref: /netbsd-src/sys/arch/hppa/hppa/mainbus.c (revision a8c74629f602faa0ccf8a463757d7baf858bbf3a)
1 /*	$NetBSD: mainbus.c,v 1.5 2020/10/16 17:50:44 macallan Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matthew Fredette.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*	$OpenBSD: mainbus.c,v 1.74 2009/04/20 00:42:06 oga Exp $	*/
33 
34 /*
35  * Copyright (c) 1998-2004 Michael Shalayeff
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
51  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
52  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
53  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
55  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
56  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
57  * THE POSSIBILITY OF SUCH DAMAGE.
58  */
59 
60 #include <sys/cdefs.h>
61 __KERNEL_RCSID(0, "$NetBSD: mainbus.c,v 1.5 2020/10/16 17:50:44 macallan Exp $");
62 
63 #include "locators.h"
64 #include "power.h"
65 #include "lcd.h"
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/device.h>
70 #include <sys/reboot.h>
71 #include <sys/extent.h>
72 #include <sys/mbuf.h>
73 #include <sys/proc.h>
74 
75 #include <uvm/uvm_page.h>
76 #include <uvm/uvm.h>
77 
78 #include <machine/pdc.h>
79 #include <machine/iomod.h>
80 #include <machine/autoconf.h>
81 
82 #include <hppa/hppa/machdep.h>
83 #include <hppa/dev/cpudevs.h>
84 
85 #if NLCD > 0
86 static struct pdc_chassis_info pdc_chassis_info;
87 #endif
88 
89 #ifdef MBUSDEBUG
90 
91 #define	DPRINTF(s)	do {	\
92 	if (mbusdebug)		\
93 		printf s;	\
94 } while(0)
95 
96 int mbusdebug = 1;
97 #else
98 #define	DPRINTF(s)	/* */
99 #endif
100 
101 struct mainbus_softc {
102 	device_t sc_dv;
103 };
104 
105 int	mbmatch(device_t, cfdata_t, void *);
106 void	mbattach(device_t, device_t, void *);
107 
108 CFATTACH_DECL_NEW(mainbus, sizeof(struct mainbus_softc),
109     mbmatch, mbattach, NULL, NULL);
110 
111 extern struct cfdriver mainbus_cd;
112 
113 static int mb_attached;
114 
115 /* from machdep.c */
116 extern struct extent *hppa_io_extent;
117 
118 uint8_t mbus_r1(void *, bus_space_handle_t, bus_size_t);
119 uint16_t mbus_r2(void *, bus_space_handle_t, bus_size_t);
120 uint32_t mbus_r4(void *, bus_space_handle_t, bus_size_t);
121 uint64_t mbus_r8(void *, bus_space_handle_t, bus_size_t);
122 void mbus_w1(void *, bus_space_handle_t, bus_size_t, uint8_t);
123 void mbus_w2(void *, bus_space_handle_t, bus_size_t, uint16_t);
124 void mbus_w4(void *, bus_space_handle_t, bus_size_t, uint32_t);
125 void mbus_w8(void *, bus_space_handle_t, bus_size_t, uint64_t);
126 void mbus_rm_1(void *, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t);
127 void mbus_rm_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
128 void mbus_rm_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
129 void mbus_rm_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);
130 void mbus_wm_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t);
131 void mbus_wm_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
132 void mbus_wm_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
133 void mbus_wm_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);
134 void mbus_rr_1(void *, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t);
135 void mbus_rr_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
136 void mbus_rr_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
137 void mbus_rr_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);
138 void mbus_wr_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t);
139 void mbus_wr_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
140 void mbus_wr_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
141 void mbus_wr_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);
142 void mbus_sm_1(void *, bus_space_handle_t, bus_size_t, uint8_t, bus_size_t);
143 void mbus_sm_2(void *, bus_space_handle_t, bus_size_t, uint16_t, bus_size_t);
144 void mbus_sm_4(void *, bus_space_handle_t, bus_size_t, uint32_t, bus_size_t);
145 void mbus_sm_8(void *, bus_space_handle_t, bus_size_t, uint64_t, bus_size_t);
146 void mbus_sr_1(void *, bus_space_handle_t, bus_size_t, uint8_t, bus_size_t);
147 void mbus_sr_2(void *, bus_space_handle_t, bus_size_t, uint16_t, bus_size_t);
148 void mbus_sr_4(void *, bus_space_handle_t, bus_size_t, uint32_t, bus_size_t);
149 void mbus_sr_8(void *, bus_space_handle_t, bus_size_t, uint64_t, bus_size_t);
150 void mbus_cp_1(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
151 void mbus_cp_2(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
152 void mbus_cp_4(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
153 void mbus_cp_8(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
154 
155 int mbus_add_mapping(bus_addr_t, bus_size_t, int, bus_space_handle_t *);
156 int mbus_map(void *, bus_addr_t, bus_size_t, int, bus_space_handle_t *);
157 void mbus_unmap(void *, bus_space_handle_t, bus_size_t);
158 int mbus_alloc(void *, bus_addr_t, bus_addr_t, bus_size_t, bus_size_t, bus_size_t, int, bus_addr_t *, bus_space_handle_t *);
159 void mbus_free(void *, bus_space_handle_t, bus_size_t);
160 int mbus_subregion(void *, bus_space_handle_t, bus_size_t, bus_size_t, bus_space_handle_t *);
161 void mbus_barrier(void *, bus_space_handle_t, bus_size_t, bus_size_t, int);
162 void *mbus_vaddr(void *, bus_space_handle_t);
163 paddr_t mbus_mmap(void *, bus_addr_t, off_t, int, int);
164 
165 int mbus_dmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t, int, bus_dmamap_t *);
166 void mbus_dmamap_destroy(void *, bus_dmamap_t);
167 int mbus_dmamap_load(void *, bus_dmamap_t, void *, bus_size_t, struct proc *, int);
168 int mbus_dmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
169 int mbus_dmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
170 int mbus_dmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *, int, bus_size_t, int);
171 void mbus_dmamap_unload(void *, bus_dmamap_t);
172 void mbus_dmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
173 int mbus_dmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t, bus_dma_segment_t *, int, int *, int);
174 void mbus_dmamem_free(void *, bus_dma_segment_t *, int);
175 int mbus_dmamem_map(void *, bus_dma_segment_t *, int, size_t, void **, int);
176 void mbus_dmamem_unmap(void *, void *, size_t);
177 paddr_t mbus_dmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);
178 int _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
179     bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
180     int *segp, int first);
181 
182 extern struct pdc_btlb pdc_btlb;
183 static uint32_t bmm[HPPA_FLEX_COUNT/32];
184 
185 int
186 mbus_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
187     bus_space_handle_t *bshp)
188 {
189 	vaddr_t pa, spa, epa;
190 	int flex;
191 
192 	DPRINTF(("\n%s(%lx,%lx,%scachable,%p)\n", __func__,
193 	    bpa, size, flags? "" : "non", bshp));
194 
195 	KASSERT(bpa >= HPPA_IOSPACE);
196 	KASSERT(!(flags & BUS_SPACE_MAP_CACHEABLE));
197 
198 	/*
199 	 * Mappings are established in HPPA_FLEX_SIZE units,
200 	 * either with BTLB, or regular mappings of the whole area.
201 	 */
202 	for (pa = bpa ; size != 0; pa = epa) {
203 		flex = HPPA_FLEX(pa);
204 		spa = pa & HPPA_FLEX_MASK;
205 		epa = spa + HPPA_FLEX_SIZE; /* may wrap to 0... */
206 
207 		size -= uimin(size, HPPA_FLEX_SIZE - (pa - spa));
208 
209 		/* do need a new mapping? */
210 		if (bmm[flex / 32] & (1 << (flex % 32))) {
211 			DPRINTF(("%s: already mapped flex=%x, mask=%x\n",
212 			    __func__, flex, bmm[flex / 32]));
213 			continue;
214 		}
215 
216 		DPRINTF(("%s: adding flex=%x %lx-%lx, ", __func__, flex, spa,
217 		    epa - 1));
218 
219 		bmm[flex / 32] |= (1 << (flex % 32));
220 
221 		while (spa != epa) {
222 			DPRINTF(("%s: kenter 0x%lx-0x%lx", __func__, spa,
223 			    epa));
224 			for (; spa != epa; spa += PAGE_SIZE)
225 				pmap_kenter_pa(spa, spa,
226 				    VM_PROT_READ | VM_PROT_WRITE, 0);
227 		}
228 	}
229 
230 	*bshp = bpa;
231 
232 	/* Success. */
233 	return 0;
234 }
235 
236 int
237 mbus_map(void *v, bus_addr_t bpa, bus_size_t size, int flags,
238     bus_space_handle_t *bshp)
239 {
240 	int error;
241 
242 	/*
243 	 * We must only be called with addresses in I/O space.
244 	 */
245 	KASSERT(bpa >= HPPA_IOSPACE);
246 
247 	/*
248 	 * Allocate the region of I/O space.
249 	 */
250 	error = extent_alloc_region(hppa_io_extent, bpa, size, EX_NOWAIT);
251 	if (error)
252 		return error;
253 
254 	/*
255 	 * Map the region of I/O space.
256 	 */
257 	error = mbus_add_mapping(bpa, size, flags, bshp);
258 	if (error) {
259 		DPRINTF(("bus_space_map: pa 0x%lx, size 0x%lx failed\n",
260 		    bpa, size));
261 		if (extent_free(hppa_io_extent, bpa, size, EX_NOWAIT)) {
262 			printf ("bus_space_map: can't free region\n");
263 		}
264 	}
265 
266 	return error;
267 }
268 
269 void
270 mbus_unmap(void *v, bus_space_handle_t bsh, bus_size_t size)
271 {
272 	bus_addr_t bpa = bsh;
273 	int error;
274 
275 	/*
276 	 * Free the region of I/O space.
277 	 */
278 	error = extent_free(hppa_io_extent, bpa, size, EX_NOWAIT);
279 	if (error) {
280 		DPRINTF(("bus_space_unmap: ps 0x%lx, size 0x%lx\n",
281 		    bpa, size));
282 		panic("bus_space_unmap: can't free region (%d)", error);
283 	}
284 }
285 
286 int
287 mbus_alloc(void *v, bus_addr_t rstart, bus_addr_t rend, bus_size_t size,
288     bus_size_t align, bus_size_t boundary, int flags, bus_addr_t *addrp,
289     bus_space_handle_t *bshp)
290 {
291 	bus_addr_t bpa;
292 	int error;
293 
294 	if (rstart < hppa_io_extent->ex_start ||
295 	    rend > hppa_io_extent->ex_end)
296 		panic("bus_space_alloc: bad region start/end");
297 
298 	/*
299 	 * Allocate the region of I/O space.
300 	 */
301 	error = extent_alloc_subregion1(hppa_io_extent, rstart, rend, size,
302 	    align, 0, boundary, EX_NOWAIT, &bpa);
303 	if (error)
304 		return error;
305 
306 	/*
307 	 * Map the region of I/O space.
308 	 */
309 	error = mbus_add_mapping(bpa, size, flags, bshp);
310 	if (error) {
311 		DPRINTF(("bus_space_alloc: pa 0x%lx, size 0x%lx failed\n",
312 		    bpa, size));
313 		if (extent_free(hppa_io_extent, bpa, size, EX_NOWAIT)) {
314 			printf("bus_space_alloc: can't free region\n");
315 		}
316 	}
317 
318 	*addrp = bpa;
319 
320 	return error;
321 }
322 
323 void
324 mbus_free(void *v, bus_space_handle_t h, bus_size_t size)
325 {
326 	/* bus_space_unmap() does all that we need to do. */
327 	mbus_unmap(v, h, size);
328 }
329 
330 int
331 mbus_subregion(void *v, bus_space_handle_t bsh, bus_size_t offset,
332     bus_size_t size, bus_space_handle_t *nbshp)
333 {
334 	*nbshp = bsh + offset;
335 	return(0);
336 }
337 
void
mbus_barrier(void *v, bus_space_handle_t h, bus_size_t o, bus_size_t l, int op)
{
	/*
	 * Synchronize the caches regardless of the requested range or
	 * operation type; sync_caches() acts as a full barrier.
	 */
	sync_caches();
}
343 
void*
mbus_vaddr(void *v, bus_space_handle_t h)
{
	/*
	 * We must only be called with addresses in I/O space.
	 */
	KASSERT(h >= HPPA_IOSPACE);
	/* Handles are 1:1 mapped addresses, directly usable as KVAs. */
	return (void*)h;
}
353 
paddr_t
mbus_mmap(void *v, bus_addr_t addr, off_t off, int prot, int flags)
{
	/* Return the page frame cookie for device mmap(2). */
	return btop(addr + off);
}
359 
360 uint8_t
361 mbus_r1(void *v, bus_space_handle_t h, bus_size_t o)
362 {
363 	return *((volatile uint8_t *)(h + o));
364 }
365 
366 uint16_t
367 mbus_r2(void *v, bus_space_handle_t h, bus_size_t o)
368 {
369 	return *((volatile uint16_t *)(h + o));
370 }
371 
372 uint32_t
373 mbus_r4(void *v, bus_space_handle_t h, bus_size_t o)
374 {
375 	return *((volatile uint32_t *)(h + o));
376 }
377 
378 uint64_t
379 mbus_r8(void *v, bus_space_handle_t h, bus_size_t o)
380 {
381 	return *((volatile uint64_t *)(h + o));
382 }
383 
384 void
385 mbus_w1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv)
386 {
387 	*((volatile uint8_t *)(h + o)) = vv;
388 }
389 
390 void
391 mbus_w2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv)
392 {
393 	*((volatile uint16_t *)(h + o)) = vv;
394 }
395 
396 void
397 mbus_w4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv)
398 {
399 	*((volatile uint32_t *)(h + o)) = vv;
400 }
401 
402 void
403 mbus_w8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv)
404 {
405 	*((volatile uint64_t *)(h + o)) = vv;
406 }
407 
408 
409 void
410 mbus_rm_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t *a, bus_size_t c)
411 {
412 	h += o;
413 	while (c--)
414 		*(a++) = *(volatile uint8_t *)h;
415 }
416 
417 void
418 mbus_rm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t *a, bus_size_t c)
419 {
420 	h += o;
421 	while (c--)
422 		*(a++) = *(volatile uint16_t *)h;
423 }
424 
425 void
426 mbus_rm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t *a, bus_size_t c)
427 {
428 	h += o;
429 	while (c--)
430 		*(a++) = *(volatile uint32_t *)h;
431 }
432 
433 void
434 mbus_rm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t *a, bus_size_t c)
435 {
436 	h += o;
437 	while (c--)
438 		*(a++) = *(volatile uint64_t *)h;
439 }
440 
441 void
442 mbus_wm_1(void *v, bus_space_handle_t h, bus_size_t o, const uint8_t *a, bus_size_t c)
443 {
444 	h += o;
445 	while (c--)
446 		*(volatile uint8_t *)h = *(a++);
447 }
448 
449 void
450 mbus_wm_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c)
451 {
452 	h += o;
453 	while (c--)
454 		*(volatile uint16_t *)h = *(a++);
455 }
456 
457 void
458 mbus_wm_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c)
459 {
460 	h += o;
461 	while (c--)
462 		*(volatile uint32_t *)h = *(a++);
463 }
464 
465 void
466 mbus_wm_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c)
467 {
468 	h += o;
469 	while (c--)
470 		*(volatile uint64_t *)h = *(a++);
471 }
472 
473 void
474 mbus_sm_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv, bus_size_t c)
475 {
476 	h += o;
477 	while (c--)
478 		*(volatile uint8_t *)h = vv;
479 }
480 
481 void
482 mbus_sm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv, bus_size_t c)
483 {
484 	h += o;
485 	while (c--)
486 		*(volatile uint16_t *)h = vv;
487 }
488 
489 void
490 mbus_sm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv, bus_size_t c)
491 {
492 	h += o;
493 	while (c--)
494 		*(volatile uint32_t *)h = vv;
495 }
496 
497 void
498 mbus_sm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv, bus_size_t c)
499 {
500 	h += o;
501 	while (c--)
502 		*(volatile uint64_t *)h = vv;
503 }
504 
505 void mbus_rrm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t*a, bus_size_t c);
506 void mbus_rrm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t*a, bus_size_t c);
507 void mbus_rrm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t*a, bus_size_t c);
508 
509 void mbus_wrm_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c);
510 void mbus_wrm_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c);
511 void mbus_wrm_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c);
512 
513 void
514 mbus_rr_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t *a, bus_size_t c)
515 {
516 	volatile uint8_t *p;
517 
518 	h += o;
519 	p = (void *)h;
520 	while (c--)
521 		*a++ = *p++;
522 }
523 
524 void
525 mbus_rr_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t *a, bus_size_t c)
526 {
527 	volatile uint16_t *p;
528 
529 	h += o;
530 	p = (void *)h;
531 	while (c--)
532 		*a++ = *p++;
533 }
534 
535 void
536 mbus_rr_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t *a, bus_size_t c)
537 {
538 	volatile uint32_t *p;
539 
540 	h += o;
541 	p = (void *)h;
542 	while (c--)
543 		*a++ = *p++;
544 }
545 
546 void
547 mbus_rr_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t *a, bus_size_t c)
548 {
549 	volatile uint64_t *p;
550 
551 	h += o;
552 	p = (void *)h;
553 	while (c--)
554 		*a++ = *p++;
555 }
556 
557 void
558 mbus_wr_1(void *v, bus_space_handle_t h, bus_size_t o, const uint8_t *a, bus_size_t c)
559 {
560 	volatile uint8_t *p;
561 
562 	h += o;
563 	p = (void *)h;
564 	while (c--)
565 		*p++ = *a++;
566 }
567 
568 void
569 mbus_wr_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c)
570 {
571 	volatile uint16_t *p;
572 
573 	h += o;
574 	p = (void *)h;
575 	while (c--)
576 		*p++ = *a++;
577 }
578 
579 void
580 mbus_wr_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c)
581 {
582 	volatile uint32_t *p;
583 
584 	h += o;
585 	p = (void *)h;
586 	while (c--)
587 		*p++ = *a++;
588 }
589 
590 void
591 mbus_wr_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c)
592 {
593 	volatile uint64_t *p;
594 
595 	h += o;
596 	p = (void *)h;
597 	while (c--)
598 		*p++ = *a++;
599 }
600 
601 void mbus_rrr_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
602 void mbus_rrr_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
603 void mbus_rrr_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);
604 
605 void mbus_wrr_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
606 void mbus_wrr_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
607 void mbus_wrr_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);
608 
609 void
610 mbus_sr_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv, bus_size_t c)
611 {
612 	volatile uint8_t *p;
613 
614 	h += o;
615 	p = (void *)h;
616 	while (c--)
617 		*p++ = vv;
618 }
619 
620 void
621 mbus_sr_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv, bus_size_t c)
622 {
623 	volatile uint16_t *p;
624 
625 	h += o;
626 	p = (void *)h;
627 	while (c--)
628 		*p++ = vv;
629 }
630 
631 void
632 mbus_sr_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv, bus_size_t c)
633 {
634 	volatile uint32_t *p;
635 
636 	h += o;
637 	p = (void *)h;
638 	while (c--)
639 		*p++ = vv;
640 }
641 
642 void
643 mbus_sr_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv, bus_size_t c)
644 {
645 	volatile uint64_t *p;
646 
647 	h += o;
648 	p = (void *)h;
649 	while (c--)
650 		*p++ = vv;
651 }
652 
653 void
654 mbus_cp_1(void *v, bus_space_handle_t h1, bus_size_t o1,
655 	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
656 {
657 	volatile uint8_t *p1, *p2;
658 
659 	h1 += o1;
660 	h2 += o2;
661 	p1 = (void *)h1;
662 	p2 = (void *)h2;
663 	while (c--)
664 		*p1++ = *p2++;
665 }
666 
667 void
668 mbus_cp_2(void *v, bus_space_handle_t h1, bus_size_t o1,
669 	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
670 {
671 	volatile uint16_t *p1, *p2;
672 
673 	h1 += o1;
674 	h2 += o2;
675 	p1 = (void *)h1;
676 	p2 = (void *)h2;
677 	while (c--)
678 		*p1++ = *p2++;
679 }
680 
681 void
682 mbus_cp_4(void *v, bus_space_handle_t h1, bus_size_t o1,
683 	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
684 {
685 	volatile uint32_t *p1, *p2;
686 
687 	h1 += o1;
688 	h2 += o2;
689 	p1 = (void *)h1;
690 	p2 = (void *)h2;
691 	while (c--)
692 		*p1++ = *p2++;
693 }
694 
695 void
696 mbus_cp_8(void *v, bus_space_handle_t h1, bus_size_t o1,
697 	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
698 {
699 	volatile uint64_t *p1, *p2;
700 
701 	h1 += o1;
702 	h2 += o2;
703 	p1 = (void *)h1;
704 	p2 = (void *)h2;
705 	while (c--)
706 		*p1++ = *p2++;
707 }
708 
709 
/*
 * bus_space tag for the native bus: handles are 1:1-mapped addresses,
 * so the accessors above are plain loads and stores.  The initializers
 * are positional and must stay in the order declared by
 * struct hppa_bus_space_tag.
 */
const struct hppa_bus_space_tag hppa_bustag = {
	NULL,

	mbus_map, mbus_unmap, mbus_subregion, mbus_alloc, mbus_free,
	mbus_barrier, mbus_vaddr, mbus_mmap,
	mbus_r1,    mbus_r2,   mbus_r4,   mbus_r8,
	mbus_w1,    mbus_w2,   mbus_w4,   mbus_w8,
	mbus_rm_1,  mbus_rm_2, mbus_rm_4, mbus_rm_8,
	mbus_wm_1,  mbus_wm_2, mbus_wm_4, mbus_wm_8,
	mbus_sm_1,  mbus_sm_2, mbus_sm_4, mbus_sm_8,
	/* *_stream_* are the same as non-stream for native busses */
		    mbus_rm_2, mbus_rm_4, mbus_rm_8,
		    mbus_wm_2, mbus_wm_4, mbus_wm_8,
	mbus_rr_1,  mbus_rr_2, mbus_rr_4, mbus_rr_8,
	mbus_wr_1,  mbus_wr_2, mbus_wr_4, mbus_wr_8,
	/* *_stream_* are the same as non-stream for native busses */
		    mbus_rr_2, mbus_rr_4, mbus_rr_8,
		    mbus_wr_2, mbus_wr_4, mbus_wr_8,
	mbus_sr_1,  mbus_sr_2, mbus_sr_4, mbus_sr_8,
	mbus_cp_1,  mbus_cp_2, mbus_cp_4, mbus_cp_8
};
731 
732 /*
733  * Common function for DMA map creation.  May be called by bus-specific DMA map
734  * creation functions.
735  */
736 int
737 mbus_dmamap_create(void *v, bus_size_t size, int nsegments, bus_size_t maxsegsz,
738     bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
739 {
740 	struct hppa_bus_dmamap *map;
741 	size_t mapsize;
742 
743 	/*
744 	 * Allocate and initialize the DMA map.  The end of the map is a
745 	 * variable-sized array of segments, so we allocate enough room for
746 	 * them in one shot.
747 	 *
748 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation of
749 	 * ALLOCNOW notifies others that we've reserved these resources, and
750 	 * they are not to be freed.
751 	 *
752 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence the
753 	 * (nsegments - 1).
754 	 */
755 	mapsize = sizeof(struct hppa_bus_dmamap) +
756 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
757 	map = malloc(mapsize, M_DMAMAP,
758 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
759 	if (!map)
760 		return ENOMEM;
761 
762 	memset(map, 0, mapsize);
763 	map->_dm_size = size;
764 	map->_dm_segcnt = nsegments;
765 	map->_dm_maxsegsz = maxsegsz;
766 	map->_dm_boundary = boundary;
767 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
768 	map->dm_mapsize = 0;		/* no valid mappings */
769 	map->dm_nsegs = 0;
770 
771 	*dmamp = map;
772 	return 0;
773 }
774 
775 /*
776  * Common function for DMA map destruction.  May be called by bus-specific DMA
777  * map destruction functions.
778  */
779 void
780 mbus_dmamap_destroy(void *v, bus_dmamap_t map)
781 {
782 
783 	/*
784 	 * If the handle contains a valid mapping, unload it.
785 	 */
786 	if (map->dm_mapsize != 0)
787 		mbus_dmamap_unload(v, map);
788 
789 	free(map, M_DMAMAP);
790 }
791 
792 /*
793  * load DMA map with a linear buffer.
794  */
795 int
796 mbus_dmamap_load(void *v, bus_dmamap_t map, void *buf, bus_size_t buflen,
797     struct proc *p, int flags)
798 {
799 	vaddr_t lastaddr;
800 	int seg, error;
801 	struct vmspace *vm;
802 
803 	/*
804 	 * Make sure that on error condition we return "no valid mappings".
805 	 */
806 	map->dm_mapsize = 0;
807 	map->dm_nsegs = 0;
808 
809 	if (buflen > map->_dm_size)
810 		return EINVAL;
811 
812 	if (p != NULL) {
813 		vm = p->p_vmspace;
814 	} else {
815 		vm = vmspace_kernel();
816 	}
817 
818 	seg = 0;
819 	error = _bus_dmamap_load_buffer(NULL, map, buf, buflen, vm, flags,
820 	    &lastaddr, &seg, 1);
821 	if (error == 0) {
822 		map->dm_mapsize = buflen;
823 		map->dm_nsegs = seg + 1;
824 	}
825 	return error;
826 }
827 
828 /*
829  * Like bus_dmamap_load(), but for mbufs.
830  */
int
mbus_dmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/* Only packet-header mbufs carry a total length we can check. */
	KASSERT(m0->m_flags & M_PKTHDR);

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	/*
	 * Walk the chain, loading each non-empty mbuf.  "first" and "seg"
	 * are threaded through _bus_dmamap_load_buffer() so successive
	 * mbufs extend the same segment list.
	 */
	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		/* mbuf data always lives in the kernel's address space. */
		error = _bus_dmamap_load_buffer(NULL, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return error;
}
866 
867 /*
868  * Like bus_dmamap_load(), but for uios.
869  */
870 int
871 mbus_dmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio,
872     int flags)
873 {
874 	vaddr_t lastaddr;
875 	int seg, i, error, first;
876 	bus_size_t minlen, resid;
877 	struct iovec *iov;
878 	void *addr;
879 
880 	/*
881 	 * Make sure that on error condition we return "no valid mappings."
882 	 */
883 	map->dm_mapsize = 0;
884 	map->dm_nsegs = 0;
885 
886 	resid = uio->uio_resid;
887 	iov = uio->uio_iov;
888 
889 	first = 1;
890 	seg = 0;
891 	error = 0;
892 	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
893 		/*
894 		 * Now at the first iovec to load.  Load each iovec
895 		 * until we have exhausted the residual count.
896 		 */
897 		minlen = MIN(resid, iov[i].iov_len);
898 		addr = (void *)iov[i].iov_base;
899 
900 		error = _bus_dmamap_load_buffer(NULL, map, addr, minlen,
901 		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
902 		first = 0;
903 
904 		resid -= minlen;
905 	}
906 	if (error == 0) {
907 		map->dm_mapsize = uio->uio_resid;
908 		map->dm_nsegs = seg + 1;
909 	}
910 	return error;
911 }
912 
913 /*
914  * Like bus_dmamap_load(), but for raw memory allocated with
915  * bus_dmamem_alloc().
916  */
int
mbus_dmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct pglist *mlist;
	struct vm_page *m;
	paddr_t pa, pa_next;
	bus_size_t mapsize;
	bus_size_t pagesz = PAGE_SIZE;
	int seg;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	/* Load the allocated pages. */
	mlist = segs[0]._ds_mlist;
	pa_next = 0;
	seg = -1;
	mapsize = size;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {

		if (size == 0)
			panic("mbus_dmamem_load_raw: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		/* Start a new segment whenever pages stop being contiguous. */
		if (pa != pa_next) {
			if (++seg >= map->_dm_segcnt)
				panic("mbus_dmamem_load_raw: nsegs botch");
			map->dm_segs[seg].ds_addr = pa;
			map->dm_segs[seg].ds_len = 0;
		}
		pa_next = pa + PAGE_SIZE;
		/*
		 * The last page may be only partially used; once shrunk,
		 * pagesz stays small because size only decreases.
		 */
		if (size < pagesz)
			pagesz = size;
		map->dm_segs[seg].ds_len += pagesz;
		size -= pagesz;
	}

	/* Make the map truly valid. */
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = mapsize;

	return 0;
}
964 
965 /*
966  * unload a DMA map.
967  */
void
mbus_dmamap_unload(void *v, bus_dmamap_t map)
{
	/*
	 * Neither mbus_dmamap_load() nor mbus_dmamap_load_raw() acquires
	 * any per-load resources on this platform, so there is nothing to
	 * release here.
	 */

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
981 
void
mbus_dmamap_sync(void *v, bus_dmamap_t map, bus_addr_t offset, bus_size_t len,
    int ops)
{
	int i;

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("mbus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("mbus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if ((offset + len) > map->dm_mapsize)
		panic("mbus_dmamap_sync: bad length");
#endif

	/*
	 * For a virtually-indexed write-back cache, we need to do the
	 * following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this here in case a
	 *	write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if we are doing
	 *	a PREREAD|PREWRITE, we can collapse the whole thing into a
	 *	single Wb-Inv.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (len == 0 || ops == 0)
		return;

	/*
	 * Walk the segment list, flushing the D-cache over the portion of
	 * each segment that falls inside the [offset, offset + len) window.
	 */
	for (i = 0; len != 0 && i < map->dm_nsegs; i++) {
		if (offset >= map->dm_segs[i].ds_len)
			offset -= map->dm_segs[i].ds_len;
		else {
			bus_size_t l = map->dm_segs[i].ds_len - offset;

			if (l > len)
				l = len;

			fdcache(HPPA_SID_KERNEL, map->dm_segs[i]._ds_va +
			    offset, l);
			len -= l;
			offset = 0;
		}
	}

 	/* For either operation, drain outstanding DMA with syncdma. */
	__asm __volatile ("sync\n\tsyncdma\n\tsync\n\t"
	    "nop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop" ::: "memory");
}
1043 
1044 /*
1045  * Common function for DMA-safe memory allocation.  May be called by bus-
1046  * specific DMA memory allocation functions.
1047  */
int
mbus_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	paddr_t low, high;
	struct pglist *mlist;
	struct vm_page *m;
	paddr_t pa, pa_next;
	int seg;
	int error;

	DPRINTF(("%s: size 0x%lx align 0x%lx bdry %0lx segs %p nsegs %d\n",
	    __func__, size, alignment, boundary, segs, nsegs));

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Decide where we can allocate pages.  BUS_DMA_24BIT confines the
	 * allocation below 16MB; otherwise "0 - 1" wraps to the maximum
	 * paddr_t value (presumably paddr_t is unsigned), i.e. no limit.
	 */
	low = 0;
	high = ((flags & BUS_DMA_24BIT) ? (1 << 24) : 0) - 1;

	if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return ENOMEM;

	/*
	 * Allocate physical pages from the VM system.
	 */
	TAILQ_INIT(mlist);
	error = uvm_pglistalloc(size, low, high, 0, 0, mlist, nsegs,
	    (flags & BUS_DMA_NOWAIT) == 0);

	/* If we don't have the pages. */
	if (error) {
		DPRINTF(("%s: uvm_pglistalloc(%lx, %lx, %lx, 0, 0, %p, %d, %0x)"
		    " failed", __func__, size, low, high, mlist, nsegs,
		    (flags & BUS_DMA_NOWAIT) == 0));
		free(mlist, M_DEVBUF);
		return error;
	}

	pa_next = 0;
	seg = -1;

	/*
	 * Coalesce physically contiguous pages into segments; start a new
	 * segment whenever contiguity breaks, bailing out if more than
	 * nsegs segments would be needed.
	 */
	TAILQ_FOREACH(m, mlist, pageq.queue) {
		pa = VM_PAGE_TO_PHYS(m);
		if (pa != pa_next) {
			if (++seg >= nsegs) {
				uvm_pglistfree(mlist);
				free(mlist, M_DEVBUF);
				return ENOMEM;
			}
			segs[seg].ds_addr = pa;
			segs[seg].ds_len = PAGE_SIZE;
			segs[seg]._ds_mlist = NULL;
			segs[seg]._ds_va = 0;
		} else
			segs[seg].ds_len += PAGE_SIZE;
		pa_next = pa + PAGE_SIZE;
	}
	*rsegs = seg + 1;

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamap_free() can return it.
	 *
	 * Nobody should touch the pageq.queue fields while these pages are in
	 * our custody.
	 */
	segs[0]._ds_mlist = mlist;

	/*
	 * We now have physical pages, but no kernel virtual addresses yet.
	 * These may be allocated in bus_dmamap_map.
	 */
	return 0;
}
1126 
1127 void
1128 mbus_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
1129 {
1130 	struct pglist *mlist;
1131 	/*
1132 	 * Return the list of physical pages back to the VM system.
1133 	 */
1134 	mlist = segs[0]._ds_mlist;
1135 	if (mlist == NULL)
1136 		return;
1137 
1138 	uvm_pglistfree(mlist);
1139 	free(mlist, M_DEVBUF);
1140 }
1141 
1142 /*
1143  * Common function for mapping DMA-safe memory.  May be called by bus-specific
1144  * DMA memory map functions.
1145  */
1146 int
1147 mbus_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
1148     void **kvap, int flags)
1149 {
1150 	bus_addr_t addr;
1151 	vaddr_t va;
1152 	int curseg;
1153 	u_int pmflags =
1154 	    hppa_cpu_hastlbu_p() ? PMAP_NOCACHE : 0;
1155 	const uvm_flag_t kmflags =
1156 	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
1157 
1158 	size = round_page(size);
1159 
1160 	/* Get a chunk of kernel virtual space. */
1161 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
1162 	if (__predict_false(va == 0))
1163 		return ENOMEM;
1164 
1165 	*kvap = (void *)va;
1166 
1167 	for (curseg = 0; curseg < nsegs; curseg++) {
1168 		segs[curseg]._ds_va = va;
1169 		for (addr = segs[curseg].ds_addr;
1170 		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len); ) {
1171 			KASSERT(size != 0);
1172 
1173 			pmap_kenter_pa(va, addr, VM_PROT_READ | VM_PROT_WRITE,
1174 			   pmflags);
1175 
1176 			addr += PAGE_SIZE;
1177 			va += PAGE_SIZE;
1178 			size -= PAGE_SIZE;
1179 		}
1180 	}
1181 	pmap_update(pmap_kernel());
1182 	return 0;
1183 }
1184 
1185 /*
1186  * Common function for unmapping DMA-safe memory.  May be called by bus-
1187  * specific DMA memory unmapping functions.
1188  */
1189 void
1190 mbus_dmamem_unmap(void *v, void *kva, size_t size)
1191 {
1192 
1193 	KASSERT(((vaddr_t)kva & PAGE_MASK) == 0);
1194 
1195 	size = round_page(size);
1196 	pmap_kremove((vaddr_t)kva, size);
1197 	pmap_update(pmap_kernel());
1198 	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1199 }
1200 
1201 /*
1202  * Common functin for mmap(2)'ing DMA-safe memory.  May be called by bus-
1203  * specific DMA mmap(2)'ing functions.
1204  */
1205 paddr_t
1206 mbus_dmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs,
1207 	off_t off, int prot, int flags)
1208 {
1209 	int i;
1210 
1211 	for (i = 0; i < nsegs; i++) {
1212 		KASSERT((off & PGOFSET) == 0);
1213 		KASSERT((segs[i].ds_addr & PGOFSET) == 0);
1214 		KASSERT((segs[i].ds_len & PGOFSET) == 0);
1215 
1216 		if (off >= segs[i].ds_len) {
1217 			off -= segs[i].ds_len;
1218 			continue;
1219 		}
1220 
1221 		return btop((u_long)segs[i].ds_addr + off);
1222 	}
1223 
1224 	/* Page not found. */
1225 	return -1;
1226 }
1227 
/*
 * Utility function to load a linear buffer into a DMA map.  Translates
 * the buffer page by page through the pmap of 'vm' and fills in
 * map->dm_segs starting at *segp, coalescing physically contiguous
 * chunks where the map's constraints allow.
 *
 * *lastaddrp and *segp carry state between successive calls so several
 * buffers can be loaded into one map; 'first' is non-zero for the very
 * first chunk.  Returns 0 on success or EFBIG if the buffer does not
 * fit in the map's remaining segments.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	/* Translate through the pmap of the buffer's owning vmspace. */
	pmap = vm_map_pmap(&vm->vm_map);

	lastaddr = *lastaddrp;
	/* Assumes _dm_boundary is zero or a power of two. */
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		bool ok __diagused;
		/*
		 * Get the physical address for this segment.
		 */
		ok = pmap_extract(pmap, vaddr, &curaddr);
		KASSERT(ok == true);

		/*
		 * Compute the segment size, and adjust counts.
		 * At most one page per iteration, clipped to the page
		 * boundary and to the remaining buffer length.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with previous
		 * segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_va = vaddr;
			first = 0;
		} else {
			/*
			 * Coalesce only if physically contiguous, the
			 * combined length stays within the per-segment
			 * maximum, and no boundary would be crossed.
			 */
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				/* Out of segments: fall through to EFBIG. */
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_va = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX better return value here? */
	return 0;
}
1309 
/*
 * The mainbus bus_dma(9) method table.  Handed to child devices via
 * confargs.ca_dmatag in mbattach() below.  The first member is the
 * private cookie passed as 'v' to each method; none of them use it.
 */
const struct hppa_bus_dma_tag hppa_dmatag = {
	NULL,
	mbus_dmamap_create, mbus_dmamap_destroy,
	mbus_dmamap_load, mbus_dmamap_load_mbuf,
	mbus_dmamap_load_uio, mbus_dmamap_load_raw,
	mbus_dmamap_unload, mbus_dmamap_sync,

	mbus_dmamem_alloc, mbus_dmamem_free, mbus_dmamem_map,
	mbus_dmamem_unmap, mbus_dmamem_mmap
};
1320 
1321 int
1322 mbmatch(device_t parent, cfdata_t cf, void *aux)
1323 {
1324 
1325 	/* there will be only one */
1326 	if (mb_attached)
1327 		return 0;
1328 
1329 	return 1;
1330 }
1331 
1332 static device_t
1333 mb_module_callback(device_t self, struct confargs *ca)
1334 {
1335 	if (ca->ca_type.iodc_type == HPPA_TYPE_NPROC ||
1336 	    ca->ca_type.iodc_type == HPPA_TYPE_MEMORY)
1337 		return NULL;
1338 
1339 	return config_found_sm_loc(self, "gedoens", NULL, ca, mbprint, mbsubmatch);
1340 }
1341 
1342 static device_t
1343 mb_cpu_mem_callback(device_t self, struct confargs *ca)
1344 {
1345 	if ((ca->ca_type.iodc_type != HPPA_TYPE_NPROC &&
1346 	     ca->ca_type.iodc_type != HPPA_TYPE_MEMORY))
1347 		return NULL;
1348 
1349 	return config_found_sm_loc(self, "gedoens", NULL, ca, mbprint, mbsubmatch);
1350 }
1351 
/*
 * Attach the mainbus: map the fixed physical I/O space, enable DMA via
 * the broadcast flex register, then attach PDC, optional power/lcd
 * pseudo-devices, and finally walk the bus twice -- once for CPUs and
 * memory controllers, once for everything else.
 */
void
mbattach(device_t parent, device_t self, void *aux)
{
	struct mainbus_softc *sc = device_private(self);
	struct confargs nca;
	bus_space_handle_t ioh;
#if NLCD > 0
	int err;
#endif

	sc->sc_dv = self;
	mb_attached = 1;

	/*
	 * Map all of Fixed Physical, Local Broadcast, and Global Broadcast
	 * space.  These spaces are adjacent and in that order and run to the
	 * end of the address space.
	 */
	/*
	 * XXX fredette - this may be a copout, or it may be a great idea.  I'm
	 * not sure which yet.
	 */

	/* map all the way till the end of the memory */
	if (bus_space_map(&hppa_bustag, hppa_mcpuhpa, (~0LU - hppa_mcpuhpa + 1),
	    0, &ioh))
		panic("%s: cannot map mainbus IO space", __func__);

	/*
	 * Local-Broadcast the HPA to all modules on the bus, with the
	 * DMA_ENABLE bit set in the flex register.
	 */
	((struct iomod *)(hppa_mcpuhpa & HPPA_FLEX_MASK))[FPA_IOMOD].io_flex =
		(void *)((hppa_mcpuhpa & HPPA_FLEX_MASK) | DMA_ENABLE);

	aprint_normal(" [flex %lx]\n", hppa_mcpuhpa & HPPA_FLEX_MASK);

	/* PDC first */
	memset(&nca, 0, sizeof(nca));
	nca.ca_name = "pdc";
	nca.ca_hpa = 0;
	nca.ca_iot = &hppa_bustag;
	nca.ca_dmatag = &hppa_dmatag;
	config_found(self, &nca, mbprint);

#if NPOWER > 0
	/* get some power */
	memset(&nca, 0, sizeof(nca));
	nca.ca_name = "power";
	nca.ca_irq = HPPACF_IRQ_UNDEF;
	nca.ca_iot = &hppa_bustag;
	config_found(self, &nca, mbprint);
#endif

#if NLCD > 0
	/* Attach the front-panel LCD only if PDC says one is enabled. */
	memset(&nca, 0, sizeof(nca));
	err = pdcproc_chassis_info(&pdc_chassis_info, &nca.ca_pcl);
	if (!err && nca.ca_pcl.enabled) {
		nca.ca_name = "lcd";
		nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] =
		nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1;
		nca.ca_dp.dp_mod = -1;
		nca.ca_irq = HPPACF_IRQ_UNDEF;
		nca.ca_iot = &hppa_bustag;
		nca.ca_hpa = nca.ca_pcl.cmd_addr;

		config_found(self, &nca, mbprint);
	}
#endif

	hppa_modules_scan();

	/* Search and attach all CPUs and memory controllers. */
	memset(&nca, 0, sizeof(nca));
	nca.ca_name = "mainbus";
	nca.ca_hpa = 0;
	nca.ca_hpabase = HPPA_FPA;	/* Central bus */
	nca.ca_nmodules = MAXMODBUS;
	nca.ca_irq = HPPACF_IRQ_UNDEF;
	nca.ca_iot = &hppa_bustag;
	nca.ca_dmatag = &hppa_dmatag;
	nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] =
	nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1;
	nca.ca_dp.dp_mod = -1;
	pdc_scanbus(self, &nca, mb_cpu_mem_callback);

	/* Search for IO hardware. */
	memset(&nca, 0, sizeof(nca));
	nca.ca_name = "mainbus";
	nca.ca_hpa = 0;
	nca.ca_hpabase = 0;		/* Central bus already walked above */
	nca.ca_nmodules = MAXMODBUS;
	nca.ca_irq = HPPACF_IRQ_UNDEF;
	nca.ca_iot = &hppa_bustag;
	nca.ca_dmatag = &hppa_dmatag;
	nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] =
	nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1;
	nca.ca_dp.dp_mod = -1;
	pdc_scanbus(self, &nca, mb_module_callback);

	hppa_modules_done();
}
1453 
1454 int
1455 mbprint(void *aux, const char *pnp)
1456 {
1457 	int n;
1458 	struct confargs *ca = aux;
1459 
1460 	if (pnp)
1461 		aprint_normal("\"%s\" at %s (type 0x%x, sv 0x%x)", ca->ca_name,
1462 		    pnp, ca->ca_type.iodc_type, ca->ca_type.iodc_sv_model);
1463 	if (ca->ca_hpa) {
1464 		aprint_normal(" hpa 0x%lx", ca->ca_hpa);
1465 		if (ca->ca_dp.dp_mod >=0) {
1466 			aprint_normal(" path ");
1467 			for (n = 0; n < 6; n++) {
1468 				if (ca->ca_dp.dp_bc[n] >= 0)
1469 					aprint_normal("%d/", ca->ca_dp.dp_bc[n]);
1470 			}
1471 			aprint_normal("%d", ca->ca_dp.dp_mod);
1472 		}
1473 		if (!pnp && ca->ca_irq >= 0) {
1474 			aprint_normal(" irq %d", ca->ca_irq);
1475 		}
1476 	}
1477 
1478 	return UNCONF;
1479 }
1480 
1481 int
1482 mbsubmatch(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
1483 {
1484 	struct confargs *ca = aux;
1485 	int ret;
1486 	int saved_irq;
1487 
1488 	saved_irq = ca->ca_irq;
1489 	if (cf->hppacf_irq != HPPACF_IRQ_UNDEF)
1490 		ca->ca_irq = cf->hppacf_irq;
1491 	if (!(ret = config_match(parent, cf, aux)))
1492 		ca->ca_irq = saved_irq;
1493 	return ret;
1494 }
1495