1 /* $NetBSD: mainbus.c,v 1.13 2024/01/28 09:03:22 macallan Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matthew Fredette.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /* $OpenBSD: mainbus.c,v 1.74 2009/04/20 00:42:06 oga Exp $ */
33
34 /*
35 * Copyright (c) 1998-2004 Michael Shalayeff
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
51 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
52 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
53 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
56 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
57 * THE POSSIBILITY OF SUCH DAMAGE.
58 */
59
60 #include <sys/cdefs.h>
61 __KERNEL_RCSID(0, "$NetBSD: mainbus.c,v 1.13 2024/01/28 09:03:22 macallan Exp $");
62
63 #include "locators.h"
64 #include "power.h"
65 #include "lcd.h"
66 #include "opt_useleds.h"
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/device.h>
71 #include <sys/reboot.h>
72 #include <sys/extent.h>
73 #include <sys/mbuf.h>
74 #include <sys/proc.h>
75 #include <sys/kmem.h>
76
77 #include <uvm/uvm_page.h>
78 #include <uvm/uvm.h>
79
80 #include <machine/pdc.h>
81 #include <machine/iomod.h>
82 #include <machine/autoconf.h>
83
84 #include <hppa/hppa/machdep.h>
85 #include <hppa/dev/cpudevs.h>
86
87 static struct pdc_chassis_info pdc_chassis_info;
88
89 #ifdef MBUSDEBUG
90
91 #define DPRINTF(s) do { \
92 if (mbusdebug) \
93 printf s; \
94 } while(0)
95
96 int mbusdebug = 1;
97 #else
98 #define DPRINTF(s) /* */
99 #endif
100
101 struct mainbus_softc {
102 device_t sc_dv;
103 };
104
105 int mbmatch(device_t, cfdata_t, void *);
106 void mbattach(device_t, device_t, void *);
107
108 CFATTACH_DECL_NEW(mainbus, sizeof(struct mainbus_softc),
109 mbmatch, mbattach, NULL, NULL);
110
111 extern struct cfdriver mainbus_cd;
112
113 static int mb_attached;
114
115 /* from machdep.c */
116 extern struct extent *hppa_io_extent;
117
118 uint8_t mbus_r1(void *, bus_space_handle_t, bus_size_t);
119 uint16_t mbus_r2(void *, bus_space_handle_t, bus_size_t);
120 uint32_t mbus_r4(void *, bus_space_handle_t, bus_size_t);
121 uint64_t mbus_r8(void *, bus_space_handle_t, bus_size_t);
122 void mbus_w1(void *, bus_space_handle_t, bus_size_t, uint8_t);
123 void mbus_w2(void *, bus_space_handle_t, bus_size_t, uint16_t);
124 void mbus_w4(void *, bus_space_handle_t, bus_size_t, uint32_t);
125 void mbus_w8(void *, bus_space_handle_t, bus_size_t, uint64_t);
126 void mbus_rm_1(void *, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t);
127 void mbus_rm_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
128 void mbus_rm_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
129 void mbus_rm_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);
130 void mbus_wm_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t);
131 void mbus_wm_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
132 void mbus_wm_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
133 void mbus_wm_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);
134 void mbus_rr_1(void *, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t);
135 void mbus_rr_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
136 void mbus_rr_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
137 void mbus_rr_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);
138 void mbus_wr_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t);
139 void mbus_wr_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
140 void mbus_wr_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
141 void mbus_wr_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);
142 void mbus_sm_1(void *, bus_space_handle_t, bus_size_t, uint8_t, bus_size_t);
143 void mbus_sm_2(void *, bus_space_handle_t, bus_size_t, uint16_t, bus_size_t);
144 void mbus_sm_4(void *, bus_space_handle_t, bus_size_t, uint32_t, bus_size_t);
145 void mbus_sm_8(void *, bus_space_handle_t, bus_size_t, uint64_t, bus_size_t);
146 void mbus_sr_1(void *, bus_space_handle_t, bus_size_t, uint8_t, bus_size_t);
147 void mbus_sr_2(void *, bus_space_handle_t, bus_size_t, uint16_t, bus_size_t);
148 void mbus_sr_4(void *, bus_space_handle_t, bus_size_t, uint32_t, bus_size_t);
149 void mbus_sr_8(void *, bus_space_handle_t, bus_size_t, uint64_t, bus_size_t);
150 void mbus_cp_1(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
151 void mbus_cp_2(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
152 void mbus_cp_4(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
153 void mbus_cp_8(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
154
155 int mbus_add_mapping(bus_addr_t, bus_size_t, int, bus_space_handle_t *);
156 int mbus_map(void *, bus_addr_t, bus_size_t, int, bus_space_handle_t *);
157 void mbus_unmap(void *, bus_space_handle_t, bus_size_t);
158 int mbus_alloc(void *, bus_addr_t, bus_addr_t, bus_size_t, bus_size_t, bus_size_t, int, bus_addr_t *, bus_space_handle_t *);
159 void mbus_free(void *, bus_space_handle_t, bus_size_t);
160 int mbus_subregion(void *, bus_space_handle_t, bus_size_t, bus_size_t, bus_space_handle_t *);
161 void mbus_barrier(void *, bus_space_handle_t, bus_size_t, bus_size_t, int);
162 void *mbus_vaddr(void *, bus_space_handle_t);
163 paddr_t mbus_mmap(void *, bus_addr_t, off_t, int, int);
164
165 int mbus_dmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t, int, bus_dmamap_t *);
166 void mbus_dmamap_destroy(void *, bus_dmamap_t);
167 int mbus_dmamap_load(void *, bus_dmamap_t, void *, bus_size_t, struct proc *, int);
168 int mbus_dmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
169 int mbus_dmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
170 int mbus_dmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *, int, bus_size_t, int);
171 void mbus_dmamap_unload(void *, bus_dmamap_t);
172 void mbus_dmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
173 int mbus_dmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t, bus_dma_segment_t *, int, int *, int);
174 void mbus_dmamem_free(void *, bus_dma_segment_t *, int);
175 int mbus_dmamem_map(void *, bus_dma_segment_t *, int, size_t, void **, int);
176 void mbus_dmamem_unmap(void *, void *, size_t);
177 paddr_t mbus_dmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);
178 int _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
179 bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
180 int *segp, int first);
181
182 extern struct pdc_btlb pdc_btlb;
183 static uint32_t bmm[HPPA_FLEX_COUNT/32];
184
185 int
186 mbus_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
187 bus_space_handle_t *bshp)
188 {
189 vaddr_t pa, spa, epa;
190 int flex;
191
192 DPRINTF(("\n%s(%lx,%lx,%scachable,%p)\n", __func__,
193 bpa, size, flags? "" : "non", bshp));
194
195 KASSERT(bpa >= HPPA_IOSPACE);
196 KASSERT(!(flags & BUS_SPACE_MAP_CACHEABLE));
197
198 /*
199 * Mappings are established in HPPA_FLEX_SIZE units,
200 * either with BTLB, or regular mappings of the whole area.
201 */
202 for (pa = bpa ; size != 0; pa = epa) {
203 flex = HPPA_FLEX(pa);
204 spa = pa & HPPA_FLEX_MASK;
205 epa = spa + HPPA_FLEX_SIZE; /* may wrap to 0... */
206
207 size -= uimin(size, HPPA_FLEX_SIZE - (pa - spa));
208
209 /* do we need a new mapping? */
210 if (bmm[flex / 32] & (1 << (flex % 32))) {
211 DPRINTF(("%s: already mapped flex=%x, mask=%x\n",
212 __func__, flex, bmm[flex / 32]));
213 continue;
214 }
215
216 DPRINTF(("%s: adding flex=%x %lx-%lx, ", __func__, flex, spa,
217 epa - 1));
218
219 bmm[flex / 32] |= (1 << (flex % 32));
220
221 while (spa != epa) {
222 DPRINTF(("%s: kenter 0x%lx-0x%lx", __func__, spa,
223 epa));
224 for (; spa != epa; spa += PAGE_SIZE)
225 pmap_kenter_pa(spa, spa,
226 VM_PROT_READ | VM_PROT_WRITE, 0);
227 }
228 }
229
230 *bshp = bpa;
231
232 /* Success. */
233 return 0;
234 }
235
236 int
237 mbus_map(void *v, bus_addr_t bpa, bus_size_t size, int flags,
238 bus_space_handle_t *bshp)
239 {
240 int error;
241
242 /*
243 * We must only be called with addresses in I/O space.
244 */
245 KASSERT(bpa >= HPPA_IOSPACE);
246
247 /*
248 * Allocate the region of I/O space.
249 */
250 error = extent_alloc_region(hppa_io_extent, bpa, size, EX_NOWAIT);
251 if (error)
252 return error;
253
254 /*
255 * Map the region of I/O space.
256 */
257 error = mbus_add_mapping(bpa, size, flags, bshp);
258 if (error) {
259 DPRINTF(("bus_space_map: pa 0x%lx, size 0x%lx failed\n",
260 bpa, size));
261 if (extent_free(hppa_io_extent, bpa, size, EX_NOWAIT)) {
262 printf ("bus_space_map: can't free region\n");
263 }
264 }
265
266 return error;
267 }
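/*
 * Usage sketch (illustrative only, not compiled): a driver consumes the
 * mapping path above through the machine-independent bus_space(9) API,
 * with &hppa_bustag as the tag.  The device address, size, and register
 * offset below are hypothetical.
 *
 *	bus_space_tag_t iot = &hppa_bustag;
 *	bus_space_handle_t ioh;
 *	uint32_t sts;
 *
 *	if (bus_space_map(iot, 0xf4008000UL, 0x1000, 0, &ioh))
 *		return ENXIO;
 *	sts = bus_space_read_4(iot, ioh, 0x10);
 *	bus_space_write_4(iot, ioh, 0x10, sts | 0x1);
 *	bus_space_unmap(iot, ioh, 0x1000);
 */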
268
269 void
270 mbus_unmap(void *v, bus_space_handle_t bsh, bus_size_t size)
271 {
272 bus_addr_t bpa = bsh;
273 int error;
274
275 /*
276 * Free the region of I/O space.
277 */
278 error = extent_free(hppa_io_extent, bpa, size, EX_NOWAIT);
279 if (error) {
280 DPRINTF(("bus_space_unmap: pa 0x%lx, size 0x%lx\n",
281 bpa, size));
282 panic("bus_space_unmap: can't free region (%d)", error);
283 }
284 }
285
286 int
287 mbus_alloc(void *v, bus_addr_t rstart, bus_addr_t rend, bus_size_t size,
288 bus_size_t align, bus_size_t boundary, int flags, bus_addr_t *addrp,
289 bus_space_handle_t *bshp)
290 {
291 bus_addr_t bpa;
292 int error;
293
294 if (rstart < hppa_io_extent->ex_start ||
295 rend > hppa_io_extent->ex_end)
296 panic("bus_space_alloc: bad region start/end");
297
298 /*
299 * Allocate the region of I/O space.
300 */
301 error = extent_alloc_subregion1(hppa_io_extent, rstart, rend, size,
302 align, 0, boundary, EX_NOWAIT, &bpa);
303 if (error)
304 return error;
305
306 /*
307 * Map the region of I/O space.
308 */
309 error = mbus_add_mapping(bpa, size, flags, bshp);
310 if (error) {
311 DPRINTF(("bus_space_alloc: pa 0x%lx, size 0x%lx failed\n",
312 bpa, size));
313 if (extent_free(hppa_io_extent, bpa, size, EX_NOWAIT)) {
314 printf("bus_space_alloc: can't free region\n");
315 }
316 }
317
318 *addrp = bpa;
319
320 return error;
321 }
322
323 void
324 mbus_free(void *v, bus_space_handle_t h, bus_size_t size)
325 {
326 /* bus_space_unmap() does all that we need to do. */
327 mbus_unmap(v, h, size);
328 }
329
330 int
331 mbus_subregion(void *v, bus_space_handle_t bsh, bus_size_t offset,
332 bus_size_t size, bus_space_handle_t *nbshp)
333 {
334 *nbshp = bsh + offset;
335 return 0;
336 }
337
338 void
339 mbus_barrier(void *v, bus_space_handle_t h, bus_size_t o, bus_size_t l, int op)
340 {
341 sync_caches();
342 }
343
344 void*
345 mbus_vaddr(void *v, bus_space_handle_t h)
346 {
347 /*
348 * We must only be called with addresses in I/O space.
349 */
350 KASSERT(h >= HPPA_IOSPACE);
351 return (void*)h;
352 }
353
354 paddr_t
355 mbus_mmap(void *v, bus_addr_t addr, off_t off, int prot, int flags)
356 {
357 return btop(addr + off);
358 }
359
360 uint8_t
361 mbus_r1(void *v, bus_space_handle_t h, bus_size_t o)
362 {
363 return *((volatile uint8_t *)(h + o));
364 }
365
366 uint16_t
367 mbus_r2(void *v, bus_space_handle_t h, bus_size_t o)
368 {
369 return *((volatile uint16_t *)(h + o));
370 }
371
372 uint32_t
373 mbus_r4(void *v, bus_space_handle_t h, bus_size_t o)
374 {
375 return *((volatile uint32_t *)(h + o));
376 }
377
378 uint64_t
379 mbus_r8(void *v, bus_space_handle_t h, bus_size_t o)
380 {
381 return *((volatile uint64_t *)(h + o));
382 }
383
384 void
385 mbus_w1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv)
386 {
387 *((volatile uint8_t *)(h + o)) = vv;
388 }
389
390 void
391 mbus_w2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv)
392 {
393 *((volatile uint16_t *)(h + o)) = vv;
394 }
395
396 void
397 mbus_w4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv)
398 {
399 *((volatile uint32_t *)(h + o)) = vv;
400 }
401
402 void
403 mbus_w8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv)
404 {
405 *((volatile uint64_t *)(h + o)) = vv;
406 }
407
408
409 void
410 mbus_rm_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t *a, bus_size_t c)
411 {
412 h += o;
413 while (c--)
414 *(a++) = *(volatile uint8_t *)h;
415 }
416
417 void
418 mbus_rm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t *a, bus_size_t c)
419 {
420 h += o;
421 while (c--)
422 *(a++) = *(volatile uint16_t *)h;
423 }
424
425 void
426 mbus_rm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t *a, bus_size_t c)
427 {
428 h += o;
429 while (c--)
430 *(a++) = *(volatile uint32_t *)h;
431 }
432
433 void
434 mbus_rm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t *a, bus_size_t c)
435 {
436 h += o;
437 while (c--)
438 *(a++) = *(volatile uint64_t *)h;
439 }
440
441 void
442 mbus_wm_1(void *v, bus_space_handle_t h, bus_size_t o, const uint8_t *a, bus_size_t c)
443 {
444 h += o;
445 while (c--)
446 *(volatile uint8_t *)h = *(a++);
447 }
448
449 void
450 mbus_wm_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c)
451 {
452 h += o;
453 while (c--)
454 *(volatile uint16_t *)h = *(a++);
455 }
456
457 void
458 mbus_wm_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c)
459 {
460 h += o;
461 while (c--)
462 *(volatile uint32_t *)h = *(a++);
463 }
464
465 void
466 mbus_wm_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c)
467 {
468 h += o;
469 while (c--)
470 *(volatile uint64_t *)h = *(a++);
471 }
472
473 void
474 mbus_sm_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv, bus_size_t c)
475 {
476 h += o;
477 while (c--)
478 *(volatile uint8_t *)h = vv;
479 }
480
481 void
482 mbus_sm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv, bus_size_t c)
483 {
484 h += o;
485 while (c--)
486 *(volatile uint16_t *)h = vv;
487 }
488
489 void
490 mbus_sm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv, bus_size_t c)
491 {
492 h += o;
493 while (c--)
494 *(volatile uint32_t *)h = vv;
495 }
496
497 void
498 mbus_sm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv, bus_size_t c)
499 {
500 h += o;
501 while (c--)
502 *(volatile uint64_t *)h = vv;
503 }
504
505 void mbus_rrm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t *a, bus_size_t c);
506 void mbus_rrm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t *a, bus_size_t c);
507 void mbus_rrm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t *a, bus_size_t c);
508
509 void mbus_wrm_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c);
510 void mbus_wrm_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c);
511 void mbus_wrm_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c);
512
513 void
514 mbus_rr_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t *a, bus_size_t c)
515 {
516 volatile uint8_t *p;
517
518 h += o;
519 p = (void *)h;
520 while (c--)
521 *a++ = *p++;
522 }
523
524 void
525 mbus_rr_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t *a, bus_size_t c)
526 {
527 volatile uint16_t *p;
528
529 h += o;
530 p = (void *)h;
531 while (c--)
532 *a++ = *p++;
533 }
534
535 void
536 mbus_rr_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t *a, bus_size_t c)
537 {
538 volatile uint32_t *p;
539
540 h += o;
541 p = (void *)h;
542 while (c--)
543 *a++ = *p++;
544 }
545
546 void
547 mbus_rr_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t *a, bus_size_t c)
548 {
549 volatile uint64_t *p;
550
551 h += o;
552 p = (void *)h;
553 while (c--)
554 *a++ = *p++;
555 }
556
557 void
558 mbus_wr_1(void *v, bus_space_handle_t h, bus_size_t o, const uint8_t *a, bus_size_t c)
559 {
560 volatile uint8_t *p;
561
562 h += o;
563 p = (void *)h;
564 while (c--)
565 *p++ = *a++;
566 }
567
568 void
569 mbus_wr_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c)
570 {
571 volatile uint16_t *p;
572
573 h += o;
574 p = (void *)h;
575 while (c--)
576 *p++ = *a++;
577 }
578
579 void
580 mbus_wr_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c)
581 {
582 volatile uint32_t *p;
583
584 h += o;
585 p = (void *)h;
586 while (c--)
587 *p++ = *a++;
588 }
589
590 void
591 mbus_wr_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c)
592 {
593 volatile uint64_t *p;
594
595 h += o;
596 p = (void *)h;
597 while (c--)
598 *p++ = *a++;
599 }
600
601 void mbus_rrr_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
602 void mbus_rrr_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
603 void mbus_rrr_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);
604
605 void mbus_wrr_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
606 void mbus_wrr_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
607 void mbus_wrr_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);
608
609 void
610 mbus_sr_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv, bus_size_t c)
611 {
612 volatile uint8_t *p;
613
614 h += o;
615 p = (void *)h;
616 while (c--)
617 *p++ = vv;
618 }
619
620 void
621 mbus_sr_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv, bus_size_t c)
622 {
623 volatile uint16_t *p;
624
625 h += o;
626 p = (void *)h;
627 while (c--)
628 *p++ = vv;
629 }
630
631 void
632 mbus_sr_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv, bus_size_t c)
633 {
634 volatile uint32_t *p;
635
636 h += o;
637 p = (void *)h;
638 while (c--)
639 *p++ = vv;
640 }
641
642 void
643 mbus_sr_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv, bus_size_t c)
644 {
645 volatile uint64_t *p;
646
647 h += o;
648 p = (void *)h;
649 while (c--)
650 *p++ = vv;
651 }
652
653 void
654 mbus_cp_1(void *v, bus_space_handle_t h1, bus_size_t o1,
655 bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
656 {
657 volatile uint8_t *p1, *p2;
658
659 h1 += o1;
660 h2 += o2;
661 p1 = (void *)h1;
662 p2 = (void *)h2;
663 while (c--)
664 *p1++ = *p2++;
665 }
666
667 void
668 mbus_cp_2(void *v, bus_space_handle_t h1, bus_size_t o1,
669 bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
670 {
671 volatile uint16_t *p1, *p2;
672
673 h1 += o1;
674 h2 += o2;
675 p1 = (void *)h1;
676 p2 = (void *)h2;
677 while (c--)
678 *p1++ = *p2++;
679 }
680
681 void
682 mbus_cp_4(void *v, bus_space_handle_t h1, bus_size_t o1,
683 bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
684 {
685 volatile uint32_t *p1, *p2;
686
687 h1 += o1;
688 h2 += o2;
689 p1 = (void *)h1;
690 p2 = (void *)h2;
691 while (c--)
692 *p1++ = *p2++;
693 }
694
695 void
696 mbus_cp_8(void *v, bus_space_handle_t h1, bus_size_t o1,
697 bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
698 {
699 volatile uint64_t *p1, *p2;
700
701 h1 += o1;
702 h2 += o2;
703 p1 = (void *)h1;
704 p2 = (void *)h2;
705 while (c--)
706 *p1++ = *p2++;
707 }
708
709
710 const struct hppa_bus_space_tag hppa_bustag = {
711 NULL,
712
713 mbus_map, mbus_unmap, mbus_subregion, mbus_alloc, mbus_free,
714 mbus_barrier, mbus_vaddr, mbus_mmap,
715 mbus_r1, mbus_r2, mbus_r4, mbus_r8,
716 mbus_r2, mbus_r4, mbus_r8,
717 mbus_w1, mbus_w2, mbus_w4, mbus_w8,
718 mbus_w2, mbus_w4, mbus_w8,
719 mbus_rm_1, mbus_rm_2, mbus_rm_4, mbus_rm_8,
720 mbus_wm_1, mbus_wm_2, mbus_wm_4, mbus_wm_8,
721 mbus_sm_1, mbus_sm_2, mbus_sm_4, mbus_sm_8,
722 /* *_stream_* are the same as non-stream for native busses */
723 mbus_rm_2, mbus_rm_4, mbus_rm_8,
724 mbus_wm_2, mbus_wm_4, mbus_wm_8,
725 mbus_rr_1, mbus_rr_2, mbus_rr_4, mbus_rr_8,
726 mbus_wr_1, mbus_wr_2, mbus_wr_4, mbus_wr_8,
727 /* *_stream_* are the same as non-stream for native busses */
728 mbus_rr_2, mbus_rr_4, mbus_rr_8,
729 mbus_wr_2, mbus_wr_4, mbus_wr_8,
730 mbus_sr_1, mbus_sr_2, mbus_sr_4, mbus_sr_8,
731 mbus_cp_1, mbus_cp_2, mbus_cp_4, mbus_cp_8
732 };
733
734 static size_t
735 _bus_dmamap_mapsize(int const nsegments)
736 {
737 KASSERT(nsegments > 0);
738 return sizeof(struct hppa_bus_dmamap) +
739 (sizeof(bus_dma_segment_t) * (nsegments - 1));
740 }
741
742 /*
743 * Common function for DMA map creation. May be called by bus-specific DMA map
744 * creation functions.
745 */
746 int
747 mbus_dmamap_create(void *v, bus_size_t size, int nsegments, bus_size_t maxsegsz,
748 bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
749 {
750 struct hppa_bus_dmamap *map;
751
752 /*
753 * Allocate and initialize the DMA map. The end of the map is a
754 * variable-sized array of segments, so we allocate enough room for
755 * them in one shot.
756 *
757 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation of
758 * ALLOCNOW notifies others that we've reserved these resources, and
759 * they are not to be freed.
760 *
761 * The bus_dmamap_t includes one bus_dma_segment_t, hence the
762 * (nsegments - 1).
763 */
764 map = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
765 (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);
766 if (!map)
767 return ENOMEM;
768
769 map->_dm_size = size;
770 map->_dm_segcnt = nsegments;
771 map->_dm_maxsegsz = maxsegsz;
772 map->_dm_boundary = boundary;
773 map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
774 map->dm_mapsize = 0; /* no valid mappings */
775 map->dm_nsegs = 0;
776
777 *dmamp = map;
778 return 0;
779 }
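/*
 * Lifecycle sketch (illustrative only, not compiled): how a driver drives
 * the map created above through the MI bus_dma(9) API, with &hppa_dmatag
 * as the tag.  The buffer and sizes are hypothetical.
 *
 *	bus_dma_tag_t dmat = &hppa_dmatag;
 *	bus_dmamap_t map;
 *	void *buf;		// hypothetical wired kernel buffer
 *	bus_size_t len;		// hypothetical transfer length
 *
 *	if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT, &map))
 *		return ENOMEM;
 *	if (bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *		// ... program the device with map->dm_segs[0].ds_addr ...
 *		bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(dmat, map);
 *	}
 *	bus_dmamap_destroy(dmat, map);
 */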
780
781 /*
782 * Common function for DMA map destruction. May be called by bus-specific DMA
783 * map destruction functions.
784 */
785 void
786 mbus_dmamap_destroy(void *v, bus_dmamap_t map)
787 {
788
789 /*
790 * If the handle contains a valid mapping, unload it.
791 */
792 if (map->dm_mapsize != 0)
793 mbus_dmamap_unload(v, map);
794
795 kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
796 }
797
798 /*
799 * load DMA map with a linear buffer.
800 */
801 int
802 mbus_dmamap_load(void *v, bus_dmamap_t map, void *buf, bus_size_t buflen,
803 struct proc *p, int flags)
804 {
805 vaddr_t lastaddr;
806 int seg, error;
807 struct vmspace *vm;
808
809 /*
810 * Make sure that on error condition we return "no valid mappings".
811 */
812 map->dm_mapsize = 0;
813 map->dm_nsegs = 0;
814
815 if (buflen > map->_dm_size)
816 return EINVAL;
817
818 if (p != NULL) {
819 vm = p->p_vmspace;
820 } else {
821 vm = vmspace_kernel();
822 }
823
824 seg = 0;
825 error = _bus_dmamap_load_buffer(NULL, map, buf, buflen, vm, flags,
826 &lastaddr, &seg, 1);
827 if (error == 0) {
828 map->dm_mapsize = buflen;
829 map->dm_nsegs = seg + 1;
830 }
831 return error;
832 }
833
834 /*
835 * Like bus_dmamap_load(), but for mbufs.
836 */
837 int
838 mbus_dmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m0,
839 int flags)
840 {
841 vaddr_t lastaddr;
842 int seg, error, first;
843 struct mbuf *m;
844
845 /*
846 * Make sure that on error condition we return "no valid mappings."
847 */
848 map->dm_mapsize = 0;
849 map->dm_nsegs = 0;
850
851 KASSERT(m0->m_flags & M_PKTHDR);
852
853 if (m0->m_pkthdr.len > map->_dm_size)
854 return EINVAL;
855
856 first = 1;
857 seg = 0;
858 error = 0;
859 for (m = m0; m != NULL && error == 0; m = m->m_next) {
860 if (m->m_len == 0)
861 continue;
862 error = _bus_dmamap_load_buffer(NULL, map, m->m_data, m->m_len,
863 vmspace_kernel(), flags, &lastaddr, &seg, first);
864 first = 0;
865 }
866 if (error == 0) {
867 map->dm_mapsize = m0->m_pkthdr.len;
868 map->dm_nsegs = seg + 1;
869 }
870 return error;
871 }
872
873 /*
874 * Like bus_dmamap_load(), but for uios.
875 */
876 int
877 mbus_dmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio,
878 int flags)
879 {
880 vaddr_t lastaddr;
881 int seg, i, error, first;
882 bus_size_t minlen, resid;
883 struct iovec *iov;
884 void *addr;
885
886 /*
887 * Make sure that on error condition we return "no valid mappings."
888 */
889 map->dm_mapsize = 0;
890 map->dm_nsegs = 0;
891
892 resid = uio->uio_resid;
893 iov = uio->uio_iov;
894
895 first = 1;
896 seg = 0;
897 error = 0;
898 for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
899 /*
900 * Now at the first iovec to load. Load each iovec
901 * until we have exhausted the residual count.
902 */
903 minlen = MIN(resid, iov[i].iov_len);
904 addr = (void *)iov[i].iov_base;
905
906 error = _bus_dmamap_load_buffer(NULL, map, addr, minlen,
907 uio->uio_vmspace, flags, &lastaddr, &seg, first);
908 first = 0;
909
910 resid -= minlen;
911 }
912 if (error == 0) {
913 map->dm_mapsize = uio->uio_resid;
914 map->dm_nsegs = seg + 1;
915 }
916 return error;
917 }
918
919 /*
920 * Like bus_dmamap_load(), but for raw memory allocated with
921 * bus_dmamem_alloc().
922 */
923 int
924 mbus_dmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
925 int nsegs, bus_size_t size, int flags)
926 {
927 struct pglist *mlist;
928 struct vm_page *m;
929 paddr_t pa, pa_next;
930 bus_size_t mapsize;
931 bus_size_t pagesz = PAGE_SIZE;
932 int seg;
933
934 /*
935 * Make sure that on error condition we return "no valid mappings".
936 */
937 map->dm_nsegs = 0;
938 map->dm_mapsize = 0;
939
940 /* Load the allocated pages. */
941 mlist = segs[0]._ds_mlist;
942 pa_next = 0;
943 seg = -1;
944 mapsize = size;
945 for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
946
947 if (size == 0)
948 panic("mbus_dmamap_load_raw: size botch");
949
950 pa = VM_PAGE_TO_PHYS(m);
951 if (pa != pa_next) {
952 if (++seg >= map->_dm_segcnt)
953 panic("mbus_dmamap_load_raw: nsegs botch");
954 map->dm_segs[seg].ds_addr = pa;
955 map->dm_segs[seg].ds_len = 0;
956 }
957 pa_next = pa + PAGE_SIZE;
958 if (size < pagesz)
959 pagesz = size;
960 map->dm_segs[seg].ds_len += pagesz;
961 size -= pagesz;
962 }
963
964 /* Make the map truly valid. */
965 map->dm_nsegs = seg + 1;
966 map->dm_mapsize = mapsize;
967
968 return 0;
969 }
970
971 /*
972 * unload a DMA map.
973 */
974 void
975 mbus_dmamap_unload(void *v, bus_dmamap_t map)
976 {
977 /*
978 * Neither mbus_dmamap_load() nor mbus_dmamap_load_raw() acquires
979 * resources that need to be released here; all we have to do is
980 * mark the map invalid.
981 */
982
983 /* Mark the mappings as invalid. */
984 map->dm_mapsize = 0;
985 map->dm_nsegs = 0;
986 }
987
988 void
989 mbus_dmamap_sync(void *v, bus_dmamap_t map, bus_addr_t offset, bus_size_t len,
990 int ops)
991 {
992 int i;
993
994 /*
995 * Mixing of PRE and POST operations is not allowed.
996 */
997 if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
998 (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
999 panic("mbus_dmamap_sync: mix PRE and POST");
1000
1001 #ifdef DIAGNOSTIC
1002 if (offset >= map->dm_mapsize)
1003 panic("mbus_dmamap_sync: bad offset %lu (map size is %lu)",
1004 offset, map->dm_mapsize);
1005 if ((offset + len) > map->dm_mapsize)
1006 panic("mbus_dmamap_sync: bad length");
1007 #endif
1008
1009 /*
1010 * For a virtually-indexed write-back cache, we need to do the
1011 * following things:
1012 *
1013 * PREREAD -- Invalidate the D-cache. We do this here in case a
1014 * write-back is required by the back-end.
1015 *
1016 * PREWRITE -- Write-back the D-cache. Note that if we are doing
1017 * a PREREAD|PREWRITE, we can collapse the whole thing into a
1018 * single Wb-Inv.
1019 *
1020 * POSTREAD -- Nothing.
1021 *
1022 * POSTWRITE -- Nothing.
1023 */
1024
1025 ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1026 if (len == 0 || ops == 0)
1027 return;
1028
1029 for (i = 0; len != 0 && i < map->dm_nsegs; i++) {
1030 if (offset >= map->dm_segs[i].ds_len)
1031 offset -= map->dm_segs[i].ds_len;
1032 else {
1033 bus_size_t l = map->dm_segs[i].ds_len - offset;
1034
1035 if (l > len)
1036 l = len;
1037
1038 fdcache(HPPA_SID_KERNEL, map->dm_segs[i]._ds_va +
1039 offset, l);
1040 len -= l;
1041 offset = 0;
1042 }
1043 }
1044
1045 /* For either operation, drain any outstanding DMA before returning. */
1046 __asm __volatile ("sync\n\tsyncdma\n\tsync\n\t"
1047 "nop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop" ::: "memory");
1048 }
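/*
 * Ordering sketch (illustrative only): the sync calls as a driver would
 * issue them around a transfer, matching the PRE/POST semantics described
 * in the comment above.  "dmat", "map", and "len" are hypothetical.
 *
 *	// Device will read the buffer (host -> device, e.g. transmit):
 *	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	// ... start DMA, wait for completion ...
 *	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *
 *	// Device will write the buffer (device -> host, e.g. receive):
 *	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREREAD);
 *	// ... start DMA, wait for completion ...
 *	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
 */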
1049
1050 /*
1051 * Common function for DMA-safe memory allocation. May be called by bus-
1052 * specific DMA memory allocation functions.
1053 */
1054 int
1055 mbus_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
1056 bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
1057 int flags)
1058 {
1059 paddr_t low, high;
1060 struct pglist *mlist;
1061 struct vm_page *m;
1062 paddr_t pa, pa_next;
1063 int seg;
1064 int error;
1065
1066 DPRINTF(("%s: size 0x%lx align 0x%lx bdry %0lx segs %p nsegs %d\n",
1067 __func__, size, alignment, boundary, segs, nsegs));
1068
1069 /* Always round the size. */
1070 size = round_page(size);
1071
1072 /* Decide where we can allocate pages. */
1073 low = 0;
1074 high = ((flags & BUS_DMA_24BIT) ? (1 << 24) : 0) - 1;
1075
1076 if ((mlist = kmem_alloc(sizeof(*mlist),
1077 (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
1078 return ENOMEM;
1079
1080 /*
1081 * Allocate physical pages from the VM system.
1082 */
1083 TAILQ_INIT(mlist);
1084 error = uvm_pglistalloc(size, low, high, 0, 0, mlist, nsegs,
1085 (flags & BUS_DMA_NOWAIT) == 0);
1086
1087 /* If we could not allocate the pages, fail. */
1088 if (error) {
1089 DPRINTF(("%s: uvm_pglistalloc(%lx, %lx, %lx, 0, 0, %p, %d, %0x)"
1090 " failed", __func__, size, low, high, mlist, nsegs,
1091 (flags & BUS_DMA_NOWAIT) == 0));
1092 kmem_free(mlist, sizeof(*mlist));
1093 return error;
1094 }
1095
1096 pa_next = 0;
1097 seg = -1;
1098
1099 TAILQ_FOREACH(m, mlist, pageq.queue) {
1100 pa = VM_PAGE_TO_PHYS(m);
1101 if (pa != pa_next) {
1102 if (++seg >= nsegs) {
1103 uvm_pglistfree(mlist);
1104 kmem_free(mlist, sizeof(*mlist));
1105 return ENOMEM;
1106 }
1107 segs[seg].ds_addr = pa;
1108 segs[seg].ds_len = PAGE_SIZE;
1109 segs[seg]._ds_mlist = NULL;
1110 segs[seg]._ds_va = 0;
1111 } else
1112 segs[seg].ds_len += PAGE_SIZE;
1113 pa_next = pa + PAGE_SIZE;
1114 }
1115 *rsegs = seg + 1;
1116
1117 /*
1118 * Simply keep a pointer around to the linked list, so
1119 * bus_dmamap_free() can return it.
1120 *
1121 * Nobody should touch the pageq.queue fields while these pages are in
1122 * our custody.
1123 */
1124 segs[0]._ds_mlist = mlist;
1125
1126 /*
1127 * We now have physical pages, but no kernel virtual addresses yet.
1128 * These may be allocated in bus_dmamap_map.
1129 */
1130 return 0;
1131 }
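/*
 * Allocation sketch (illustrative only, not compiled): pairing
 * mbus_dmamem_alloc() with bus_dmamem_map() and bus_dmamap_load_raw() to
 * obtain a DMA-safe region, e.g. for a descriptor ring.  "dmat", "map",
 * and the error path label are hypothetical.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	if (bus_dmamem_alloc(dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT))
 *		return ENOMEM;
 *	if (bus_dmamem_map(dmat, &seg, rseg, PAGE_SIZE, &kva, BUS_DMA_NOWAIT))
 *		goto fail;
 *	// "map" is a bus_dmamap_t created earlier with bus_dmamap_create()
 *	bus_dmamap_load_raw(dmat, map, &seg, rseg, PAGE_SIZE, BUS_DMA_NOWAIT);
 */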
1132
1133 void
1134 mbus_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
1135 {
1136 struct pglist *mlist;
1137 /*
1138 * Return the list of physical pages back to the VM system.
1139 */
1140 mlist = segs[0]._ds_mlist;
1141 if (mlist == NULL)
1142 return;
1143
1144 uvm_pglistfree(mlist);
1145 kmem_free(mlist, sizeof(*mlist));
1146 }
1147
1148 /*
1149 * Common function for mapping DMA-safe memory. May be called by bus-specific
1150 * DMA memory map functions.
1151 */
1152 int
1153 mbus_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
1154 void **kvap, int flags)
1155 {
1156 bus_addr_t addr;
1157 vaddr_t va;
1158 int curseg;
1159 u_int pmflags =
1160 hppa_cpu_hastlbu_p() ? PMAP_NOCACHE : 0;
1161 const uvm_flag_t kmflags =
1162 (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
1163
1164 size = round_page(size);
1165
1166 /* Get a chunk of kernel virtual space. */
1167 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
1168 if (__predict_false(va == 0))
1169 return ENOMEM;
1170
1171 *kvap = (void *)va;
1172
1173 for (curseg = 0; curseg < nsegs; curseg++) {
1174 segs[curseg]._ds_va = va;
1175 for (addr = segs[curseg].ds_addr;
1176 addr < (segs[curseg].ds_addr + segs[curseg].ds_len); ) {
1177 KASSERT(size != 0);
1178
1179 pmap_kenter_pa(va, addr, VM_PROT_READ | VM_PROT_WRITE,
1180 pmflags);
1181
1182 addr += PAGE_SIZE;
1183 va += PAGE_SIZE;
1184 size -= PAGE_SIZE;
1185 }
1186 }
1187 pmap_update(pmap_kernel());
1188 return 0;
1189 }
1190
1191 /*
1192 * Common function for unmapping DMA-safe memory. May be called by bus-
1193 * specific DMA memory unmapping functions.
1194 */
1195 void
1196 mbus_dmamem_unmap(void *v, void *kva, size_t size)
1197 {
1198
1199 KASSERT(((vaddr_t)kva & PAGE_MASK) == 0);
1200
1201 size = round_page(size);
1202 pmap_kremove((vaddr_t)kva, size);
1203 pmap_update(pmap_kernel());
1204 uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1205 }
1206
1207 /*
1208 * Common function for mmap(2)'ing DMA-safe memory. May be called by bus-
1209 * specific DMA mmap(2)'ing functions.
1210 */
1211 paddr_t
1212 mbus_dmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs,
1213 off_t off, int prot, int flags)
1214 {
1215 int i;
1216
1217 for (i = 0; i < nsegs; i++) {
1218 KASSERT((off & PGOFSET) == 0);
1219 KASSERT((segs[i].ds_addr & PGOFSET) == 0);
1220 KASSERT((segs[i].ds_len & PGOFSET) == 0);
1221
1222 if (off >= segs[i].ds_len) {
1223 off -= segs[i].ds_len;
1224 continue;
1225 }
1226
1227 return btop((u_long)segs[i].ds_addr + off);
1228 }
1229
1230 /* Page not found. */
1231 return -1;
1232 }
1233
1234 int
1235 _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
1236 bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
1237 int *segp, int first)
1238 {
1239 bus_size_t sgsize;
1240 bus_addr_t curaddr, lastaddr, baddr, bmask;
1241 vaddr_t vaddr = (vaddr_t)buf;
1242 int seg;
1243 pmap_t pmap;
1244
1245 pmap = vm_map_pmap(&vm->vm_map);
1246
1247 lastaddr = *lastaddrp;
1248 bmask = ~(map->_dm_boundary - 1);
1249
1250 for (seg = *segp; buflen > 0; ) {
1251 bool ok __diagused;
1252 /*
1253 * Get the physical address for this segment.
1254 */
1255 ok = pmap_extract(pmap, vaddr, &curaddr);
1256 KASSERT(ok == true);
1257
1258 /*
1259 * Compute the segment size, and adjust counts.
1260 */
1261 sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
1262 if (buflen < sgsize)
1263 sgsize = buflen;
1264
1265 /*
1266 * Make sure we don't cross any boundaries.
1267 */
1268 if (map->_dm_boundary > 0) {
1269 baddr = (curaddr + map->_dm_boundary) & bmask;
1270 if (sgsize > (baddr - curaddr))
1271 sgsize = (baddr - curaddr);
1272 }
1273
1274 /*
1275 * Insert chunk into a segment, coalescing with previous
1276 * segment if possible.
1277 */
1278 if (first) {
1279 map->dm_segs[seg].ds_addr = curaddr;
1280 map->dm_segs[seg].ds_len = sgsize;
1281 map->dm_segs[seg]._ds_va = vaddr;
1282 first = 0;
1283 } else {
1284 if (curaddr == lastaddr &&
1285 (map->dm_segs[seg].ds_len + sgsize) <=
1286 map->_dm_maxsegsz &&
1287 (map->_dm_boundary == 0 ||
1288 (map->dm_segs[seg].ds_addr & bmask) ==
1289 (curaddr & bmask)))
1290 map->dm_segs[seg].ds_len += sgsize;
1291 else {
1292 if (++seg >= map->_dm_segcnt)
1293 break;
1294 map->dm_segs[seg].ds_addr = curaddr;
1295 map->dm_segs[seg].ds_len = sgsize;
1296 map->dm_segs[seg]._ds_va = vaddr;
1297 }
1298 }
1299
1300 lastaddr = curaddr + sgsize;
1301 vaddr += sgsize;
1302 buflen -= sgsize;
1303 }
1304
1305 *segp = seg;
1306 *lastaddrp = lastaddr;
1307
1308 /*
1309 * Did we fit?
1310 */
1311 if (buflen != 0)
1312 return EFBIG; /* XXX better return value here? */
1313 return 0;
1314 }
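/*
 * Worked example for the segment arithmetic above (assuming the usual
 * 4 KiB page size): for a buffer whose virtual address has page offset
 * 0x100 and whose length is 0x1800, the first chunk is
 * sgsize = 0x1000 - 0x100 = 0xf00.  If the next page is physically
 * contiguous with the first (curaddr == lastaddr) and neither the
 * maxsegsz nor the boundary limit is hit, the remaining 0x900 bytes are
 * coalesced into the same segment; otherwise a new dm_segs[] entry is
 * started.
 */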
1315
1316 const struct hppa_bus_dma_tag hppa_dmatag = {
1317 NULL,
1318 mbus_dmamap_create, mbus_dmamap_destroy,
1319 mbus_dmamap_load, mbus_dmamap_load_mbuf,
1320 mbus_dmamap_load_uio, mbus_dmamap_load_raw,
1321 mbus_dmamap_unload, mbus_dmamap_sync,
1322
1323 mbus_dmamem_alloc, mbus_dmamem_free, mbus_dmamem_map,
1324 mbus_dmamem_unmap, mbus_dmamem_mmap
1325 };
1326
1327 int
1328 mbmatch(device_t parent, cfdata_t cf, void *aux)
1329 {
1330
1331 /* there will be only one */
1332 if (mb_attached)
1333 return 0;
1334
1335 return 1;
1336 }
1337
1338 static device_t
1339 mb_module_callback(device_t self, struct confargs *ca)
1340 {
1341 if (ca->ca_type.iodc_type == HPPA_TYPE_NPROC ||
1342 ca->ca_type.iodc_type == HPPA_TYPE_MEMORY)
1343 return NULL;
1344
1345 return config_found(self, ca, mbprint,
1346 CFARGS(.submatch = mbsubmatch));
1347 }
1348
1349 static device_t
1350 mb_cpu_mem_callback(device_t self, struct confargs *ca)
1351 {
1352 if ((ca->ca_type.iodc_type != HPPA_TYPE_NPROC &&
1353 ca->ca_type.iodc_type != HPPA_TYPE_MEMORY))
1354 return NULL;
1355
1356 return config_found(self, ca, mbprint,
1357 CFARGS(.submatch = mbsubmatch));
1358 }
1359
1360 void
1361 mbattach(device_t parent, device_t self, void *aux)
1362 {
1363 struct mainbus_softc *sc = device_private(self);
1364 struct confargs nca;
1365 bus_space_handle_t ioh;
1366 int err;
1367
1368 sc->sc_dv = self;
1369 mb_attached = 1;
1370
1371 /*
1372 * Map all of Fixed Physical, Local Broadcast, and Global Broadcast
1373 * space. These spaces are adjacent and in that order and run to the
1374 * end of the address space.
1375 */
1376 /*
1377 * XXX fredette - this may be a copout, or it may be a great idea. I'm
1378 * not sure which yet.
1379 */
1380
1381 /* map all the way till the end of the memory */
1382 if (bus_space_map(&hppa_bustag, hppa_mcpuhpa, (~0LU - hppa_mcpuhpa + 1),
1383 0, &ioh))
1384 panic("%s: cannot map mainbus IO space", __func__);
1385
1386 /*
1387 * Local-Broadcast the HPA to all modules on the bus
1388 */
1389 ((struct iomod *)(hppa_mcpuhpa & HPPA_FLEX_MASK))[FPA_IOMOD].io_flex =
1390 (void *)((hppa_mcpuhpa & HPPA_FLEX_MASK) | DMA_ENABLE);
1391
1392 aprint_normal(" [flex %lx]\n", hppa_mcpuhpa & HPPA_FLEX_MASK);
1393
1394 /* PDC first */
1395 memset(&nca, 0, sizeof(nca));
1396 nca.ca_name = "pdc";
1397 nca.ca_hpa = 0;
1398 nca.ca_iot = &hppa_bustag;
1399 nca.ca_dmatag = &hppa_dmatag;
1400 config_found(self, &nca, mbprint, CFARGS_NONE);
1401
1402 #if NPOWER > 0
1403 /* get some power */
1404 memset(&nca, 0, sizeof(nca));
1405 nca.ca_name = "power";
1406 nca.ca_irq = HPPACF_IRQ_UNDEF;
1407 nca.ca_iot = &hppa_bustag;
1408 config_found(self, &nca, mbprint, CFARGS_NONE);
1409 #endif
1410
1411 memset(&nca, 0, sizeof(nca));
1412 err = pdcproc_chassis_info(&pdc_chassis_info, &nca.ca_pcl);
1413 if (!err) {
1414 if (nca.ca_pcl.enabled) {
1415 #if NLCD > 0
1416 nca.ca_name = "lcd";
1417 nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] =
1418 nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1;
1419 nca.ca_dp.dp_mod = -1;
1420 nca.ca_irq = HPPACF_IRQ_UNDEF;
1421 nca.ca_iot = &hppa_bustag;
1422 nca.ca_hpa = nca.ca_pcl.cmd_addr;
1423
1424 config_found(self, &nca, mbprint, CFARGS_NONE);
1425 #endif
1426 } else if (nca.ca_pcl.model == 2) {
1427 #ifdef USELEDS
1428 bus_space_map(&hppa_bustag, nca.ca_pcl.cmd_addr,
1429 4, 0, (bus_space_handle_t *)&machine_ledaddr);
1430 machine_ledword = 1;
1431 #endif
1432 }
1433 }
1434
1435 hppa_modules_scan();
1436
1437 /* Search and attach all CPUs and memory controllers. */
1438 memset(&nca, 0, sizeof(nca));
1439 nca.ca_name = "mainbus";
1440 nca.ca_hpa = 0;
1441 nca.ca_hpabase = HPPA_FPA; /* Central bus */
1442 nca.ca_nmodules = MAXMODBUS;
1443 nca.ca_irq = HPPACF_IRQ_UNDEF;
1444 nca.ca_iot = &hppa_bustag;
1445 nca.ca_dmatag = &hppa_dmatag;
1446 nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] =
1447 nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1;
1448 nca.ca_dp.dp_mod = -1;
1449 pdc_scanbus(self, &nca, mb_cpu_mem_callback);
1450
1451 /* Search for IO hardware. */
1452 memset(&nca, 0, sizeof(nca));
1453 nca.ca_name = "mainbus";
1454 nca.ca_hpa = 0;
1455 nca.ca_hpabase = 0; /* Central bus already walked above */
1456 nca.ca_nmodules = MAXMODBUS;
1457 nca.ca_irq = HPPACF_IRQ_UNDEF;
1458 nca.ca_iot = &hppa_bustag;
1459 nca.ca_dmatag = &hppa_dmatag;
1460 nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] =
1461 nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1;
1462 nca.ca_dp.dp_mod = -1;
1463 pdc_scanbus(self, &nca, mb_module_callback);
1464
1465 hppa_modules_done();
1466 }
1467
1468 int
1469 mbprint(void *aux, const char *pnp)
1470 {
1471 int n;
1472 struct confargs *ca = aux;
1473
1474 if (pnp)
1475 aprint_normal("\"%s\" at %s (type 0x%x, sv 0x%x)", ca->ca_name,
1476 pnp, ca->ca_type.iodc_type, ca->ca_type.iodc_sv_model);
1477 if (ca->ca_hpa) {
1478 aprint_normal(" hpa 0x%lx", ca->ca_hpa);
1479 if (ca->ca_dp.dp_mod >= 0) {
1480 aprint_normal(" path ");
1481 for (n = 0; n < 6; n++) {
1482 if (ca->ca_dp.dp_bc[n] >= 0)
1483 aprint_normal("%d/", ca->ca_dp.dp_bc[n]);
1484 }
1485 aprint_normal("%d", ca->ca_dp.dp_mod);
1486 }
1487 if (!pnp && ca->ca_irq >= 0) {
1488 aprint_normal(" irq %d", ca->ca_irq);
1489 }
1490 }
1491
1492 return UNCONF;
1493 }
1494
1495 int
1496 mbsubmatch(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
1497 {
1498 struct confargs *ca = aux;
1499 int ret;
1500 int saved_irq;
1501
1502 saved_irq = ca->ca_irq;
1503 if (cf->hppacf_irq != HPPACF_IRQ_UNDEF)
1504 ca->ca_irq = cf->hppacf_irq;
1505 if (!(ret = config_match(parent, cf, aux)))
1506 ca->ca_irq = saved_irq;
1507 return ret;
1508 }
1509