1 /* $NetBSD: mainbus.c,v 1.1 2014/02/24 07:23:43 skrll Exp $ */ 2 3 /*- 4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Matthew Fredette. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* $OpenBSD: mainbus.c,v 1.74 2009/04/20 00:42:06 oga Exp $ */ 33 34 /* 35 * Copyright (c) 1998-2004 Michael Shalayeff 36 * All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. 
Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 50 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 51 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 52 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 53 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 56 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 57 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mainbus.c,v 1.1 2014/02/24 07:23:43 skrll Exp $");

#include "locators.h"
#include "power.h"
#include "lcd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/extent.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <uvm/uvm_page.h>
#include <uvm/uvm.h>

#include <machine/pdc.h>
#include <machine/iomod.h>
#include <machine/autoconf.h>

#include <hppa/hppa/machdep.h>
#include <hppa/dev/cpudevs.h>

#if NLCD > 0
static struct pdc_chassis_info pdc_chassis_info;
#endif

#ifdef MBUSDEBUG

#define DPRINTF(s)	do {	\
	if (mbusdebug)	\
		printf s;	\
} while(0)

int mbusdebug = 1;
#else
#define DPRINTF(s)	/* */
#endif

/* Softc for the mainbus pseudo-device; holds only the device handle. */
struct mainbus_softc {
	device_t sc_dv;
};

int mbmatch(device_t, cfdata_t, void *);
void mbattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(mainbus, sizeof(struct mainbus_softc),
    mbmatch, mbattach, NULL, NULL);

extern struct cfdriver mainbus_cd;

/* Guard so that mainbus only ever attaches once. */
static int mb_attached;

/* from machdep.c */
extern struct extent *hppa_io_extent;

/*
 * bus_space access method implementations for the native (main) bus.
 * Handles are plain virtual addresses, so reads/writes are direct
 * volatile loads/stores at (handle + offset).
 */
uint8_t mbus_r1(void *, bus_space_handle_t, bus_size_t);
uint16_t mbus_r2(void *, bus_space_handle_t, bus_size_t);
uint32_t mbus_r4(void *, bus_space_handle_t, bus_size_t);
uint64_t mbus_r8(void *, bus_space_handle_t, bus_size_t);
void mbus_w1(void *, bus_space_handle_t, bus_size_t, uint8_t);
void mbus_w2(void *, bus_space_handle_t, bus_size_t, uint16_t);
void mbus_w4(void *, bus_space_handle_t, bus_size_t, uint32_t);
void mbus_w8(void *, bus_space_handle_t, bus_size_t, uint64_t);
void mbus_rm_1(void *, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t);
void mbus_rm_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
void mbus_rm_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
void mbus_rm_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);
void mbus_wm_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t);
void mbus_wm_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
void mbus_wm_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
void mbus_wm_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);
void mbus_rr_1(void *, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t);
void mbus_rr_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
void mbus_rr_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
void mbus_rr_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);
void mbus_wr_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t);
void mbus_wr_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
void mbus_wr_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
void mbus_wr_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);
void mbus_sm_1(void *, bus_space_handle_t, bus_size_t, uint8_t, bus_size_t);
void mbus_sm_2(void *, bus_space_handle_t, bus_size_t, uint16_t, bus_size_t);
void mbus_sm_4(void *, bus_space_handle_t, bus_size_t, uint32_t, bus_size_t);
void mbus_sm_8(void *, bus_space_handle_t, bus_size_t, uint64_t, bus_size_t);
void mbus_sr_1(void *, bus_space_handle_t, bus_size_t, uint8_t, bus_size_t);
void mbus_sr_2(void *, bus_space_handle_t, bus_size_t, uint16_t, bus_size_t);
void mbus_sr_4(void *, bus_space_handle_t, bus_size_t, uint32_t, bus_size_t);
void mbus_sr_8(void *, bus_space_handle_t, bus_size_t, uint64_t, bus_size_t);
void mbus_cp_1(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
void mbus_cp_2(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
void mbus_cp_4(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
void mbus_cp_8(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);

int mbus_add_mapping(bus_addr_t, bus_size_t, int, bus_space_handle_t *);
int mbus_map(void *, bus_addr_t, bus_size_t, int, bus_space_handle_t *);
void mbus_unmap(void *, bus_space_handle_t, bus_size_t);
int mbus_alloc(void *, bus_addr_t, bus_addr_t, bus_size_t, bus_size_t, bus_size_t, int, bus_addr_t *, bus_space_handle_t *);
void mbus_free(void *, bus_space_handle_t, bus_size_t);
int mbus_subregion(void *, bus_space_handle_t, bus_size_t, bus_size_t, bus_space_handle_t *);
void mbus_barrier(void *, bus_space_handle_t, bus_size_t, bus_size_t, int);
void *mbus_vaddr(void *, bus_space_handle_t);
paddr_t mbus_mmap(void *, bus_addr_t, off_t, int, int);

int mbus_dmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t, int, bus_dmamap_t *);
void mbus_dmamap_destroy(void *, bus_dmamap_t);
int mbus_dmamap_load(void *, bus_dmamap_t, void *, bus_size_t, struct proc *, int);
int mbus_dmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
int mbus_dmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
int mbus_dmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *, int, bus_size_t, int);
void mbus_dmamap_unload(void *, bus_dmamap_t);
void mbus_dmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int mbus_dmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t, bus_dma_segment_t *, int, int *, int);
void mbus_dmamem_free(void *, bus_dma_segment_t *, int);
int mbus_dmamem_map(void *, bus_dma_segment_t *, int, size_t, void **, int);
void mbus_dmamem_unmap(void *, void *, size_t);
paddr_t mbus_dmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);
int _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first);

extern struct pdc_btlb pdc_btlb;
/*
 * One bit per HPPA_FLEX_SIZE chunk of I/O space: set once the chunk has
 * been entered into the kernel pmap.  Mappings are never torn down
 * (mbus_unmap() only releases the extent region), so each chunk is
 * mapped at most once for the lifetime of the system.
 */
static uint32_t bmm[HPPA_FLEX_COUNT/32];

/*
 * Enter kernel mappings for [bpa, bpa + size) of I/O space and hand back
 * the virtual handle.  Mappings are established identity-style (VA == PA)
 * in whole HPPA_FLEX_SIZE units; chunks already present in the bmm bitmap
 * are skipped.  The BUS_SPACE_MAP_CACHEABLE flag is rejected: I/O space
 * on this platform is always mapped uncached.
 *
 * Returns 0; cannot currently fail.
 */
int
mbus_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	vaddr_t pa, spa, epa;
	int flex;

	DPRINTF(("\n%s(%lx,%lx,%scachable,%p)\n", __func__,
	    bpa, size, flags? "" : "non", bshp));

	KASSERT(bpa >= HPPA_IOSPACE);
	KASSERT(!(flags & BUS_SPACE_MAP_CACHEABLE));

	/*
	 * Mappings are established in HPPA_FLEX_SIZE units,
	 * either with BTLB, or regular mappings of the whole area.
	 */
	for (pa = bpa ; size != 0; pa = epa) {
		flex = HPPA_FLEX(pa);
		spa = pa & HPPA_FLEX_MASK;
		epa = spa + HPPA_FLEX_SIZE; /* may wrap to 0... */

		size -= min(size, HPPA_FLEX_SIZE - (pa - spa));

		/* do need a new mapping? */
		if (bmm[flex / 32] & (1 << (flex % 32))) {
			DPRINTF(("%s: already mapped flex=%x, mask=%x\n",
			    __func__, flex, bmm[flex / 32]));
			continue;
		}

		DPRINTF(("%s: adding flex=%x %lx-%lx, ", __func__, flex, spa,
		    epa - 1));

		/* Mark the chunk mapped before entering the pages. */
		bmm[flex / 32] |= (1 << (flex % 32));

		while (spa != epa) {
			DPRINTF(("%s: kenter 0x%lx-0x%lx", __func__, spa,
			    epa));
			/* VA == PA identity mapping, page by page. */
			for (; spa != epa; spa += PAGE_SIZE)
				pmap_kenter_pa(spa, spa,
				    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
	}

	/* The handle is simply the (identity-mapped) bus address. */
	*bshp = bpa;

	/* Success. */
	return 0;
}

/*
 * bus_space_map(): reserve [bpa, bpa + size) in the I/O extent and map it.
 * On mapping failure the extent reservation is rolled back.
 */
int
mbus_map(void *v, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	int error;

	/*
	 * We must only be called with addresses in I/O space.
	 */
	KASSERT(bpa >= HPPA_IOSPACE);

	/*
	 * Allocate the region of I/O space.
	 */
	error = extent_alloc_region(hppa_io_extent, bpa, size, EX_NOWAIT);
	if (error)
		return (error);

	/*
	 * Map the region of I/O space.
	 */
	error = mbus_add_mapping(bpa, size, flags, bshp);
	if (error) {
		DPRINTF(("bus_space_map: pa 0x%lx, size 0x%lx failed\n",
		    bpa, size));
		if (extent_free(hppa_io_extent, bpa, size, EX_NOWAIT)) {
			printf ("bus_space_map: can't free region\n");
		}
	}

	return error;
}

/*
 * bus_space_unmap(): release the extent region.  Note that the pmap
 * entries made by mbus_add_mapping() are deliberately left in place
 * (see the bmm bitmap above).
 */
void
mbus_unmap(void *v, bus_space_handle_t bsh, bus_size_t size)
{
	bus_addr_t bpa = bsh;
	int error;

	/*
	 * Free the region of I/O space.
	 */
	error = extent_free(hppa_io_extent, bpa, size, EX_NOWAIT);
	if (error) {
		DPRINTF(("bus_space_unmap: ps 0x%lx, size 0x%lx\n",
		    bpa, size));
		panic("bus_space_unmap: can't free region (%d)", error);
	}
}

/*
 * bus_space_alloc(): pick a free region inside [rstart, rend] honouring
 * alignment/boundary constraints, then map it like mbus_map() does.
 */
int
mbus_alloc(void *v, bus_addr_t rstart, bus_addr_t rend, bus_size_t size,
    bus_size_t align, bus_size_t boundary, int flags, bus_addr_t *addrp,
    bus_space_handle_t *bshp)
{
	bus_addr_t bpa;
	int error;

	if (rstart < hppa_io_extent->ex_start ||
	    rend > hppa_io_extent->ex_end)
		panic("bus_space_alloc: bad region start/end");

	/*
	 * Allocate the region of I/O space.
	 */
	error = extent_alloc_subregion1(hppa_io_extent, rstart, rend, size,
	    align, 0, boundary, EX_NOWAIT, &bpa);
	if (error)
		return (error);

	/*
	 * Map the region of I/O space.
	 */
	error = mbus_add_mapping(bpa, size, flags, bshp);
	if (error) {
		DPRINTF(("bus_space_alloc: pa 0x%lx, size 0x%lx failed\n",
		    bpa, size));
		if (extent_free(hppa_io_extent, bpa, size, EX_NOWAIT)) {
			printf("bus_space_alloc: can't free region\n");
		}
	}

	*addrp = bpa;

	return error;
}

void
mbus_free(void *v, bus_space_handle_t h, bus_size_t size)
{
	/* bus_space_unmap() does all that we need to do. */
	mbus_unmap(v, h, size);
}

/* Subregions are just offsets into the parent handle. */
int
mbus_subregion(void *v, bus_space_handle_t bsh, bus_size_t offset,
    bus_size_t size, bus_space_handle_t *nbshp)
{
	*nbshp = bsh + offset;
	return(0);
}

/* bus_space_barrier(): flush/sync the caches regardless of op. */
void
mbus_barrier(void *v, bus_space_handle_t h, bus_size_t o, bus_size_t l, int op)
{
	sync_caches();
}

void*
mbus_vaddr(void *v, bus_space_handle_t h)
{
	/*
	 * We must only be called with addresses in I/O space.
	 */
	KASSERT(h >= HPPA_IOSPACE);
	/* Handles are virtual addresses already. */
	return (void*)h;
}

/* mmap of bus space is not supported. */
paddr_t
mbus_mmap(void *v, bus_addr_t addr, off_t off, int prot, int flags)
{

	return -1;
}

/*
 * Single read/write accessors: direct volatile access at (handle + offset).
 */
uint8_t
mbus_r1(void *v, bus_space_handle_t h, bus_size_t o)
{
	return *((volatile uint8_t *)(h + o));
}

uint16_t
mbus_r2(void *v, bus_space_handle_t h, bus_size_t o)
{
	return *((volatile uint16_t *)(h + o));
}

uint32_t
mbus_r4(void *v, bus_space_handle_t h, bus_size_t o)
{
	return *((volatile uint32_t *)(h + o));
}

uint64_t
mbus_r8(void *v, bus_space_handle_t h, bus_size_t o)
{
	return *((volatile uint64_t *)(h + o));
}

void
mbus_w1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv)
{
	*((volatile uint8_t *)(h + o)) = vv;
}

void
mbus_w2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv)
{
	*((volatile uint16_t *)(h + o)) = vv;
}

void
mbus_w4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv)
{
	*((volatile uint32_t *)(h + o)) = vv;
}

void
mbus_w8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv)
{
	*((volatile uint64_t *)(h + o)) = vv;
}


/*
 * "multi" accessors: transfer c items to/from a single bus location
 * (the bus address is NOT incremented between items).
 */
void
mbus_rm_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(a++) = *(volatile uint8_t *)h;
}

void
mbus_rm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(a++) = *(volatile uint16_t *)h;
}

void
mbus_rm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(a++) = *(volatile uint32_t *)h;
}

void
mbus_rm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(a++) = *(volatile uint64_t *)h;
}

void
mbus_wm_1(void *v, bus_space_handle_t h, bus_size_t o, const uint8_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint8_t *)h = *(a++);
}

void
mbus_wm_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint16_t *)h = *(a++);
}

void
mbus_wm_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint32_t *)h = *(a++);
}

void
mbus_wm_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint64_t *)h = *(a++);
}

/*
 * "set multi": write the same value c times to a single bus location.
 */
void
mbus_sm_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint8_t *)h = vv;
}

void
mbus_sm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint16_t *)h = vv;
}

void
mbus_sm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint32_t *)h = vv;
}

void
mbus_sm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint64_t *)h = vv;
}

/* NOTE(review): declared but not defined in this file — presumably unused. */
void mbus_rrm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t *a, bus_size_t c);
void mbus_rrm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t *a, bus_size_t c);
void mbus_rrm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t *a, bus_size_t c);

void mbus_wrm_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c);
void mbus_wrm_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c);
void mbus_wrm_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c);

/*
 * "region" accessors: transfer c items to/from consecutive bus locations
 * (both the memory pointer and the bus pointer advance).
 */
void
mbus_rr_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t *a, bus_size_t c)
{
	volatile uint8_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*a++ = *p++;
}

void
mbus_rr_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t *a, bus_size_t c)
{
	volatile uint16_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*a++ = *p++;
}

void
mbus_rr_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t *a, bus_size_t c)
{
	volatile uint32_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*a++ = *p++;
}

void
mbus_rr_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t *a, bus_size_t c)
{
	volatile uint64_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*a++ = *p++;
}

void
mbus_wr_1(void *v, bus_space_handle_t h, bus_size_t o, const uint8_t *a, bus_size_t c)
{
	volatile uint8_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = *a++;
}

void
mbus_wr_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c)
{
	volatile uint16_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = *a++;
}

void
mbus_wr_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c)
{
	volatile uint32_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = *a++;
}

void
mbus_wr_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c)
{
	volatile uint64_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = *a++;
}

/* NOTE(review): declared but not defined in this file — presumably unused. */
void mbus_rrr_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
void mbus_rrr_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
void mbus_rrr_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);

void mbus_wrr_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
void mbus_wrr_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
void mbus_wrr_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);

/*
 * "set region": fill c consecutive bus locations with the same value.
 */
void
mbus_sr_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv, bus_size_t c)
{
	volatile uint8_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = vv;
}

void
mbus_sr_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv, bus_size_t c)
{
	volatile uint16_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = vv;
}

void
mbus_sr_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv, bus_size_t c)
{
	volatile uint32_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = vv;
}

void
mbus_sr_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv, bus_size_t c)
{
	volatile uint64_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = vv;
}

/*
 * "copy region": copy c items from (h2 + o2) to (h1 + o1).
 * Note the direction: p2 is the source, p1 the destination.
 */
void
mbus_cp_1(void *v, bus_space_handle_t h1, bus_size_t o1,
	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	volatile uint8_t *p1, *p2;

	h1 += o1;
	h2 += o2;
	p1 = (void *)h1;
	p2 = (void *)h2;
	while (c--)
		*p1++ = *p2++;
}

void
mbus_cp_2(void *v, bus_space_handle_t h1, bus_size_t o1,
	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	volatile uint16_t *p1, *p2;

	h1 += o1;
	h2 += o2;
	p1 = (void *)h1;
	p2 = (void *)h2;
	while (c--)
		*p1++ = *p2++;
}

void
mbus_cp_4(void *v, bus_space_handle_t h1, bus_size_t o1,
	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	volatile uint32_t *p1, *p2;

	h1 += o1;
	h2 += o2;
	p1 = (void *)h1;
	p2 = (void *)h2;
	while (c--)
		*p1++ = *p2++;
}

void
mbus_cp_8(void *v, bus_space_handle_t h1, bus_size_t o1,
	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	volatile uint64_t *p1, *p2;

	h1 += o1;
	h2 += o2;
	p1 = (void *)h1;
	p2 = (void *)h2;
	while (c--)
		*p1++ = *p2++;
}


/* The bus_space method table for the native bus. */
const struct hppa_bus_space_tag hppa_bustag = {
	NULL,

	mbus_map, mbus_unmap, mbus_subregion, mbus_alloc, mbus_free,
	mbus_barrier, mbus_vaddr, mbus_mmap,
	mbus_r1, mbus_r2, mbus_r4, mbus_r8,
	mbus_w1, mbus_w2, mbus_w4, mbus_w8,
	mbus_rm_1, mbus_rm_2, mbus_rm_4, mbus_rm_8,
	mbus_wm_1, mbus_wm_2, mbus_wm_4, mbus_wm_8,
	mbus_sm_1, mbus_sm_2, mbus_sm_4, mbus_sm_8,
	/* *_stream_* are the same as non-stream for native busses */
	mbus_rm_2, mbus_rm_4, mbus_rm_8,
	mbus_wm_2, mbus_wm_4, mbus_wm_8,
	mbus_rr_1, mbus_rr_2, mbus_rr_4, mbus_rr_8,
	mbus_wr_1, mbus_wr_2, mbus_wr_4, mbus_wr_8,
	/* *_stream_* are the same as non-stream for native busses */
	mbus_rr_2, mbus_rr_4, mbus_rr_8,
	mbus_wr_2, mbus_wr_4, mbus_wr_8,
	mbus_sr_1, mbus_sr_2, mbus_sr_4, mbus_sr_8,
	mbus_cp_1, mbus_cp_2, mbus_cp_4, mbus_cp_8
};

/*
 * Common function for DMA map creation. May be called by bus-specific DMA map
 * creation functions.
 */
int
mbus_dmamap_create(void *v, bus_size_t size, int nsegments, bus_size_t maxsegsz,
    bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct hppa_bus_dmamap *map;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map. The end of the map is a
	 * variable-sized array of segments, so we allocate enough room for
	 * them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation of
	 * ALLOCNOW notifies others that we've reserved these resources, and
	 * they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence the
	 * (nsegments - 1).
	 */
	mapsize = sizeof(struct hppa_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	map = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	if (!map)
		return (ENOMEM);

	memset(map, 0, mapsize);
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;	/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction. May be called by bus-specific DMA
 * map destruction functions.
 */
void
mbus_dmamap_destroy(void *v, bus_dmamap_t map)
{

	/*
	 * If the handle contains a valid mapping, unload it.
	 */
	if (map->dm_mapsize != 0)
		mbus_dmamap_unload(v, map);

	free(map, M_DMAMAP);
}

/*
 * load DMA map with a linear buffer.
 */
int
mbus_dmamap_load(void *v, bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags)
{
	vaddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/* Kernel buffers have p == NULL. */
	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(NULL, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
mbus_dmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	KASSERT(m0->m_flags & M_PKTHDR);

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		/* Empty mbufs contribute nothing to the mapping. */
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(NULL, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
mbus_dmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load. Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = MIN(resid, iov[i].iov_len);
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(NULL, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
mbus_dmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct pglist *mlist;
	struct vm_page *m;
	paddr_t pa, pa_next;
	bus_size_t mapsize;
	bus_size_t pagesz = PAGE_SIZE;
	int seg;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	/* Load the allocated pages. */
	mlist = segs[0]._ds_mlist;
	pa_next = 0;
	seg = -1;
	mapsize = size;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {

		if (size == 0)
			panic("mbus_dmamem_load_raw: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		/* Start a new segment whenever the pages are discontiguous. */
		if (pa != pa_next) {
			if (++seg >= map->_dm_segcnt)
				panic("mbus_dmamem_load_raw: nsegs botch");
			map->dm_segs[seg].ds_addr = pa;
			map->dm_segs[seg].ds_len = 0;
		}
		pa_next = pa + PAGE_SIZE;
		/* The last page may be partial. */
		if (size < pagesz)
			pagesz = size;
		map->dm_segs[seg].ds_len += pagesz;
		size -= pagesz;
	}

	/* Make the map truly valid. */
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = mapsize;

	return (0);
}

/*
 * unload a DMA map.
 */
void
mbus_dmamap_unload(void *v, bus_dmamap_t map)
{
	/*
	 * If this map was loaded with mbus_dmamap_load, we don't need to do
	 * anything. If this map was loaded with mbus_dmamap_load_raw, we also
	 * don't need to do anything.
	 */

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
mbus_dmamap_sync(void *v, bus_dmamap_t map, bus_addr_t offset, bus_size_t len,
    int ops)
{
	int i;

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("mbus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("mbus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if ((offset + len) > map->dm_mapsize)
		panic("mbus_dmamap_sync: bad length");
#endif

	/*
	 * For a virtually-indexed write-back cache, we need to do the
	 * following things:
	 *
	 * PREREAD -- Invalidate the D-cache. We do this here in case a
	 * write-back is required by the back-end.
	 *
	 * PREWRITE -- Write-back the D-cache. Note that if we are doing
	 * a PREREAD|PREWRITE, we can collapse the whole thing into a
	 * single Wb-Inv.
	 *
	 * POSTREAD -- Nothing.
	 *
	 * POSTWRITE -- Nothing.
	 */

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (len == 0 || ops == 0)
		return;

	/* Walk the segments, flushing the portion covered by [offset, len). */
	for (i = 0; len != 0 && i < map->dm_nsegs; i++) {
		if (offset >= map->dm_segs[i].ds_len)
			offset -= map->dm_segs[i].ds_len;
		else {
			bus_size_t l = map->dm_segs[i].ds_len - offset;

			if (l > len)
				l = len;

			fdcache(HPPA_SID_KERNEL, map->dm_segs[i]._ds_va +
			    offset, l);
			len -= l;
			offset = 0;
		}
	}

	/* for either operation sync the shit away */
	__asm __volatile ("sync\n\tsyncdma\n\tsync\n\t"
	    "nop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop" ::: "memory");
}

/*
 * Common function for DMA-safe memory allocation. May be called by bus-
 * specific DMA memory allocation functions.
 */
int
mbus_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	paddr_t low, high;
	struct pglist *mlist;
	struct vm_page *m;
	paddr_t pa, pa_next;
	int seg;
	int error;

	DPRINTF(("%s: size 0x%lx align 0x%lx bdry %0lx segs %p nsegs %d\n",
	    __func__, size, alignment, boundary, segs, nsegs));

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Decide where we can allocate pages.
	 * NOTE(review): without BUS_DMA_24BIT this relies on the 0 - 1
	 * wrap producing an all-ones "no upper limit" value — confirm
	 * paddr_t is unsigned on all configurations.
	 */
	low = 0;
	high = ((flags & BUS_DMA_24BIT) ? (1 << 24) : 0) - 1;

	if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	/*
	 * Allocate physical pages from the VM system.
	 */
	TAILQ_INIT(mlist);
	error = uvm_pglistalloc(size, low, high, 0, 0, mlist, nsegs,
	    (flags & BUS_DMA_NOWAIT) == 0);

	/* If we don't have the pages. */
	if (error) {
		DPRINTF(("%s: uvm_pglistalloc(%lx, %lx, %lx, 0, 0, %p, %d, %0x)"
		    " failed", __func__, size, low, high, mlist, nsegs,
		    (flags & BUS_DMA_NOWAIT) == 0));
		free(mlist, M_DEVBUF);
		return (error);
	}

	pa_next = 0;
	seg = -1;

	/* Coalesce physically-contiguous pages into segments. */
	TAILQ_FOREACH(m, mlist, pageq.queue) {
		pa = VM_PAGE_TO_PHYS(m);
		if (pa != pa_next) {
			if (++seg >= nsegs) {
				uvm_pglistfree(mlist);
				free(mlist, M_DEVBUF);
				return (ENOMEM);
			}
			segs[seg].ds_addr = pa;
			segs[seg].ds_len = PAGE_SIZE;
			segs[seg]._ds_mlist = NULL;
			segs[seg]._ds_va = 0;
		} else
			segs[seg].ds_len += PAGE_SIZE;
		pa_next = pa + PAGE_SIZE;
	}
	*rsegs = seg + 1;

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamap_free() can return it.
	 *
	 * Nobody should touch the pageq.queue fields while these pages are in
	 * our custody.
	 */
	segs[0]._ds_mlist = mlist;

	/*
	 * We now have physical pages, but no kernel virtual addresses yet.
	 * These may be allocated in bus_dmamap_map.
	 */
	return (0);
}

void
mbus_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist *mlist;
	/*
	 * Return the list of physical pages back to the VM system.
	 */
	mlist = segs[0]._ds_mlist;
	if (mlist == NULL)
		return;

	uvm_pglistfree(mlist);
	free(mlist, M_DEVBUF);
}

/*
 * Common function for mapping DMA-safe memory. May be called by bus-specific
 * DMA memory map functions.
 */
int
mbus_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
    void **kvap, int flags)
{
	bus_addr_t addr;
	vaddr_t va;
	int curseg;
	/* Map uncached when the CPU supports uncached TLB entries. */
	u_int pmflags =
	    hppa_cpu_hastlbu_p() ? PMAP_NOCACHE : 0;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	/* Get a chunk of kernel virtual space. */
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (__predict_false(va == 0))
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		/* Record the VA for mbus_dmamap_sync()'s cache flushes. */
		segs[curseg]._ds_va = va;
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len); ) {
			KASSERT(size != 0);

			pmap_kenter_pa(va, addr, VM_PROT_READ | VM_PROT_WRITE,
			    pmflags);

			addr += PAGE_SIZE;
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Common function for unmapping DMA-safe memory. May be called by bus-
 * specific DMA memory unmapping functions.
 */
void
mbus_dmamem_unmap(void *v, void *kva, size_t size)
{

	KASSERT(((vaddr_t)kva & PAGE_MASK) == 0);

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common functin for mmap(2)'ing DMA-safe memory. May be called by bus-
 * specific DMA mmap(2)'ing functions.
 */
paddr_t
mbus_dmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
		KASSERT((off & PGOFSET) == 0);
		KASSERT((segs[i].ds_addr & PGOFSET) == 0);
		KASSERT((segs[i].ds_len & PGOFSET) == 0);

		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

/*
 * Translate a (possibly user) virtual buffer into physical DMA segments,
 * appending to/merging with the map's existing segments.  Shared helper
 * for the dmamap_load* entry points above.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	pmap = vm_map_pmap(&vm->vm_map);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		bool ok __diagused;
		/*
		 * Get the physical address for this segment.
		 */
		ok = pmap_extract(pmap, vaddr, &curaddr);
		KASSERT(ok == true);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with previous
		 * segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_va = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			    map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			    (map->dm_segs[seg].ds_addr & bmask) ==
			    (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_va = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
1305 */ 1306 if (buflen != 0) 1307 return (EFBIG); /* XXX better return value here? */ 1308 return (0); 1309 } 1310 1311 const struct hppa_bus_dma_tag hppa_dmatag = { 1312 NULL, 1313 mbus_dmamap_create, mbus_dmamap_destroy, 1314 mbus_dmamap_load, mbus_dmamap_load_mbuf, 1315 mbus_dmamap_load_uio, mbus_dmamap_load_raw, 1316 mbus_dmamap_unload, mbus_dmamap_sync, 1317 1318 mbus_dmamem_alloc, mbus_dmamem_free, mbus_dmamem_map, 1319 mbus_dmamem_unmap, mbus_dmamem_mmap 1320 }; 1321 1322 int 1323 mbmatch(device_t parent, cfdata_t cf, void *aux) 1324 { 1325 1326 /* there will be only one */ 1327 if (mb_attached) 1328 return 0; 1329 1330 return 1; 1331 } 1332 1333 static device_t 1334 mb_module_callback(device_t self, struct confargs *ca) 1335 { 1336 if (ca->ca_type.iodc_type == HPPA_TYPE_NPROC || 1337 ca->ca_type.iodc_type == HPPA_TYPE_MEMORY) 1338 return NULL; 1339 1340 return config_found_sm_loc(self, "gedoens", NULL, ca, mbprint, mbsubmatch); 1341 } 1342 1343 static device_t 1344 mb_cpu_mem_callback(device_t self, struct confargs *ca) 1345 { 1346 if ((ca->ca_type.iodc_type != HPPA_TYPE_NPROC && 1347 ca->ca_type.iodc_type != HPPA_TYPE_MEMORY)) 1348 return NULL; 1349 1350 return config_found_sm_loc(self, "gedoens", NULL, ca, mbprint, mbsubmatch); 1351 } 1352 1353 void 1354 mbattach(device_t parent, device_t self, void *aux) 1355 { 1356 struct mainbus_softc *sc = device_private(self); 1357 struct confargs nca; 1358 bus_space_handle_t ioh; 1359 #if NLCD > 0 1360 int err; 1361 #endif 1362 1363 sc->sc_dv = self; 1364 mb_attached = 1; 1365 1366 /* 1367 * Map all of Fixed Physical, Local Broadcast, and Global Broadcast 1368 * space. These spaces are adjacent and in that order and run to the 1369 * end of the address space. 1370 */ 1371 /* 1372 * XXX fredette - this may be a copout, or it may be a great idea. I'm 1373 * not sure which yet. 
1374 */ 1375 1376 /* map all the way till the end of the memory */ 1377 if (bus_space_map(&hppa_bustag, hppa_mcpuhpa, (~0LU - hppa_mcpuhpa + 1), 1378 0, &ioh)) 1379 panic("%s: cannot map mainbus IO space", __func__); 1380 1381 /* 1382 * Local-Broadcast the HPA to all modules on the bus 1383 */ 1384 ((struct iomod *)(hppa_mcpuhpa & HPPA_FLEX_MASK))[FPA_IOMOD].io_flex = 1385 (void *)((hppa_mcpuhpa & HPPA_FLEX_MASK) | DMA_ENABLE); 1386 1387 aprint_normal(" [flex %lx]\n", hppa_mcpuhpa & HPPA_FLEX_MASK); 1388 1389 /* PDC first */ 1390 memset(&nca, 0, sizeof(nca)); 1391 nca.ca_name = "pdc"; 1392 nca.ca_hpa = 0; 1393 nca.ca_iot = &hppa_bustag; 1394 nca.ca_dmatag = &hppa_dmatag; 1395 config_found(self, &nca, mbprint); 1396 1397 #if NPOWER > 0 1398 /* get some power */ 1399 memset(&nca, 0, sizeof(nca)); 1400 nca.ca_name = "power"; 1401 nca.ca_irq = HPPACF_IRQ_UNDEF; 1402 nca.ca_iot = &hppa_bustag; 1403 config_found(self, &nca, mbprint); 1404 #endif 1405 1406 #if NLCD > 0 1407 memset(&nca, 0, sizeof(nca)); 1408 err = pdcproc_chassis_info(&pdc_chassis_info, &nca.ca_pcl); 1409 if (!err && nca.ca_pcl.enabled) { 1410 nca.ca_name = "lcd"; 1411 nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] = 1412 nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1; 1413 nca.ca_dp.dp_mod = -1; 1414 nca.ca_irq = HPPACF_IRQ_UNDEF; 1415 nca.ca_iot = &hppa_bustag; 1416 nca.ca_hpa = nca.ca_pcl.cmd_addr; 1417 1418 config_found(self, &nca, mbprint); 1419 } 1420 #endif 1421 1422 hppa_modules_scan(); 1423 1424 /* Search and attach all CPUs and memory controllers. 
*/ 1425 memset(&nca, 0, sizeof(nca)); 1426 nca.ca_name = "mainbus"; 1427 nca.ca_hpa = 0; 1428 nca.ca_hpabase = HPPA_FPA; /* Central bus */ 1429 nca.ca_nmodules = MAXMODBUS; 1430 nca.ca_irq = HPPACF_IRQ_UNDEF; 1431 nca.ca_iot = &hppa_bustag; 1432 nca.ca_dmatag = &hppa_dmatag; 1433 nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] = 1434 nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1; 1435 nca.ca_dp.dp_mod = -1; 1436 pdc_scanbus(self, &nca, mb_cpu_mem_callback); 1437 1438 /* Search for IO hardware. */ 1439 memset(&nca, 0, sizeof(nca)); 1440 nca.ca_name = "mainbus"; 1441 nca.ca_hpa = 0; 1442 nca.ca_hpabase = 0; /* Central bus already walked above */ 1443 nca.ca_nmodules = MAXMODBUS; 1444 nca.ca_irq = HPPACF_IRQ_UNDEF; 1445 nca.ca_iot = &hppa_bustag; 1446 nca.ca_dmatag = &hppa_dmatag; 1447 nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] = 1448 nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1; 1449 nca.ca_dp.dp_mod = -1; 1450 pdc_scanbus(self, &nca, mb_module_callback); 1451 1452 hppa_modules_done(); 1453 } 1454 1455 int 1456 mbprint(void *aux, const char *pnp) 1457 { 1458 int n; 1459 struct confargs *ca = aux; 1460 1461 if (pnp) 1462 aprint_normal("\"%s\" at %s (type 0x%x, sv 0x%x)", ca->ca_name, 1463 pnp, ca->ca_type.iodc_type, ca->ca_type.iodc_sv_model); 1464 if (ca->ca_hpa) { 1465 aprint_normal(" hpa 0x%lx", ca->ca_hpa); 1466 if (ca->ca_dp.dp_mod >=0) { 1467 aprint_normal(" path "); 1468 for (n = 0; n < 6; n++) { 1469 if (ca->ca_dp.dp_bc[n] >= 0) 1470 aprint_normal("%d/", ca->ca_dp.dp_bc[n]); 1471 } 1472 aprint_normal("%d", ca->ca_dp.dp_mod); 1473 } 1474 if (!pnp && ca->ca_irq >= 0) { 1475 aprint_normal(" irq %d", ca->ca_irq); 1476 } 1477 } 1478 1479 return (UNCONF); 1480 } 1481 1482 int 1483 mbsubmatch(device_t parent, cfdata_t cf, const int *ldesc, void *aux) 1484 { 1485 struct confargs *ca = aux; 1486 int ret; 1487 int saved_irq; 1488 1489 saved_irq = ca->ca_irq; 1490 if (cf->hppacf_irq != 
HPPACF_IRQ_UNDEF) 1491 ca->ca_irq = cf->hppacf_irq; 1492 if (!(ret = config_match(parent, cf, aux))) 1493 ca->ca_irq = saved_irq; 1494 return ret; 1495 } 1496