1 /* $NetBSD: pciconf.c,v 1.48 2020/07/08 13:12:35 thorpej Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Allen Briggs for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 /* 38 * Derived in part from code from PMON/2000 (http://pmon.groupbsd.org/). 39 */ 40 41 /* 42 * To do: 43 * - Perform all data structure allocation dynamically, don't have 44 * statically-sized arrays ("oops, you lose because you have too 45 * many slots filled!") 46 * - Do this in 2 passes, with an MD hook to control the behavior: 47 * (1) Configure the bus (possibly including expansion 48 * ROMs. 49 * (2) Another pass to disable expansion ROMs if they're 50 * mapped (since you're not supposed to leave them 51 * mapped when you're not using them). 52 * This would facilitate MD code executing the expansion ROMs 53 * if necessary (possibly with an x86 emulator) to configure 54 * devices (e.g. VGA cards). 55 * - Deal with "anything can be hot-plugged" -- i.e., carry configuration 56 * information around & be able to reconfigure on the fly 57 * - Deal with segments (See IA64 System Abstraction Layer) 58 * - Deal with subtractive bridges (& non-spec positive/subtractive decode) 59 * - Deal with ISA/VGA/VGA palette snooping 60 * - Deal with device capabilities on bridges 61 * - Worry about changing a bridge to/from transparency 62 * From thorpej (05/25/01) 63 * - Try to handle devices that are already configured (perhaps using that 64 * as a hint to where we put other devices) 65 */ 66 67 #include <sys/cdefs.h> 68 __KERNEL_RCSID(0, "$NetBSD: pciconf.c,v 1.48 2020/07/08 13:12:35 thorpej Exp $"); 69 70 #include "opt_pci.h" 71 72 #include <sys/param.h> 73 #include <sys/queue.h> 74 #include <sys/systm.h> 75 #include <sys/malloc.h> 76 #include <sys/kmem.h> 77 #include <sys/vmem.h> 78 79 #include <dev/pci/pcivar.h> 80 #include <dev/pci/pciconf.h> 81 #include <dev/pci/pcidevs.h> 82 #include <dev/pci/pccbbreg.h> 83 84 int pci_conf_debug = 0; 85 86 #if !defined(MIN) 87 #define MIN(a,b) (((a)<(b))?(a):(b)) 88 #define MAX(a,b) (((a)>(b))?(a):(b)) 89 #endif 90 91 /* per-bus constants. */ 92 #define MAX_CONF_DEV 32 /* Arbitrary */ 93 #define MAX_CONF_MEM (3 * MAX_CONF_DEV) /* Avg. 3 per device -- Arb. */ 94 #define MAX_CONF_IO (3 * MAX_CONF_DEV) /* Avg. 1 per device -- Arb. */ 95 96 struct _s_pciconf_bus_t; /* Forward declaration */ 97 98 struct pciconf_resource { 99 vmem_t *arena; 100 bus_addr_t min_addr; 101 bus_addr_t max_addr; 102 bus_size_t total_size; 103 }; 104 105 #define PCICONF_RESOURCE_NTYPES 3 106 CTASSERT(PCICONF_RESOURCE_IO < PCICONF_RESOURCE_NTYPES); 107 CTASSERT(PCICONF_RESOURCE_MEM < PCICONF_RESOURCE_NTYPES); 108 CTASSERT(PCICONF_RESOURCE_PREFETCHABLE_MEM < PCICONF_RESOURCE_NTYPES); 109 110 static const char *pciconf_resource_names[] = { 111 [PCICONF_RESOURCE_IO] = "pci-io", 112 [PCICONF_RESOURCE_MEM] = "pci-mem", 113 [PCICONF_RESOURCE_PREFETCHABLE_MEM] = "pci-pmem", 114 }; 115 116 struct pciconf_resources { 117 struct pciconf_resource resources[PCICONF_RESOURCE_NTYPES]; 118 }; 119 120 typedef struct _s_pciconf_dev_t { 121 int ipin; 122 int iline; 123 int min_gnt; 124 int max_lat; 125 int enable; 126 pcitag_t tag; 127 pci_chipset_tag_t pc; 128 struct _s_pciconf_bus_t *ppb; /* I am really a bridge */ 129 } pciconf_dev_t; 130 131 typedef struct _s_pciconf_win_t { 132 pciconf_dev_t *dev; 133 int reg; /* 0 for busses */ 134 int align; 135 int prefetch; 136 uint64_t size; 137 uint64_t address; 138 } pciconf_win_t; 139 140 typedef struct _s_pciconf_bus_t { 141 int busno; 142 int next_busno; 143 int last_busno; 144 int max_mingnt; 145 int min_maxlat; 146 int cacheline_size; 147 int prefetch; 148 int fast_b2b; 149 int freq_66; 150 int def_ltim; 151 int max_ltim; 152 int bandwidth_used; 153 int swiz; 154 int io_32bit; 155 int pmem_64bit; 156 int mem_64bit; 157 int io_align; 158 int mem_align; 159 int pmem_align; 160 161 int ndevs; 162 pciconf_dev_t device[MAX_CONF_DEV]; 163 164 /* These should be sorted in order of decreasing size */ 165 int nmemwin; 166 pciconf_win_t pcimemwin[MAX_CONF_MEM]; 167 int niowin; 168 pciconf_win_t pciiowin[MAX_CONF_IO]; 169 170 bus_size_t io_total; 171 bus_size_t mem_total; 172 bus_size_t pmem_total; 173 174 struct pciconf_resource io_res; 175 struct pciconf_resource mem_res; 176 struct pciconf_resource pmem_res; 177 178 pci_chipset_tag_t pc; 179 struct _s_pciconf_bus_t *parent_bus; 180 } pciconf_bus_t; 181 182 static int probe_bus(pciconf_bus_t *); 183 static void alloc_busno(pciconf_bus_t *, pciconf_bus_t *); 184 static void set_busreg(pci_chipset_tag_t, pcitag_t, int, int, int); 185 static int pci_do_device_query(pciconf_bus_t *, pcitag_t, int, int, int); 186 static int setup_iowins(pciconf_bus_t *); 187 static int setup_memwins(pciconf_bus_t *); 188 static int configure_bridge(pciconf_dev_t *); 189 static int configure_bus(pciconf_bus_t *); 190 static uint64_t pci_allocate_range(struct pciconf_resource *, uint64_t, int, 191 bool); 192 static pciconf_win_t *get_io_desc(pciconf_bus_t *, bus_size_t); 193 static pciconf_win_t *get_mem_desc(pciconf_bus_t *, bus_size_t); 194 static pciconf_bus_t *query_bus(pciconf_bus_t *, pciconf_dev_t *, int); 195 196 static void print_tag(pci_chipset_tag_t, pcitag_t); 197 198 static vmem_t * 199 create_vmem_arena(const char *name, bus_addr_t start, bus_size_t size, 200 int flags) 201 { 202 KASSERT(start < VMEM_ADDR_MAX); 203 KASSERT(size == 0 || 204 (VMEM_ADDR_MAX - start) >= (size - 1)); 205 206 return vmem_create(name, start, size, 207 1, /*quantum*/ 208 NULL, /*importfn*/ 209 NULL, /*releasefn*/ 210 NULL, /*source*/ 211 0, /*qcache_max*/ 212 flags, 213 IPL_NONE); 214 } 215 216 static int 217 init_range_resource(struct pciconf_resource *r, const char *name, 218 bus_addr_t start, bus_addr_t size) 219 { 220 r->arena = create_vmem_arena(name, start, size, VM_NOSLEEP); 221 if (r->arena == NULL) 222 return ENOMEM; 223 224 r->min_addr = start; 225 r->max_addr = start + (size - 1); 226 r->total_size = size; 227 228 return 0; 229 } 230 231 static void 232 fini_range_resource(struct pciconf_resource *r) 233 { 234 if (r->arena) { 235 vmem_xfreeall(r->arena); 236 vmem_destroy(r->arena); 237 } 238 memset(r, 0, sizeof(*r)); 239 } 240 241 static void 242 print_tag(pci_chipset_tag_t pc, pcitag_t tag) 243 { 244 int bus, dev, func; 245 246 pci_decompose_tag(pc, tag, &bus, &dev, &func); 247 printf("PCI: bus %d, device %d, function %d: ", bus, dev, func); 248 } 249 250 #ifdef _LP64 251 #define __used_only_lp64 __unused 252 #else 253 #define __used_only_lp64 /* nothing */ 254 #endif /* _LP64 */ 255 256 /************************************************************************/ 257 /************************************************************************/ 258 /*********************** Bus probing routines ***********************/ 259 /************************************************************************/ 260 /************************************************************************/ 261 static pciconf_win_t * 262 get_io_desc(pciconf_bus_t *pb, bus_size_t size) 263 { 264 int i, n; 265 266 n = pb->niowin; 267 for (i = n; i > 0 && size > pb->pciiowin[i-1].size; i--) 268 pb->pciiowin[i] = pb->pciiowin[i-1]; /* struct copy */ 269 return &pb->pciiowin[i]; 270 } 271 272 static pciconf_win_t * 273 get_mem_desc(pciconf_bus_t *pb, bus_size_t size) 274 { 275 int i, n; 276 277 n = pb->nmemwin; 278 for (i = n; i > 0 && size > pb->pcimemwin[i-1].size; i--) 279 pb->pcimemwin[i] = pb->pcimemwin[i-1]; /* struct copy */ 280 return &pb->pcimemwin[i]; 281 } 282 283 /* 284 * Set up bus common stuff, then loop over devices & functions. 285 * If we find something, call pci_do_device_query()). 286 */ 287 static int 288 probe_bus(pciconf_bus_t *pb) 289 { 290 int device; 291 uint8_t devs[32]; 292 int i, n; 293 294 pb->ndevs = 0; 295 pb->niowin = 0; 296 pb->nmemwin = 0; 297 pb->freq_66 = 1; 298 #ifdef PCICONF_NO_FAST_B2B 299 pb->fast_b2b = 0; 300 #else 301 pb->fast_b2b = 1; 302 #endif 303 pb->prefetch = 1; 304 pb->max_mingnt = 0; /* we are looking for the maximum */ 305 pb->min_maxlat = 0x100; /* we are looking for the minimum */ 306 pb->bandwidth_used = 0; 307 308 n = pci_bus_devorder(pb->pc, pb->busno, devs, __arraycount(devs)); 309 for (i = 0; i < n; i++) { 310 pcitag_t tag; 311 pcireg_t id, bhlcr; 312 int function, nfunction; 313 int confmode; 314 315 device = devs[i]; 316 317 tag = pci_make_tag(pb->pc, pb->busno, device, 0); 318 if (pci_conf_debug) { 319 print_tag(pb->pc, tag); 320 } 321 id = pci_conf_read(pb->pc, tag, PCI_ID_REG); 322 323 if (pci_conf_debug) { 324 printf("id=%x: Vendor=%x, Product=%x\n", 325 id, PCI_VENDOR(id), PCI_PRODUCT(id)); 326 } 327 /* Invalid vendor ID value? */ 328 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID) 329 continue; 330 331 bhlcr = pci_conf_read(pb->pc, tag, PCI_BHLC_REG); 332 nfunction = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1; 333 for (function = 0; function < nfunction; function++) { 334 tag = pci_make_tag(pb->pc, pb->busno, device, function); 335 id = pci_conf_read(pb->pc, tag, PCI_ID_REG); 336 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID) 337 continue; 338 if (pb->ndevs + 1 < MAX_CONF_DEV) { 339 if (pci_conf_debug) { 340 print_tag(pb->pc, tag); 341 printf("Found dev 0x%04x 0x%04x -- " 342 "really probing.\n", 343 PCI_VENDOR(id), PCI_PRODUCT(id)); 344 } 345 #ifdef __HAVE_PCI_CONF_HOOK 346 confmode = pci_conf_hook(pb->pc, pb->busno, 347 device, function, id); 348 if (confmode == 0) 349 continue; 350 #else 351 /* 352 * Don't enable expansion ROMS -- some cards 353 * share address decoders between the EXPROM 354 * and PCI memory space, and enabling the ROM 355 * when not needed will cause all sorts of 356 * lossage. 357 */ 358 confmode = PCI_CONF_DEFAULT; 359 #endif 360 if (pci_do_device_query(pb, tag, device, 361 function, confmode)) 362 return -1; 363 pb->ndevs++; 364 } 365 } 366 } 367 return 0; 368 } 369 370 static void 371 alloc_busno(pciconf_bus_t *parent, pciconf_bus_t *pb) 372 { 373 pb->busno = parent->next_busno; 374 pb->next_busno = pb->busno + 1; 375 } 376 377 static void 378 set_busreg(pci_chipset_tag_t pc, pcitag_t tag, int prim, int sec, int sub) 379 { 380 pcireg_t busreg; 381 382 busreg = __SHIFTIN(prim, PCI_BRIDGE_BUS_PRIMARY); 383 busreg |= __SHIFTIN(sec, PCI_BRIDGE_BUS_SECONDARY); 384 busreg |= __SHIFTIN(sub, PCI_BRIDGE_BUS_SUBORDINATE); 385 pci_conf_write(pc, tag, PCI_BRIDGE_BUS_REG, busreg); 386 } 387 388 static pciconf_bus_t * 389 query_bus(pciconf_bus_t *parent, pciconf_dev_t *pd, int dev) 390 { 391 pciconf_bus_t *pb; 392 pcireg_t io, pmem; 393 pciconf_win_t *pi, *pm; 394 395 pb = kmem_zalloc(sizeof (pciconf_bus_t), KM_SLEEP); 396 pb->cacheline_size = parent->cacheline_size; 397 pb->parent_bus = parent; 398 alloc_busno(parent, pb); 399 400 pb->mem_align = 0x100000; /* 1M alignment */ 401 pb->pmem_align = 0x100000; /* 1M alignment */ 402 pb->io_align = 0x1000; /* 4K alignment */ 403 404 set_busreg(parent->pc, pd->tag, parent->busno, pb->busno, 0xff); 405 406 pb->swiz = parent->swiz + dev; 407 408 memset(&pb->io_res, 0, sizeof(pb->io_res)); 409 memset(&pb->mem_res, 0, sizeof(pb->mem_res)); 410 memset(&pb->pmem_res, 0, sizeof(pb->pmem_res)); 411 412 pb->pc = parent->pc; 413 pb->io_total = pb->mem_total = pb->pmem_total = 0; 414 415 pb->io_32bit = 0; 416 if (parent->io_32bit) { 417 io = pci_conf_read(parent->pc, pd->tag, PCI_BRIDGE_STATIO_REG); 418 if (PCI_BRIDGE_IO_32BITS(io)) 419 pb->io_32bit = 1; 420 } 421 422 pb->pmem_64bit = 0; 423 if (parent->pmem_64bit) { 424 pmem = pci_conf_read(parent->pc, pd->tag, 425 PCI_BRIDGE_PREFETCHMEM_REG); 426 if (PCI_BRIDGE_PREFETCHMEM_64BITS(pmem)) 427 pb->pmem_64bit = 1; 428 } 429 430 /* Bridges only forward a 32-bit range of non-prefetcable memory. */ 431 pb->mem_64bit = 0; 432 433 if (probe_bus(pb)) { 434 printf("Failed to probe bus %d\n", pb->busno); 435 goto err; 436 } 437 438 /* We have found all subordinate busses now, reprogram busreg. */ 439 pb->last_busno = pb->next_busno - 1; 440 parent->next_busno = pb->next_busno; 441 set_busreg(parent->pc, pd->tag, parent->busno, pb->busno, 442 pb->last_busno); 443 if (pci_conf_debug) 444 printf("PCI bus bridge (parent %d) covers busses %d-%d\n", 445 parent->busno, pb->busno, pb->last_busno); 446 447 if (pb->io_total > 0) { 448 if (parent->niowin >= MAX_CONF_IO) { 449 printf("pciconf: too many (%d) I/O windows\n", 450 parent->niowin); 451 goto err; 452 } 453 pb->io_total |= pb->io_align - 1; /* Round up */ 454 pi = get_io_desc(parent, pb->io_total); 455 pi->dev = pd; 456 pi->reg = 0; 457 pi->size = pb->io_total; 458 pi->align = pb->io_align; /* 4K min alignment */ 459 if (parent->io_align < pb->io_align) 460 parent->io_align = pb->io_align; 461 pi->prefetch = 0; 462 parent->niowin++; 463 parent->io_total += pb->io_total; 464 } 465 466 if (pb->mem_total > 0) { 467 if (parent->nmemwin >= MAX_CONF_MEM) { 468 printf("pciconf: too many (%d) MEM windows\n", 469 parent->nmemwin); 470 goto err; 471 } 472 pb->mem_total |= pb->mem_align - 1; /* Round up */ 473 pm = get_mem_desc(parent, pb->mem_total); 474 pm->dev = pd; 475 pm->reg = 0; 476 pm->size = pb->mem_total; 477 pm->align = pb->mem_align; /* 1M min alignment */ 478 if (parent->mem_align < pb->mem_align) 479 parent->mem_align = pb->mem_align; 480 pm->prefetch = 0; 481 parent->nmemwin++; 482 parent->mem_total += pb->mem_total; 483 } 484 485 if (pb->pmem_total > 0) { 486 if (parent->nmemwin >= MAX_CONF_MEM) { 487 printf("pciconf: too many MEM windows\n"); 488 goto err; 489 } 490 pb->pmem_total |= pb->pmem_align - 1; /* Round up */ 491 pm = get_mem_desc(parent, pb->pmem_total); 492 pm->dev = pd; 493 pm->reg = 0; 494 pm->size = pb->pmem_total; 495 pm->align = pb->pmem_align; /* 1M alignment */ 496 if (parent->pmem_align < pb->pmem_align) 497 parent->pmem_align = pb->pmem_align; 498 pm->prefetch = 1; 499 parent->nmemwin++; 500 parent->pmem_total += pb->pmem_total; 501 } 502 503 return pb; 504 err: 505 kmem_free(pb, sizeof(*pb)); 506 return NULL; 507 } 508 509 static int 510 pci_do_device_query(pciconf_bus_t *pb, pcitag_t tag, int dev, int func, 511 int mode) 512 { 513 pciconf_dev_t *pd; 514 pciconf_win_t *pi, *pm; 515 pcireg_t classreg, cmd, icr, bhlc, bar, mask, bar64, mask64, 516 busreg; 517 uint64_t size; 518 int br, width, reg_start, reg_end; 519 520 pd = &pb->device[pb->ndevs]; 521 pd->pc = pb->pc; 522 pd->tag = tag; 523 pd->ppb = NULL; 524 pd->enable = mode; 525 526 classreg = pci_conf_read(pb->pc, tag, PCI_CLASS_REG); 527 528 cmd = pci_conf_read(pb->pc, tag, PCI_COMMAND_STATUS_REG); 529 bhlc = pci_conf_read(pb->pc, tag, PCI_BHLC_REG); 530 531 if (PCI_CLASS(classreg) != PCI_CLASS_BRIDGE 532 && PCI_HDRTYPE_TYPE(bhlc) != PCI_HDRTYPE_PPB) { 533 cmd &= ~(PCI_COMMAND_MASTER_ENABLE | 534 PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE); 535 pci_conf_write(pb->pc, tag, PCI_COMMAND_STATUS_REG, cmd); 536 } else if (pci_conf_debug) { 537 print_tag(pb->pc, tag); 538 printf("device is a bridge; not clearing enables\n"); 539 } 540 541 if ((cmd & PCI_STATUS_BACKTOBACK_SUPPORT) == 0) 542 pb->fast_b2b = 0; 543 544 if ((cmd & PCI_STATUS_66MHZ_SUPPORT) == 0) 545 pb->freq_66 = 0; 546 547 switch (PCI_HDRTYPE_TYPE(bhlc)) { 548 case PCI_HDRTYPE_DEVICE: 549 reg_start = PCI_MAPREG_START; 550 reg_end = PCI_MAPREG_END; 551 break; 552 case PCI_HDRTYPE_PPB: 553 pd->ppb = query_bus(pb, pd, dev); 554 if (pd->ppb == NULL) 555 return -1; 556 return 0; 557 case PCI_HDRTYPE_PCB: 558 reg_start = PCI_MAPREG_START; 559 reg_end = PCI_MAPREG_PCB_END; 560 561 busreg = pci_conf_read(pb->pc, tag, PCI_BUSNUM); 562 busreg = (busreg & 0xff000000) | 563 __SHIFTIN(pb->busno, PCI_BRIDGE_BUS_PRIMARY) | 564 __SHIFTIN(pb->next_busno, PCI_BRIDGE_BUS_SECONDARY) | 565 __SHIFTIN(pb->next_busno, PCI_BRIDGE_BUS_SUBORDINATE); 566 pci_conf_write(pb->pc, tag, PCI_BUSNUM, busreg); 567 568 pb->next_busno++; 569 break; 570 default: 571 return -1; 572 } 573 574 icr = pci_conf_read(pb->pc, tag, PCI_INTERRUPT_REG); 575 pd->ipin = PCI_INTERRUPT_PIN(icr); 576 pd->iline = PCI_INTERRUPT_LINE(icr); 577 pd->min_gnt = PCI_MIN_GNT(icr); 578 pd->max_lat = PCI_MAX_LAT(icr); 579 if (pd->iline || pd->ipin) { 580 pci_conf_interrupt(pb->pc, pb->busno, dev, pd->ipin, pb->swiz, 581 &pd->iline); 582 icr &= ~(PCI_INTERRUPT_LINE_MASK << PCI_INTERRUPT_LINE_SHIFT); 583 icr |= (pd->iline << PCI_INTERRUPT_LINE_SHIFT); 584 pci_conf_write(pb->pc, tag, PCI_INTERRUPT_REG, icr); 585 } 586 587 if (pd->min_gnt != 0 || pd->max_lat != 0) { 588 if (pd->min_gnt != 0 && pd->min_gnt > pb->max_mingnt) 589 pb->max_mingnt = pd->min_gnt; 590 591 if (pd->max_lat != 0 && pd->max_lat < pb->min_maxlat) 592 pb->min_maxlat = pd->max_lat; 593 594 pb->bandwidth_used += pd->min_gnt * 4000000 / 595 (pd->min_gnt + pd->max_lat); 596 } 597 598 width = 4; 599 for (br = reg_start; br < reg_end; br += width) { 600 #if 0 601 /* XXX Should only ignore if IDE not in legacy mode? */ 602 if (PCI_CLASS(classreg) == PCI_CLASS_MASS_STORAGE && 603 PCI_SUBCLASS(classreg) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 604 break; 605 } 606 #endif 607 bar = pci_conf_read(pb->pc, tag, br); 608 pci_conf_write(pb->pc, tag, br, 0xffffffff); 609 mask = pci_conf_read(pb->pc, tag, br); 610 pci_conf_write(pb->pc, tag, br, bar); 611 width = 4; 612 613 if ( (mode & PCI_CONF_MAP_IO) 614 && (PCI_MAPREG_TYPE(mask) == PCI_MAPREG_TYPE_IO)) { 615 /* 616 * Upper 16 bits must be one. Devices may hardwire 617 * them to zero, though, per PCI 2.2, 6.2.5.1, p 203. 618 */ 619 mask |= 0xffff0000; 620 621 size = PCI_MAPREG_IO_SIZE(mask); 622 if (size == 0) { 623 if (pci_conf_debug) { 624 print_tag(pb->pc, tag); 625 printf("I/O BAR 0x%x is void\n", br); 626 } 627 continue; 628 } 629 630 if (pb->niowin >= MAX_CONF_IO) { 631 printf("pciconf: too many I/O windows\n"); 632 return -1; 633 } 634 635 pi = get_io_desc(pb, size); 636 pi->dev = pd; 637 pi->reg = br; 638 pi->size = (uint64_t)size; 639 pi->align = 4; 640 if (pb->io_align < pi->size) 641 pb->io_align = pi->size; 642 pi->prefetch = 0; 643 if (pci_conf_debug) { 644 print_tag(pb->pc, tag); 645 printf("Register 0x%x, I/O size %" PRIu64 "\n", 646 br, pi->size); 647 } 648 pb->niowin++; 649 pb->io_total += size; 650 } else if ((mode & PCI_CONF_MAP_MEM) 651 && (PCI_MAPREG_TYPE(mask) == PCI_MAPREG_TYPE_MEM)) { 652 switch (PCI_MAPREG_MEM_TYPE(mask)) { 653 case PCI_MAPREG_MEM_TYPE_32BIT: 654 case PCI_MAPREG_MEM_TYPE_32BIT_1M: 655 size = (uint64_t)PCI_MAPREG_MEM_SIZE(mask); 656 break; 657 case PCI_MAPREG_MEM_TYPE_64BIT: 658 bar64 = pci_conf_read(pb->pc, tag, br + 4); 659 pci_conf_write(pb->pc, tag, br + 4, 0xffffffff); 660 mask64 = pci_conf_read(pb->pc, tag, br + 4); 661 pci_conf_write(pb->pc, tag, br + 4, bar64); 662 size = (uint64_t)PCI_MAPREG_MEM64_SIZE( 663 (((uint64_t)mask64) << 32) | mask); 664 width = 8; 665 break; 666 default: 667 print_tag(pb->pc, tag); 668 printf("reserved mapping type 0x%x\n", 669 PCI_MAPREG_MEM_TYPE(mask)); 670 continue; 671 } 672 673 if (size == 0) { 674 if (pci_conf_debug) { 675 print_tag(pb->pc, tag); 676 printf("MEM%d BAR 0x%x is void\n", 677 PCI_MAPREG_MEM_TYPE(mask) == 678 PCI_MAPREG_MEM_TYPE_64BIT ? 679 64 : 32, br); 680 } 681 continue; 682 } else { 683 if (pci_conf_debug) { 684 print_tag(pb->pc, tag); 685 printf("MEM%d BAR 0x%x has size %#lx\n", 686 PCI_MAPREG_MEM_TYPE(mask) == 687 PCI_MAPREG_MEM_TYPE_64BIT ? 688 64 : 32, 689 br, (unsigned long)size); 690 } 691 } 692 693 if (pb->nmemwin >= MAX_CONF_MEM) { 694 printf("pciconf: too many memory windows\n"); 695 return -1; 696 } 697 698 pm = get_mem_desc(pb, size); 699 pm->dev = pd; 700 pm->reg = br; 701 pm->size = size; 702 pm->align = 4; 703 pm->prefetch = PCI_MAPREG_MEM_PREFETCHABLE(mask); 704 if (pci_conf_debug) { 705 print_tag(pb->pc, tag); 706 printf("Register 0x%x, memory size %" 707 PRIu64 "\n", br, pm->size); 708 } 709 pb->nmemwin++; 710 if (pm->prefetch) { 711 pb->pmem_total += size; 712 if (pb->pmem_align < pm->size) 713 pb->pmem_align = pm->size; 714 } else { 715 pb->mem_total += size; 716 if (pb->mem_align < pm->size) 717 pb->mem_align = pm->size; 718 } 719 } 720 } 721 722 if (mode & PCI_CONF_MAP_ROM) { 723 bar = pci_conf_read(pb->pc, tag, PCI_MAPREG_ROM); 724 pci_conf_write(pb->pc, tag, PCI_MAPREG_ROM, 0xfffffffe); 725 mask = pci_conf_read(pb->pc, tag, PCI_MAPREG_ROM); 726 pci_conf_write(pb->pc, tag, PCI_MAPREG_ROM, bar); 727 728 if (mask != 0 && mask != 0xffffffff) { 729 if (pb->nmemwin >= MAX_CONF_MEM) { 730 printf("pciconf: too many memory windows\n"); 731 return -1; 732 } 733 size = (uint64_t)PCI_MAPREG_MEM_SIZE(mask); 734 735 pm = get_mem_desc(pb, size); 736 pm->dev = pd; 737 pm->reg = PCI_MAPREG_ROM; 738 pm->size = size; 739 pm->align = 4; 740 pm->prefetch = 0; 741 if (pci_conf_debug) { 742 print_tag(pb->pc, tag); 743 printf("Expansion ROM memory size %" 744 PRIu64 "\n", pm->size); 745 } 746 pb->nmemwin++; 747 if (pm->prefetch) { 748 pb->pmem_total += size; 749 if (pb->pmem_align < pm->size) 750 pb->pmem_align = pm->size; 751 } else { 752 pb->mem_total += size; 753 if (pb->mem_align < pm->size) 754 pb->mem_align = pm->size; 755 } 756 } 757 } else { 758 /* Don't enable ROMs if we aren't going to map them. */ 759 mode &= ~PCI_CONF_ENABLE_ROM; 760 pd->enable &= ~PCI_CONF_ENABLE_ROM; 761 } 762 763 if (!(mode & PCI_CONF_ENABLE_ROM)) { 764 /* Ensure ROM is disabled */ 765 bar = pci_conf_read(pb->pc, tag, PCI_MAPREG_ROM); 766 pci_conf_write(pb->pc, tag, PCI_MAPREG_ROM, 767 bar & ~PCI_MAPREG_ROM_ENABLE); 768 } 769 770 return 0; 771 } 772 773 /************************************************************************/ 774 /************************************************************************/ 775 /******************** Bus configuration routines ********************/ 776 /************************************************************************/ 777 /************************************************************************/ 778 static uint64_t 779 pci_allocate_range(struct pciconf_resource * const r, const uint64_t amt, 780 const int align, const bool ok64 __used_only_lp64) 781 { 782 vmem_size_t const size = (vmem_size_t) amt; 783 vmem_addr_t result; 784 int error; 785 786 #ifdef _LP64 787 /* 788 * If a 64-bit range IS OK, then we prefer allocating above 4GB. 789 * 790 * XXX We guard this with _LP64 because vmem uses uintptr_t 791 * internally. 792 */ 793 if (!ok64) { 794 error = vmem_xalloc(r->arena, size, align, 0, 0, 795 VMEM_ADDR_MIN, 0xffffffffUL, 796 VM_BESTFIT | VM_NOSLEEP, 797 &result); 798 } else { 799 error = vmem_xalloc(r->arena, size, align, 0, 0, 800 (1UL << 32), VMEM_ADDR_MAX, 801 VM_BESTFIT | VM_NOSLEEP, 802 &result); 803 if (error) { 804 error = vmem_xalloc(r->arena, size, align, 0, 0, 805 VMEM_ADDR_MIN, VMEM_ADDR_MAX, 806 VM_BESTFIT | VM_NOSLEEP, 807 &result); 808 } 809 } 810 #else 811 error = vmem_xalloc(r->arena, size, align, 0, 0, 812 VMEM_ADDR_MIN, 0xffffffffUL, 813 VM_BESTFIT | VM_NOSLEEP, 814 &result); 815 #endif /* _L64 */ 816 817 if (error) 818 return ~0ULL; 819 820 return result; 821 } 822 823 static int 824 setup_iowins(pciconf_bus_t *pb) 825 { 826 pciconf_win_t *pi; 827 pciconf_dev_t *pd; 828 int error; 829 830 for (pi = pb->pciiowin; pi < &pb->pciiowin[pb->niowin]; pi++) { 831 if (pi->size == 0) 832 continue; 833 834 pd = pi->dev; 835 if (pb->io_res.arena == NULL) { 836 /* Bus has no IO ranges, disable IO BAR */ 837 pi->address = 0; 838 pd->enable &= ~PCI_CONF_ENABLE_IO; 839 goto write_ioaddr; 840 } 841 pi->address = pci_allocate_range(&pb->io_res, pi->size, 842 pi->align, false); 843 if (~pi->address == 0) { 844 print_tag(pd->pc, pd->tag); 845 printf("Failed to allocate PCI I/O space (%" 846 PRIu64 " req)\n", pi->size); 847 return -1; 848 } 849 if (pd->ppb && pi->reg == 0) { 850 error = init_range_resource(&pd->ppb->io_res, 851 "ppb-io", pi->address, pi->size); 852 if (error) { 853 print_tag(pd->pc, pd->tag); 854 printf("Failed to alloc I/O arena for bus %d\n", 855 pd->ppb->busno); 856 return -1; 857 } 858 continue; 859 } 860 if (!pb->io_32bit && pi->address > 0xFFFF) { 861 pi->address = 0; 862 pd->enable &= ~PCI_CONF_ENABLE_IO; 863 } else { 864 pd->enable |= PCI_CONF_ENABLE_IO; 865 } 866 write_ioaddr: 867 if (pci_conf_debug) { 868 print_tag(pd->pc, pd->tag); 869 printf("Putting %" PRIu64 " I/O bytes @ %#" PRIx64 870 " (reg %x)\n", pi->size, pi->address, pi->reg); 871 } 872 pci_conf_write(pd->pc, pd->tag, pi->reg, 873 PCI_MAPREG_IO_ADDR(pi->address) | PCI_MAPREG_TYPE_IO); 874 } 875 return 0; 876 } 877 878 static int 879 setup_memwins(pciconf_bus_t *pb) 880 { 881 pciconf_win_t *pm; 882 pciconf_dev_t *pd; 883 pcireg_t base; 884 struct pciconf_resource *r; 885 bool ok64; 886 int error; 887 888 for (pm = pb->pcimemwin; pm < &pb->pcimemwin[pb->nmemwin]; pm++) { 889 if (pm->size == 0) 890 continue; 891 892 ok64 = false; 893 pd = pm->dev; 894 if (pm->prefetch) { 895 r = &pb->pmem_res; 896 ok64 = pb->pmem_64bit; 897 } else { 898 r = &pb->mem_res; 899 ok64 = pb->mem_64bit && pd->ppb == NULL; 900 } 901 902 /* 903 * We need to figure out if the memory BAR is 64-bit 904 * capable or not. If it's not, then we need to constrain 905 * the address allocation. 906 */ 907 if (pm->reg == PCI_MAPREG_ROM) { 908 ok64 = false; 909 } else if (ok64) { 910 base = pci_conf_read(pd->pc, pd->tag, pm->reg); 911 ok64 = PCI_MAPREG_MEM_TYPE(base) == 912 PCI_MAPREG_MEM_TYPE_64BIT; 913 } 914 915 pm->address = pci_allocate_range(r, pm->size, pm->align, 916 ok64); 917 if (~pm->address == 0) { 918 print_tag(pd->pc, pd->tag); 919 printf( 920 "Failed to allocate PCI memory space (%" PRIu64 921 " req, prefetch=%d ok64=%d)\n", pm->size, 922 pm->prefetch, (int)ok64); 923 return -1; 924 } 925 if (pd->ppb && pm->reg == 0) { 926 const char *name = pm->prefetch ? "ppb-pmem" 927 : "ppb-mem"; 928 r = pm->prefetch ? &pd->ppb->pmem_res 929 : &pd->ppb->mem_res; 930 error = init_range_resource(r, name, 931 pm->address, pm->size); 932 if (error) { 933 print_tag(pd->pc, pd->tag); 934 printf("Failed to alloc MEM arena for bus %d\n", 935 pd->ppb->busno); 936 return -1; 937 } 938 continue; 939 } 940 if (!ok64 && pm->address > 0xFFFFFFFFULL) { 941 pm->address = 0; 942 pd->enable &= ~PCI_CONF_ENABLE_MEM; 943 } else 944 pd->enable |= PCI_CONF_ENABLE_MEM; 945 946 if (pm->reg != PCI_MAPREG_ROM) { 947 if (pci_conf_debug) { 948 print_tag(pd->pc, pd->tag); 949 printf( 950 "Putting %" PRIu64 " MEM bytes @ %#" 951 PRIx64 " (reg %x)\n", pm->size, 952 pm->address, pm->reg); 953 } 954 base = pci_conf_read(pd->pc, pd->tag, pm->reg); 955 base = PCI_MAPREG_MEM_ADDR(pm->address) | 956 PCI_MAPREG_MEM_TYPE(base); 957 pci_conf_write(pd->pc, pd->tag, pm->reg, base); 958 if (PCI_MAPREG_MEM_TYPE(base) == 959 PCI_MAPREG_MEM_TYPE_64BIT) { 960 base = (pcireg_t) 961 (PCI_MAPREG_MEM64_ADDR(pm->address) >> 32); 962 pci_conf_write(pd->pc, pd->tag, pm->reg + 4, 963 base); 964 } 965 } 966 } 967 for (pm = pb->pcimemwin; pm < &pb->pcimemwin[pb->nmemwin]; pm++) { 968 if (pm->reg == PCI_MAPREG_ROM && pm->address != -1) { 969 pd = pm->dev; 970 if (!(pd->enable & PCI_CONF_MAP_ROM)) 971 continue; 972 if (pci_conf_debug) { 973 print_tag(pd->pc, pd->tag); 974 printf( 975 "Putting %" PRIu64 " ROM bytes @ %#" 976 PRIx64 " (reg %x)\n", pm->size, 977 pm->address, pm->reg); 978 } 979 base = (pcireg_t) pm->address; 980 if (pd->enable & PCI_CONF_ENABLE_ROM) 981 base |= PCI_MAPREG_ROM_ENABLE; 982 983 pci_conf_write(pd->pc, pd->tag, pm->reg, base); 984 } 985 } 986 return 0; 987 } 988 989 static bool 990 constrain_bridge_mem_range(struct pciconf_resource * const r, 991 u_long * const base, 992 u_long * const limit, 993 const bool ok64 __used_only_lp64) 994 { 995 996 *base = r->min_addr; 997 *limit = r->max_addr; 998 999 #ifdef _LP64 1000 if (!ok64) { 1001 if (r->min_addr >= (1UL << 32)) { 1002 return true; 1003 } 1004 if (r->max_addr > 0xffffffffUL) { 1005 *limit = 0xffffffffUL; 1006 } 1007 } 1008 #endif /* _LP64 */ 1009 1010 return false; 1011 } 1012 1013 /* 1014 * Configure I/O, memory, and prefetcable memory spaces, then make 1015 * a call to configure_bus(). 1016 */ 1017 static int 1018 configure_bridge(pciconf_dev_t *pd) 1019 { 1020 unsigned long io_base, io_limit, mem_base, mem_limit; 1021 pciconf_bus_t *pb; 1022 pcireg_t io, iohigh, mem, cmd; 1023 int rv; 1024 bool isprefetchmem64; 1025 bool bad_range; 1026 1027 pb = pd->ppb; 1028 /* Configure I/O base & limit*/ 1029 if (pb->io_res.arena) { 1030 io_base = pb->io_res.min_addr; 1031 io_limit = pb->io_res.max_addr; 1032 } else { 1033 io_base = 0x1000; /* 4K */ 1034 io_limit = 0x0000; 1035 } 1036 if (pb->io_32bit) { 1037 iohigh = __SHIFTIN(io_base >> 16, PCI_BRIDGE_IOHIGH_BASE) | 1038 __SHIFTIN(io_limit >> 16, PCI_BRIDGE_IOHIGH_LIMIT); 1039 } else { 1040 if (io_limit > 0xFFFF) { 1041 printf("Bus %d bridge does not support 32-bit I/O. ", 1042 pb->busno); 1043 printf("Disabling I/O accesses\n"); 1044 io_base = 0x1000; /* 4K */ 1045 io_limit = 0x0000; 1046 } 1047 iohigh = 0; 1048 } 1049 io = pci_conf_read(pb->pc, pd->tag, PCI_BRIDGE_STATIO_REG) & 1050 PCI_BRIDGE_STATIO_STATUS; 1051 io |= __SHIFTIN((io_base >> 8) & PCI_BRIDGE_STATIO_IOADDR, 1052 PCI_BRIDGE_STATIO_IOBASE); 1053 io |= __SHIFTIN((io_limit >> 8) & PCI_BRIDGE_STATIO_IOADDR, 1054 PCI_BRIDGE_STATIO_IOLIMIT); 1055 pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_STATIO_REG, io); 1056 pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_IOHIGH_REG, iohigh); 1057 1058 /* Configure mem base & limit */ 1059 bad_range = false; 1060 if (pb->mem_res.arena) { 1061 bad_range = constrain_bridge_mem_range(&pb->mem_res, 1062 &mem_base, 1063 &mem_limit, 1064 false); 1065 } else { 1066 mem_base = 0x100000; /* 1M */ 1067 mem_limit = 0x000000; 1068 } 1069 if (bad_range) { 1070 printf("Bus %d bridge MEM range out of range. ", pb->busno); 1071 printf("Disabling MEM accesses\n"); 1072 mem_base = 0x100000; /* 1M */ 1073 mem_limit = 0x000000; 1074 } 1075 mem = __SHIFTIN((mem_base >> 16) & PCI_BRIDGE_MEMORY_ADDR, 1076 PCI_BRIDGE_MEMORY_BASE); 1077 mem |= __SHIFTIN((mem_limit >> 16) & PCI_BRIDGE_MEMORY_ADDR, 1078 PCI_BRIDGE_MEMORY_LIMIT); 1079 pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_MEMORY_REG, mem); 1080 1081 /* Configure prefetchable mem base & limit */ 1082 mem = pci_conf_read(pb->pc, pd->tag, PCI_BRIDGE_PREFETCHMEM_REG); 1083 isprefetchmem64 = PCI_BRIDGE_PREFETCHMEM_64BITS(mem); 1084 bad_range = false; 1085 if (pb->pmem_res.arena) { 1086 bad_range = constrain_bridge_mem_range(&pb->pmem_res, 1087 &mem_base, 1088 &mem_limit, 1089 isprefetchmem64); 1090 } else { 1091 mem_base = 0x100000; /* 1M */ 1092 mem_limit = 0x000000; 1093 } 1094 if (bad_range) { 1095 printf("Bus %d bridge does not support 64-bit PMEM. ", 1096 pb->busno); 1097 printf("Disabling prefetchable-MEM accesses\n"); 1098 mem_base = 0x100000; /* 1M */ 1099 mem_limit = 0x000000; 1100 } 1101 mem = __SHIFTIN((mem_base >> 16) & PCI_BRIDGE_PREFETCHMEM_ADDR, 1102 PCI_BRIDGE_PREFETCHMEM_BASE); 1103 mem |= __SHIFTIN((mem_limit >> 16) & PCI_BRIDGE_PREFETCHMEM_ADDR, 1104 PCI_BRIDGE_PREFETCHMEM_LIMIT); 1105 pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_PREFETCHMEM_REG, mem); 1106 /* 1107 * XXX -- 64-bit systems need a lot more than just this... 1108 */ 1109 if (isprefetchmem64) { 1110 mem_base = (uint64_t)mem_base >> 32; 1111 mem_limit = (uint64_t)mem_limit >> 32; 1112 pci_conf_write(pb->pc, pd->tag, 1113 PCI_BRIDGE_PREFETCHBASEUP32_REG, mem_base & 0xffffffff); 1114 pci_conf_write(pb->pc, pd->tag, 1115 PCI_BRIDGE_PREFETCHLIMITUP32_REG, mem_limit & 0xffffffff); 1116 } 1117 1118 rv = configure_bus(pb); 1119 1120 fini_range_resource(&pb->io_res); 1121 fini_range_resource(&pb->mem_res); 1122 fini_range_resource(&pb->pmem_res); 1123 1124 if (rv == 0) { 1125 cmd = pci_conf_read(pd->pc, pd->tag, PCI_BRIDGE_CONTROL_REG); 1126 cmd &= ~PCI_BRIDGE_CONTROL; /* Clear control bit first */ 1127 cmd |= PCI_BRIDGE_CONTROL_PERE | PCI_BRIDGE_CONTROL_SERR; 1128 if (pb->fast_b2b) 1129 cmd |= PCI_BRIDGE_CONTROL_SECFASTB2B; 1130 1131 pci_conf_write(pd->pc, pd->tag, PCI_BRIDGE_CONTROL_REG, cmd); 1132 cmd = pci_conf_read(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG); 1133 cmd |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE; 1134 pci_conf_write(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG, cmd); 1135 } 1136 1137 return rv; 1138 } 1139 1140 /* 1141 * Calculate latency values, allocate I/O and MEM segments, then set them 1142 * up. If a PCI-PCI bridge is found, configure the bridge separately, 1143 * which will cause a recursive call back here. 1144 */ 1145 static int 1146 configure_bus(pciconf_bus_t *pb) 1147 { 1148 pciconf_dev_t *pd; 1149 int def_ltim, max_ltim, band, bus_mhz; 1150 1151 if (pb->ndevs == 0) { 1152 if (pci_conf_debug) 1153 printf("PCI bus %d - no devices\n", pb->busno); 1154 return 1; 1155 } 1156 bus_mhz = pb->freq_66 ? 66 : 33; 1157 max_ltim = pb->max_mingnt * bus_mhz / 4; /* cvt to cycle count */ 1158 band = 4000000; /* 0.25us cycles/sec */ 1159 if (band < pb->bandwidth_used) { 1160 printf("PCI bus %d: Warning: Total bandwidth exceeded!? (%d)\n", 1161 pb->busno, pb->bandwidth_used); 1162 def_ltim = -1; 1163 } else { 1164 def_ltim = (band - pb->bandwidth_used) / pb->ndevs; 1165 if (def_ltim > pb->min_maxlat) 1166 def_ltim = pb->min_maxlat; 1167 def_ltim = def_ltim * bus_mhz / 4; 1168 } 1169 def_ltim = (def_ltim + 7) & ~7; 1170 max_ltim = (max_ltim + 7) & ~7; 1171 1172 pb->def_ltim = MIN(def_ltim, 255); 1173 pb->max_ltim = MIN(MAX(max_ltim, def_ltim), 255); 1174 1175 /* 1176 * Now we have what we need to initialize the devices. 1177 * It would probably be better if we could allocate all of these 1178 * for all busses at once, but "not right now". First, get a list 1179 * of free memory ranges from the m.d. system. 1180 */ 1181 if (setup_iowins(pb) || setup_memwins(pb)) { 1182 printf("PCI bus configuration failed: " 1183 "unable to assign all I/O and memory ranges.\n"); 1184 return -1; 1185 } 1186 1187 /* 1188 * Configure the latency for the devices, and enable them. 1189 */ 1190 for (pd = pb->device; pd < &pb->device[pb->ndevs]; pd++) { 1191 pcireg_t cmd, classreg, misc; 1192 int ltim; 1193 1194 if (pci_conf_debug) { 1195 print_tag(pd->pc, pd->tag); 1196 printf("Configuring device.\n"); 1197 } 1198 classreg = pci_conf_read(pd->pc, pd->tag, PCI_CLASS_REG); 1199 misc = pci_conf_read(pd->pc, pd->tag, PCI_BHLC_REG); 1200 cmd = pci_conf_read(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG); 1201 if (pd->enable & PCI_CONF_ENABLE_PARITY) 1202 cmd |= PCI_COMMAND_PARITY_ENABLE; 1203 if (pd->enable & PCI_CONF_ENABLE_SERR) 1204 cmd |= PCI_COMMAND_SERR_ENABLE; 1205 if (pb->fast_b2b) 1206 cmd |= PCI_COMMAND_BACKTOBACK_ENABLE; 1207 if (PCI_CLASS(classreg) != PCI_CLASS_BRIDGE || 1208 PCI_SUBCLASS(classreg) != PCI_SUBCLASS_BRIDGE_PCI) { 1209 if (pd->enable & PCI_CONF_ENABLE_IO) 1210 cmd |= PCI_COMMAND_IO_ENABLE; 1211 if (pd->enable & PCI_CONF_ENABLE_MEM) 1212 cmd |= PCI_COMMAND_MEM_ENABLE; 1213 if (pd->enable & PCI_CONF_ENABLE_BM) 1214 cmd |= PCI_COMMAND_MASTER_ENABLE; 1215 ltim = pd->min_gnt * bus_mhz / 4; 1216 ltim = MIN (MAX (pb->def_ltim, ltim), pb->max_ltim); 1217 } else { 1218 cmd |= PCI_COMMAND_MASTER_ENABLE; 1219 ltim = MIN (pb->def_ltim, pb->max_ltim); 1220 } 1221 if ((pd->enable & 1222 (PCI_CONF_ENABLE_MEM | PCI_CONF_ENABLE_IO)) == 0) { 1223 print_tag(pd->pc, pd->tag); 1224 printf("Disabled due to lack of resources.\n"); 1225 cmd &= ~(PCI_COMMAND_MASTER_ENABLE | 1226 PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE); 1227 } 1228 pci_conf_write(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG, cmd); 1229 1230 misc &= ~((PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT) | 1231 (PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT)); 1232 misc |= (ltim & PCI_LATTIMER_MASK) << PCI_LATTIMER_SHIFT; 1233 misc |= ((pb->cacheline_size >> 2) & PCI_CACHELINE_MASK) << 1234 PCI_CACHELINE_SHIFT; 1235 pci_conf_write(pd->pc, pd->tag, PCI_BHLC_REG, misc); 1236 1237 if (pd->ppb) { 1238 if (configure_bridge(pd) < 0) 1239 return -1; 1240 continue; 1241 } 1242 } 1243 1244 if (pci_conf_debug) 1245 printf("PCI bus %d configured\n", pb->busno); 1246 1247 return 0; 1248 } 1249 1250 static bool 1251 mem_region_ok64(struct pciconf_resource * const r __used_only_lp64) 1252 { 1253 bool rv = false; 1254 1255 #ifdef _LP64 1256 /* 1257 * XXX We need to guard this with _LP64 because vmem uses 1258 * uintptr_t internally. 1259 */ 1260 vmem_size_t result; 1261 if (vmem_xalloc(r->arena, 1/*size*/, 1/*align*/, 0/*phase*/, 1262 0/*nocross*/, (1UL << 32), VMEM_ADDR_MAX, 1263 VM_INSTANTFIT | VM_NOSLEEP, &result) == 0) { 1264 vmem_free(r->arena, result, 1); 1265 rv = true; 1266 } 1267 #endif /* _LP64 */ 1268 1269 return rv; 1270 } 1271 1272 /* 1273 * pciconf_resource_init: 1274 * 1275 * Allocate and initilize a pci configuration resources container. 1276 */ 1277 struct pciconf_resources * 1278 pciconf_resource_init(void) 1279 { 1280 struct pciconf_resources *rs; 1281 1282 rs = kmem_zalloc(sizeof(*rs), KM_SLEEP); 1283 1284 return (rs); 1285 } 1286 1287 /* 1288 * pciconf_resource_fini: 1289 * 1290 * Dispose of a pci configuration resources container. 1291 */ 1292 void 1293 pciconf_resource_fini(struct pciconf_resources *rs) 1294 { 1295 int i; 1296 1297 for (i = 0; i < PCICONF_RESOURCE_NTYPES; i++) { 1298 fini_range_resource(&rs->resources[i]); 1299 } 1300 1301 kmem_free(rs, sizeof(*rs)); 1302 } 1303 1304 /* 1305 * pciconf_resource_add: 1306 * 1307 * Add a pci configuration resource to a container. 1308 */ 1309 int 1310 pciconf_resource_add(struct pciconf_resources *rs, int type, 1311 bus_addr_t start, bus_size_t size) 1312 { 1313 bus_addr_t end = start + (size - 1); 1314 struct pciconf_resource *r; 1315 int error; 1316 bool first; 1317 1318 if (size == 0 || end <= start) 1319 return EINVAL; 1320 1321 if (type < 0 || type >= PCICONF_RESOURCE_NTYPES) 1322 return EINVAL; 1323 1324 r = &rs->resources[type]; 1325 1326 first = r->arena == NULL; 1327 if (first) { 1328 r->arena = create_vmem_arena(pciconf_resource_names[type], 1329 0, 0, VM_SLEEP); 1330 r->min_addr = VMEM_ADDR_MAX; 1331 r->max_addr = VMEM_ADDR_MIN; 1332 } 1333 1334 error = vmem_add(r->arena, start, size, VM_SLEEP); 1335 if (error == 0) { 1336 if (start < r->min_addr) 1337 r->min_addr = start; 1338 if (end > r->max_addr) 1339 r->max_addr = end; 1340 } 1341 1342 r->total_size += size; 1343 1344 return 0; 1345 } 1346 1347 /* 1348 * Let's configure the PCI bus. 1349 * This consists of basically scanning for all existing devices, 1350 * identifying their needs, and then making another pass over them 1351 * to set: 1352 * 1. I/O addresses 1353 * 2. Memory addresses (Prefetchable and not) 1354 * 3. PCI command register 1355 * 4. The latency part of the PCI BHLC (BIST (Built-In Self Test), 1356 * Header type, Latency timer, Cache line size) register 1357 * 1358 * The command register is set to enable fast back-to-back transactions 1359 * if the host bridge says it can handle it. We also configure 1360 * Master Enable, SERR enable, parity enable, and (if this is not a 1361 * PCI-PCI bridge) the I/O and Memory spaces. Apparently some devices 1362 * will not report some I/O space. 1363 * 1364 * The latency is computed to be a "fair share" of the bus bandwidth. 1365 * The bus bandwidth variable is initialized to the number of PCI cycles 1366 * in one second. The number of cycles taken for one transaction by each 1367 * device (MAX_LAT + MIN_GNT) is then subtracted from the bandwidth. 1368 * Care is taken to ensure that the latency timer won't be set such that 1369 * it would exceed the critical time for any device. 1370 * 1371 * This is complicated somewhat due to the presence of bridges. PCI-PCI 1372 * bridges are probed and configured recursively. 1373 */ 1374 int 1375 pci_configure_bus(pci_chipset_tag_t pc, struct pciconf_resources *rs, 1376 int firstbus, int cacheline_size) 1377 { 1378 pciconf_bus_t *pb; 1379 int rv; 1380 1381 pb = kmem_zalloc(sizeof (pciconf_bus_t), KM_SLEEP); 1382 pb->busno = firstbus; 1383 pb->next_busno = pb->busno + 1; 1384 pb->last_busno = 255; 1385 pb->cacheline_size = cacheline_size; 1386 pb->parent_bus = NULL; 1387 pb->swiz = 0; 1388 pb->io_32bit = 1; 1389 pb->io_res = rs->resources[PCICONF_RESOURCE_IO]; 1390 1391 pb->mem_res = rs->resources[PCICONF_RESOURCE_MEM]; 1392 if (pb->mem_res.arena == NULL) 1393 pb->mem_res = rs->resources[PCICONF_RESOURCE_PREFETCHABLE_MEM]; 1394 1395 pb->pmem_res = rs->resources[PCICONF_RESOURCE_PREFETCHABLE_MEM]; 1396 if (pb->pmem_res.arena == NULL) 1397 pb->pmem_res = rs->resources[PCICONF_RESOURCE_MEM]; 1398 1399 /* 1400 * Probe the memory region arenas to see if allocation of 1401 * 64-bit addresses is possible. 1402 */ 1403 pb->mem_64bit = mem_region_ok64(&pb->mem_res); 1404 pb->pmem_64bit = mem_region_ok64(&pb->pmem_res); 1405 1406 pb->pc = pc; 1407 pb->io_total = pb->mem_total = pb->pmem_total = 0; 1408 1409 rv = probe_bus(pb); 1410 pb->last_busno = pb->next_busno - 1; 1411 if (rv == 0) 1412 rv = configure_bus(pb); 1413 1414 /* 1415 * All done! 1416 */ 1417 kmem_free(pb, sizeof(*pb)); 1418 return rv; 1419 } 1420