1 /* $NetBSD: acpi_pci_machdep.c,v 1.18 2020/06/17 06:45:09 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 2018, 2020 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jared McNeill <jmcneill@invisible.ca>. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
30 */ 31 32 #define _INTR_PRIVATE 33 34 #include <sys/cdefs.h> 35 __KERNEL_RCSID(0, "$NetBSD: acpi_pci_machdep.c,v 1.18 2020/06/17 06:45:09 thorpej Exp $"); 36 37 #include <sys/param.h> 38 #include <sys/bus.h> 39 #include <sys/device.h> 40 #include <sys/intr.h> 41 #include <sys/systm.h> 42 #include <sys/kernel.h> 43 #include <sys/queue.h> 44 #include <sys/mutex.h> 45 #include <sys/kmem.h> 46 #include <sys/cpu.h> 47 48 #include <arm/cpufunc.h> 49 50 #include <arm/pic/picvar.h> 51 52 #include <dev/pci/pcireg.h> 53 #include <dev/pci/pcivar.h> 54 #include <dev/pci/pciconf.h> 55 56 #include <dev/acpi/acpivar.h> 57 #include <dev/acpi/acpi_mcfg.h> 58 #include <dev/acpi/acpi_pci.h> 59 60 #include <arm/acpi/acpi_iort.h> 61 #include <arm/acpi/acpi_pci_machdep.h> 62 63 #include <arm/pci/pci_msi_machdep.h> 64 65 struct acpi_pci_prt { 66 u_int prt_segment; 67 u_int prt_bus; 68 ACPI_HANDLE prt_handle; 69 TAILQ_ENTRY(acpi_pci_prt) prt_list; 70 }; 71 72 static TAILQ_HEAD(, acpi_pci_prt) acpi_pci_irq_routes = 73 TAILQ_HEAD_INITIALIZER(acpi_pci_irq_routes); 74 75 struct acpi_pci_pct { 76 struct acpi_pci_context pct_ap; 77 TAILQ_ENTRY(acpi_pci_pct) pct_list; 78 }; 79 80 static TAILQ_HEAD(, acpi_pci_pct) acpi_pci_chipset_tags = 81 TAILQ_HEAD_INITIALIZER(acpi_pci_chipset_tags); 82 83 struct acpi_pci_intr; 84 85 struct acpi_pci_intr { 86 struct pic_softc pi_pic; 87 int pi_irqbase; 88 int pi_irq; 89 uint32_t pi_unblocked; 90 void *pi_ih; 91 TAILQ_ENTRY(acpi_pci_intr) pi_list; 92 }; 93 94 static TAILQ_HEAD(, acpi_pci_intr) acpi_pci_intrs = 95 TAILQ_HEAD_INITIALIZER(acpi_pci_intrs); 96 97 static const struct acpi_pci_quirk acpi_pci_quirks[] = { 98 /* OEM ID OEM Table ID Revision Seg Func */ 99 { "AMAZON", "GRAVITON", 0, -1, acpi_pci_graviton_init }, 100 { "ARMLTD", "ARMN1SDP", 0x20181101, 0, acpi_pci_n1sdp_init }, 101 { "ARMLTD", "ARMN1SDP", 0x20181101, 1, acpi_pci_n1sdp_init }, 102 { "NXP ", "LX2160 ", 0, -1, acpi_pci_layerscape_gen4_init }, 103 }; 104 105 pci_chipset_tag_t 
acpi_pci_md_get_chipset_tag(struct acpi_softc *, int, int); 106 107 static void acpi_pci_md_attach_hook(device_t, device_t, 108 struct pcibus_attach_args *); 109 static int acpi_pci_md_bus_maxdevs(void *, int); 110 static pcitag_t acpi_pci_md_make_tag(void *, int, int, int); 111 static void acpi_pci_md_decompose_tag(void *, pcitag_t, int *, int *, int *); 112 static u_int acpi_pci_md_get_segment(void *); 113 static uint32_t acpi_pci_md_get_devid(void *, uint32_t); 114 static uint32_t acpi_pci_md_get_frameid(void *, uint32_t); 115 static pcireg_t acpi_pci_md_conf_read(void *, pcitag_t, int); 116 static void acpi_pci_md_conf_write(void *, pcitag_t, int, pcireg_t); 117 static int acpi_pci_md_conf_hook(void *, int, int, int, pcireg_t); 118 static void acpi_pci_md_conf_interrupt(void *, int, int, int, int, int *); 119 120 static int acpi_pci_md_intr_map(const struct pci_attach_args *, 121 pci_intr_handle_t *); 122 static const char *acpi_pci_md_intr_string(void *, pci_intr_handle_t, 123 char *, size_t); 124 static const struct evcnt *acpi_pci_md_intr_evcnt(void *, pci_intr_handle_t); 125 static int acpi_pci_md_intr_setattr(void *, pci_intr_handle_t *, int, 126 uint64_t); 127 static void * acpi_pci_md_intr_establish(void *, pci_intr_handle_t, 128 int, int (*)(void *), void *, 129 const char *); 130 static void acpi_pci_md_intr_disestablish(void *, void *); 131 132 struct arm32_pci_chipset arm_acpi_pci_chipset = { 133 .pc_attach_hook = acpi_pci_md_attach_hook, 134 .pc_bus_maxdevs = acpi_pci_md_bus_maxdevs, 135 .pc_make_tag = acpi_pci_md_make_tag, 136 .pc_decompose_tag = acpi_pci_md_decompose_tag, 137 .pc_get_segment = acpi_pci_md_get_segment, 138 .pc_get_devid = acpi_pci_md_get_devid, 139 .pc_get_frameid = acpi_pci_md_get_frameid, 140 .pc_conf_read = acpi_pci_md_conf_read, 141 .pc_conf_write = acpi_pci_md_conf_write, 142 .pc_conf_hook = acpi_pci_md_conf_hook, 143 .pc_conf_interrupt = acpi_pci_md_conf_interrupt, 144 145 .pc_intr_map = acpi_pci_md_intr_map, 146 
.pc_intr_string = acpi_pci_md_intr_string, 147 .pc_intr_evcnt = acpi_pci_md_intr_evcnt, 148 .pc_intr_setattr = acpi_pci_md_intr_setattr, 149 .pc_intr_establish = acpi_pci_md_intr_establish, 150 .pc_intr_disestablish = acpi_pci_md_intr_disestablish, 151 }; 152 153 static ACPI_STATUS 154 acpi_pci_md_pci_link(ACPI_HANDLE handle, pci_chipset_tag_t pc, int bus) 155 { 156 ACPI_PCI_ROUTING_TABLE *prt; 157 ACPI_HANDLE linksrc; 158 ACPI_BUFFER buf; 159 ACPI_STATUS rv; 160 void *linkdev; 161 162 rv = acpi_get(handle, &buf, AcpiGetIrqRoutingTable); 163 if (ACPI_FAILURE(rv)) 164 return rv; 165 166 for (char *p = buf.Pointer; ; p += prt->Length) { 167 prt = (ACPI_PCI_ROUTING_TABLE *)p; 168 if (prt->Length == 0) 169 break; 170 171 const u_int dev = ACPI_HIWORD(prt->Address); 172 if (prt->Source[0] != 0) { 173 aprint_debug("ACPI: %s dev %u INT%c on lnkdev %s\n", 174 acpi_name(handle), dev, 'A' + (prt->Pin & 3), prt->Source); 175 rv = AcpiGetHandle(ACPI_ROOT_OBJECT, prt->Source, &linksrc); 176 if (ACPI_FAILURE(rv)) { 177 aprint_debug("ACPI: AcpiGetHandle failed for '%s': %s\n", 178 prt->Source, AcpiFormatException(rv)); 179 continue; 180 } 181 182 linkdev = acpi_pci_link_devbyhandle(linksrc); 183 acpi_pci_link_add_reference(linkdev, pc, 0, bus, dev, prt->Pin & 3); 184 } else { 185 aprint_debug("ACPI: %s dev %u INT%c on globint %d\n", 186 acpi_name(handle), dev, 'A' + (prt->Pin & 3), prt->SourceIndex); 187 } 188 } 189 190 return AE_OK; 191 } 192 193 static void 194 acpi_pci_md_attach_hook(device_t parent, device_t self, 195 struct pcibus_attach_args *pba) 196 { 197 struct acpi_pci_context *ap = pba->pba_pc->pc_conf_v; 198 struct acpi_pci_prt *prt, *prtp; 199 struct acpi_devnode *ad; 200 ACPI_HANDLE handle; 201 int seg, bus, dev, func; 202 203 seg = ap->ap_seg; 204 handle = NULL; 205 206 if (pba->pba_bridgetag) { 207 /* 208 * Find the PCI address of our parent bridge and look for the 209 * corresponding ACPI device node. 
If there is no node for this 210 * bus, use the parent bridge routing information. 211 */ 212 acpi_pci_md_decompose_tag(NULL, *pba->pba_bridgetag, &bus, &dev, &func); 213 ad = acpi_pcidev_find(seg, bus, dev, func); 214 if (ad != NULL) { 215 handle = ad->ad_handle; 216 } else { 217 /* No routes defined for this bus, copy from parent */ 218 TAILQ_FOREACH(prtp, &acpi_pci_irq_routes, prt_list) 219 if (prtp->prt_bus == bus) { 220 handle = prtp->prt_handle; 221 break; 222 } 223 } 224 } else { 225 /* 226 * Lookup the ACPI device node for the root bus. 227 */ 228 ad = acpi_pciroot_find(seg, 0); 229 if (ad != NULL) 230 handle = ad->ad_handle; 231 } 232 233 if (handle != NULL) { 234 prt = kmem_alloc(sizeof(*prt), KM_SLEEP); 235 prt->prt_bus = pba->pba_bus; 236 prt->prt_segment = ap->ap_seg; 237 prt->prt_handle = handle; 238 TAILQ_INSERT_TAIL(&acpi_pci_irq_routes, prt, prt_list); 239 } 240 241 acpimcfg_map_bus(self, pba->pba_pc, pba->pba_bus); 242 243 if (ad != NULL) { 244 /* 245 * This is a new ACPI managed bus. Add PCI link references. 
246 */ 247 acpi_pci_md_pci_link(ad->ad_handle, pba->pba_pc, pba->pba_bus); 248 } 249 } 250 251 static int 252 acpi_pci_md_bus_maxdevs(void *v, int busno) 253 { 254 return 32; 255 } 256 257 static pcitag_t 258 acpi_pci_md_make_tag(void *v, int b, int d, int f) 259 { 260 return (b << 16) | (d << 11) | (f << 8); 261 } 262 263 static void 264 acpi_pci_md_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp) 265 { 266 if (bp) 267 *bp = (tag >> 16) & 0xff; 268 if (dp) 269 *dp = (tag >> 11) & 0x1f; 270 if (fp) 271 *fp = (tag >> 8) & 0x7; 272 } 273 274 static u_int 275 acpi_pci_md_get_segment(void *v) 276 { 277 struct acpi_pci_context * const ap = v; 278 279 return ap->ap_seg; 280 } 281 282 static uint32_t 283 acpi_pci_md_get_devid(void *v, uint32_t devid) 284 { 285 struct acpi_pci_context * const ap = v; 286 287 return acpi_iort_pci_root_map(ap->ap_seg, devid); 288 } 289 290 static uint32_t 291 acpi_pci_md_get_frameid(void *v, uint32_t devid) 292 { 293 struct acpi_pci_context * const ap = v; 294 295 return acpi_iort_its_id_map(ap->ap_seg, devid); 296 } 297 298 static pcireg_t 299 acpi_pci_md_conf_read(void *v, pcitag_t tag, int offset) 300 { 301 struct acpi_pci_context * const ap = v; 302 pcireg_t val; 303 304 if (offset < 0 || offset >= PCI_EXTCONF_SIZE) 305 return (pcireg_t) -1; 306 307 if (ap->ap_conf_read != NULL) 308 ap->ap_conf_read(&ap->ap_pc, tag, offset, &val); 309 else 310 acpimcfg_conf_read(&ap->ap_pc, tag, offset, &val); 311 312 return val; 313 } 314 315 static void 316 acpi_pci_md_conf_write(void *v, pcitag_t tag, int offset, pcireg_t val) 317 { 318 struct acpi_pci_context * const ap = v; 319 320 if (offset < 0 || offset >= PCI_EXTCONF_SIZE) 321 return; 322 323 if (ap->ap_conf_write != NULL) 324 ap->ap_conf_write(&ap->ap_pc, tag, offset, val); 325 else 326 acpimcfg_conf_write(&ap->ap_pc, tag, offset, val); 327 } 328 329 static int 330 acpi_pci_md_conf_hook(void *v, int b, int d, int f, pcireg_t id) 331 { 332 return PCI_CONF_DEFAULT; 333 } 334 335 
/*
 * pc_conf_interrupt hook: interrupt routing is resolved from the ACPI
 * _PRT tables in acpi_pci_md_intr_map(), so nothing to do here.
 */
static void
acpi_pci_md_conf_interrupt(void *v, int bus, int dev, int ipin, int sqiz, int *ilinep)
{
}

/*
 * Look up the cached _PRT record for (segment, bus); returns NULL if
 * the attach hook recorded no route for that bus.
 */
static struct acpi_pci_prt *
acpi_pci_md_intr_find_prt(pci_chipset_tag_t pc, u_int bus)
{
	struct acpi_pci_prt *prt, *prtp;
	u_int segment;

	segment = pci_get_segment(pc);

	prt = NULL;
	TAILQ_FOREACH(prtp, &acpi_pci_irq_routes, prt_list)
		if (prtp->prt_segment == segment && prtp->prt_bus == bus) {
			prt = prtp;
			break;
		}

	return prt;
}

/*
 * Map a device's INTx pin to an interrupt handle by walking the _PRT
 * for its bus. Link-device entries are routed through the PCI link
 * code; direct entries use the global interrupt number as-is.
 *
 * Returns 0 on success, EINVAL (no pin), ENXIO (no route table),
 * EIO (_PRT unreadable) or ENOENT (no matching entry).
 */
static int
acpi_pci_md_intr_map(const struct pci_attach_args *pa, pci_intr_handle_t *ih)
{
	struct acpi_pci_prt *prt;
	ACPI_PCI_ROUTING_TABLE *tab;
	int line, pol, trig, error;
	ACPI_HANDLE linksrc;
	ACPI_BUFFER buf;
	void *linkdev;

	if (pa->pa_intrpin == PCI_INTERRUPT_PIN_NONE)
		return EINVAL;

	prt = acpi_pci_md_intr_find_prt(pa->pa_pc, pa->pa_bus);
	if (prt == NULL)
		return ENXIO;

	if (ACPI_FAILURE(acpi_get(prt->prt_handle, &buf, AcpiGetIrqRoutingTable)))
		return EIO;

	error = ENOENT;
	/* Table is terminated by an entry with Length == 0. */
	for (char *p = buf.Pointer; ; p += tab->Length) {
		tab = (ACPI_PCI_ROUTING_TABLE *)p;
		if (tab->Length == 0)
			break;

		/* _PRT pins are 0-based; pa_intrpin is 1-based (INTA == 1). */
		if (pa->pa_device == ACPI_HIWORD(tab->Address) &&
		    (pa->pa_intrpin - 1) == (tab->Pin & 3)) {
			if (tab->Source[0] != 0) {
				if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, tab->Source, &linksrc)))
					goto done;
				linkdev = acpi_pci_link_devbyhandle(linksrc);
				*ih = acpi_pci_link_route_interrupt(linkdev,
				    pa->pa_pc, tab->SourceIndex,
				    &line, &pol, &trig);
				error = 0;
				goto done;
			} else {
				*ih = tab->SourceIndex;
				error = 0;
				goto done;
			}
		}
	}

done:
	/* Buffer from acpi_get() is caller-owned. */
	ACPI_FREE(buf.Pointer);
	return error;
}

/*
 * Format an interrupt handle for display; notes the MSI/MSI-X vector
 * when the handle carries one.
 */
static const char *
acpi_pci_md_intr_string(void *v, pci_intr_handle_t ih, char *buf, size_t len)
{
	const int irq = __SHIFTOUT(ih, ARM_PCI_INTR_IRQ);
	const int vec = __SHIFTOUT(ih, ARM_PCI_INTR_MSI_VEC);

	if (ih & ARM_PCI_INTR_MSIX)
		snprintf(buf, len, "irq %d (MSI-X vec %d)", irq, vec);
	else if (ih & ARM_PCI_INTR_MSI)
		snprintf(buf, len, "irq %d (MSI vec %d)", irq, vec);
	else
		snprintf(buf, len, "irq %d", irq);

	return buf;
}

/* No per-interrupt event counters are maintained. */
static const struct evcnt *
acpi_pci_md_intr_evcnt(void *v, pci_intr_handle_t ih)
{
	return NULL;
}

/*
 * Set an attribute on an interrupt handle. Only PCI_INTR_MPSAFE is
 * supported; it toggles the ARM_PCI_INTR_MPSAFE flag bit.
 */
static int
acpi_pci_md_intr_setattr(void *v, pci_intr_handle_t *ih, int attr, uint64_t data)
{
	switch (attr) {
	case PCI_INTR_MPSAFE:
		if (data)
			*ih |= ARM_PCI_INTR_MPSAFE;
		else
			*ih &= ~ARM_PCI_INTR_MPSAFE;
		return 0;
	default:
		return ENODEV;
	}
}

/* Find the software PIC already interposed on a hardware IRQ, if any. */
static struct acpi_pci_intr *
acpi_pci_md_intr_lookup(int irq)
{
	struct acpi_pci_intr *pi;

	TAILQ_FOREACH(pi, &acpi_pci_intrs, pi_list)
		if (pi->pi_irq == irq)
			return pi;

	return NULL;
}

/* pic(9) callback: mark the given source slots as unblocked. */
static void
acpi_pci_md_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t irqmask)
{
	struct acpi_pci_intr * const pi = (struct acpi_pci_intr *)pic;

	pi->pi_unblocked |= irqmask;
}

/* pic(9) callback: mark the given source slots as blocked. */
static void
acpi_pci_md_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t irqmask)
{
	struct acpi_pci_intr * const pi = (struct acpi_pci_intr *)pic;

	pi->pi_unblocked &= ~irqmask;
}

/*
 * pic(9) callback: a shared-line interrupt fired; since the line is
 * level-triggered and shared, mark every unblocked source pending so
 * all registered handlers get to run.
 */
static int
acpi_pci_md_find_pending_irqs(struct pic_softc *pic)
{
	struct acpi_pci_intr * const pi = (struct acpi_pci_intr *)pic;

	pic_mark_pending_sources(pic, 0, pi->pi_unblocked);

	return 1;
}

/* pic(9) callback: no per-source hardware setup needed. */
static void
acpi_pci_md_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
}

/* pic(9) callback: name a source by its slot index on the shared line. */
static void
acpi_pci_md_source_name(struct pic_softc *pic, int irq, char *buf, size_t len)
{
	snprintf(buf, len, "slot %d", irq);
}

/* Operations for the per-IRQ software PIC (see struct acpi_pci_intr). */
static struct pic_ops acpi_pci_pic_ops = {
	.pic_unblock_irqs = acpi_pci_md_unblock_irqs,
	.pic_block_irqs = acpi_pci_md_block_irqs,
	.pic_find_pending_irqs = acpi_pci_md_find_pending_irqs,
	.pic_establish_irq = acpi_pci_md_establish_irq,
	.pic_source_name = acpi_pci_md_source_name,
};

/*
 * Establish a PCI interrupt handler. MSI/MSI-X handles are delegated
 * to the arm MSI code. Legacy INTx handles share the hardware line
 * through a software PIC: the first sharer creates the PIC and hooks
 * the real IRQ (level-triggered, IPL_SCHED, always MP-safe at the
 * shared layer); each sharer then occupies a free source slot.
 *
 * Returns an interrupt cookie, or NULL if the line could not be hooked
 * or all 32 sharing slots are in use.
 */
static void *
acpi_pci_md_intr_establish(void *v, pci_intr_handle_t ih, int ipl,
    int (*callback)(void *), void *arg, const char *xname)
{
	struct acpi_pci_context * const ap = v;
	struct acpi_pci_intr *pi;
	int slot;

	if ((ih & (ARM_PCI_INTR_MSI | ARM_PCI_INTR_MSIX)) != 0)
		return arm_pci_msi_intr_establish(&ap->ap_pc, ih, ipl, callback, arg, xname);

	const int irq = (int)__SHIFTOUT(ih, ARM_PCI_INTR_IRQ);
	const int mpsafe = (ih & ARM_PCI_INTR_MPSAFE) ? IST_MPSAFE : 0;

	pi = acpi_pci_md_intr_lookup(irq);
	if (pi == NULL) {
		pi = kmem_zalloc(sizeof(*pi), KM_SLEEP);
		pi->pi_irq = irq;
		snprintf(pi->pi_pic.pic_name, sizeof(pi->pi_pic.pic_name),
		    "PCI irq %d", irq);
		pi->pi_pic.pic_maxsources = 32;
		pi->pi_pic.pic_ops = &acpi_pci_pic_ops;
		pi->pi_irqbase = pic_add(&pi->pi_pic, PIC_IRQBASE_ALLOC);
		TAILQ_INSERT_TAIL(&acpi_pci_intrs, pi, pi_list);
		pi->pi_ih = intr_establish_xname(irq, IPL_SCHED, IST_LEVEL | IST_MPSAFE,
		    pic_handle_intr, &pi->pi_pic, device_xname(ap->ap_dev));
	}
	if (pi->pi_ih == NULL)
		return NULL;

	/* Find a free slot */
	for (slot = 0; slot < pi->pi_pic.pic_maxsources; slot++)
		if (pi->pi_pic.pic_sources[slot] == NULL)
			break;
	if (slot == pi->pi_pic.pic_maxsources)
		return NULL;

	return intr_establish_xname(pi->pi_irqbase + slot, ipl, IST_LEVEL | mpsafe,
	    callback, arg, xname);
}

/* Tear down a handler established by acpi_pci_md_intr_establish(). */
static void
acpi_pci_md_intr_disestablish(void *v, void *vih)
{
	intr_disestablish(vih);
}

/*
 * Match the system's MCFG table header against the quirk table.
 * Returns the first quirk whose OEM id/table-id/revision match and
 * whose segment is either a wildcard (-1) or equal to 'seg'; NULL if
 * none applies or no MCFG is present.
 */
const struct acpi_pci_quirk *
acpi_pci_md_find_quirk(int seg)
{
	ACPI_STATUS rv;
	ACPI_TABLE_MCFG *mcfg;
	u_int n;

	rv = AcpiGetTable(ACPI_SIG_MCFG, 0, (ACPI_TABLE_HEADER **)&mcfg);
	if (ACPI_FAILURE(rv))
		return NULL;

	for (n = 0; n < __arraycount(acpi_pci_quirks); n++) {
		const struct acpi_pci_quirk *q = &acpi_pci_quirks[n];
		if (memcmp(q->q_oemid, mcfg->Header.OemId, ACPI_OEM_ID_SIZE) == 0 &&
		    memcmp(q->q_oemtableid, mcfg->Header.OemTableId, ACPI_OEM_TABLE_ID_SIZE) == 0 &&
		    q->q_oemrevision == mcfg->Header.OemRevision &&
		    (q->q_segment == -1 || q->q_segment == seg))
			return q;
	}

	return NULL;
}

/*
 * Return (creating on first use) the chipset tag for a PCI segment.
 * The tag is a specialized copy of arm_acpi_pci_chipset with the quirk
 * init routine applied when the platform matches the quirk table.
 *
 * Also exported as acpi_get_pci_chipset_tag via the strong alias below.
 */
pci_chipset_tag_t
acpi_pci_md_get_chipset_tag(struct acpi_softc *sc, int seg, int bbn)
{
	struct acpi_pci_pct *pct = NULL, *pctp;
	const struct acpi_pci_quirk *q;

	TAILQ_FOREACH(pctp, &acpi_pci_chipset_tags, pct_list)
		if (pctp->pct_ap.ap_seg == seg) {
			pct = pctp;
			break;
		}

	if (pct == NULL) {
		pct = kmem_zalloc(sizeof(*pct), KM_SLEEP);
		pct->pct_ap.ap_dev = sc->sc_dev;
		pct->pct_ap.ap_pc = arm_acpi_pci_chipset;
		pct->pct_ap.ap_pc.pc_conf_v = &pct->pct_ap;
		pct->pct_ap.ap_pc.pc_intr_v = &pct->pct_ap;
		pct->pct_ap.ap_seg = seg;
		pct->pct_ap.ap_bus = bbn;
		/*
		 * NOTE(review): reads the global acpi_softc rather than the
		 * 'sc' argument for sc_memt -- presumably equivalent since
		 * there is one ACPI softc; confirm before relying on 'sc'.
		 */
		pct->pct_ap.ap_bst = acpi_softc->sc_memt;

		q = acpi_pci_md_find_quirk(seg);
		if (q != NULL)
			q->q_init(&pct->pct_ap);

		TAILQ_INSERT_TAIL(&acpi_pci_chipset_tags, pct, pct_list);
	}

	return &pct->pct_ap.ap_pc;
}
__strong_alias(acpi_get_pci_chipset_tag,acpi_pci_md_get_chipset_tag);