/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Code that is common between the pci(7d) and npe(7d) nexus drivers.
 * It shares the following:
 *	- interrupt code
 *	- pci_tools ioctl code
 *	- name_child code
 *	- set_parent_private_data code
 */

#include <sys/conf.h>
#include <sys/pci.h>
#include <sys/sunndi.h>
#include <sys/mach_intr.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/pci_intr_lib.h>
#include <sys/psm.h>
#include <sys/policy.h>
#include <sys/sysmacros.h>
#include <sys/clock.h>
#include <io/pcplusmp/apic.h>
#include <sys/pci_tools.h>
#include <io/pci/pci_var.h>
#include <io/pci/pci_tools_ext.h>
#include <io/pci/pci_common.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_impl.h>

/*
 * Function prototypes
 */
static int	pci_get_priority(dev_info_t *, ddi_intr_handle_impl_t *, int *);
static int	pci_get_nintrs(dev_info_t *, int, int *);
static int	pci_enable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);
static void	pci_disable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);

/* Extern declaration for pcplusmp module */
extern int	(*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
		    psm_intr_op_t, int *);


/*
 * pci_common_name_child:
 *
 *	Assign the address portion of the node name
 */
int
pci_common_name_child(dev_info_t *child, char *name, int namelen)
{
	int		dev, func, length;
	char		**unit_addr;
	uint_t		n;
	pci_regspec_t	*pci_rp;

	if (ndi_dev_is_persistent_node(child) == 0) {
		/*
		 * For .conf nodes, use the "unit-address" property
		 */
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) !=
		    DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "cannot find unit-address in %s.conf",
			    ddi_get_name(child));
			return (DDI_FAILURE);
		}
		if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
			cmn_err(CE_WARN, "unit-address property in %s.conf"
			    " not well-formed", ddi_get_name(child));
			ddi_prop_free(unit_addr);
			return (DDI_FAILURE);
		}
		(void) snprintf(name, namelen, "%s", *unit_addr);
		ddi_prop_free(unit_addr);
		return (DDI_SUCCESS);
	}

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "reg", (int **)&pci_rp, (uint_t *)&length) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cannot find reg property in %s",
		    ddi_get_name(child));
		return (DDI_FAILURE);
	}

	/* copy the device identifications */
	dev = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
	func = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

	/*
	 * free the memory allocated by ddi_prop_lookup_int_array
	 */
	ddi_prop_free(pci_rp);

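	/*
	 * Build the address part of the name: function 0 uses the device
	 * number alone (e.g. "1f"), other functions use "dev,func"
	 * (e.g. "1f,2").
	 */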
	if (func != 0) {
		(void) snprintf(name, namelen, "%x,%x", dev, func);
	} else {
		(void) snprintf(name, namelen, "%x", dev);
	}

	return (DDI_SUCCESS);
}

/*
 * Interrupt related code:
 *
 * The following busop is common to npe and pci drivers
 *	bus_introp
 */

/*
 * Create the ddi_parent_private_data for a pseudo child.
 */
void
pci_common_set_parent_private_data(dev_info_t *dip)
{
	struct ddi_parent_private_data *pdptr;

	pdptr = (struct ddi_parent_private_data *)kmem_zalloc(
	    (sizeof (struct ddi_parent_private_data) +
	    sizeof (struct intrspec)), KM_SLEEP);
	pdptr->par_intr = (struct intrspec *)(pdptr + 1);
	pdptr->par_nintr = 1;
	ddi_set_parent_data(dip, pdptr);
}

/*
 * pci_get_priority:
 *	Figure out the priority of the device
 */
static int
pci_get_priority(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp, int *pri)
{
	struct intrspec *ispec;

	DDI_INTR_NEXDBG((CE_CONT, "pci_get_priority: dip = 0x%p, hdlp = %p\n",
	    (void *)dip, (void *)hdlp));

	if ((ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
	    hdlp->ih_inum)) == NULL) {
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			int class = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "class-code", -1);

			*pri = (class == -1) ? 1 : pci_devclass_to_ipl(class);
			pci_common_set_parent_private_data(hdlp->ih_dip);
			ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
			    hdlp->ih_inum);
			return (DDI_SUCCESS);
		}
		return (DDI_FAILURE);
	}

	*pri = ispec->intrspec_pri;
	return (DDI_SUCCESS);
}


/*
 * pci_get_nintrs:
 *	Figure out how many interrupts the device supports
 */
static int
pci_get_nintrs(dev_info_t *dip, int type, int *nintrs)
{
	int	ret;

	*nintrs = 0;

	if (DDI_INTR_IS_MSI_OR_MSIX(type))
		ret = pci_msi_get_nintrs(dip, type, nintrs);
	else {
		ret = DDI_FAILURE;
		if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "interrupts", -1) != -1) {
			*nintrs = 1;
			ret = DDI_SUCCESS;
		}
	}

	return (ret);
}

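/*
 * Counter used in the DDI_INTROP_ALLOC case below to alternate the
 * interrupt priority assigned to the pcie_pci nexus on successive
 * vector allocations.
 */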
static int pcie_pci_intr_pri_counter = 0;

/*
 * pci_common_intr_ops: bus_intr_op() function for interrupt support
 */
int
pci_common_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int			priority = 0;
	int			psm_status = 0;
	int			pci_status = 0;
	int			pci_rval, psm_rval = PSM_FAILURE;
	int			types = 0;
	int			pciepci = 0;
	int			i, j;
	int			behavior;
	ddi_intrspec_t		isp;
	struct intrspec		*ispec;
	ddi_intr_handle_impl_t	tmp_hdl;
	ddi_intr_msix_t		*msix_p;

	DDI_INTR_NEXDBG((CE_CONT,
	    "pci_common_intr_ops: pdip 0x%p, rdip 0x%p, op %x handle 0x%p\n",
	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));

	/* Process the request */
	switch (intr_op) {
	case DDI_INTROP_SUPPORTED_TYPES:
		/* Fixed is supported by default */
		*(int *)result = DDI_INTR_TYPE_FIXED;

		/* Figure out if MSI or MSI-X is supported */
		if (pci_msi_get_supported_type(rdip, &types) != DDI_SUCCESS)
			return (DDI_SUCCESS);

		if (psm_intr_ops != NULL) {
			/* MSI or MSI-X is supported, OR it in */
			*(int *)result |= types;

			tmp_hdl.ih_type = *(int *)result;
			(void) (*psm_intr_ops)(rdip, &tmp_hdl,
			    PSM_INTR_OP_CHECK_MSI, result);
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "rdip: 0x%p supported types: 0x%x\n", (void *)rdip,
			    *(int *)result));
		}
		break;
	case DDI_INTROP_NINTRS:
		if (pci_get_nintrs(rdip, hdlp->ih_type, result) != DDI_SUCCESS)
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_ALLOC:
		/*
		 * MSI or MSI-X: figure out the number of vectors available.
		 * FIXED interrupts: just return available interrupts.
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL) &&
		    (pci_get_priority(rdip, hdlp, &priority) == DDI_SUCCESS)) {
			/*
			 * The following check is a special case for
			 * 'pcie_pci'.  It makes sure vectors with the right
			 * priority are allocated for pcie_pci at ALLOC time.
			 */
			if (strcmp(ddi_driver_name(rdip), "pcie_pci") == 0) {
				hdlp->ih_pri =
				    (pcie_pci_intr_pri_counter % 2) ? 4 : 7;
				pciepci = 1;
			} else
				hdlp->ih_pri = priority;
			behavior = hdlp->ih_scratch2;
			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_ALLOC_VECTORS, result);

			/* verify behavior flag and take appropriate action */
			if ((behavior == DDI_INTR_ALLOC_STRICT) &&
			    (*(int *)result < hdlp->ih_scratch1)) {
				DDI_INTR_NEXDBG((CE_CONT,
				    "pci_common_intr_ops: behavior %x, "
				    "couldn't get enough intrs\n", behavior));
				hdlp->ih_scratch1 = *(int *)result;
				(void) (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_FREE_VECTORS, NULL);
				return (DDI_EAGAIN);
			}

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				if (!(msix_p = i_ddi_get_msix(hdlp->ih_dip))) {
					msix_p = pci_msix_init(hdlp->ih_dip);
					if (msix_p)
						i_ddi_set_msix(hdlp->ih_dip,
						    msix_p);
				}
				msix_p->msix_intrs_in_use += *(int *)result;
			}

			if (pciepci) {
				/* update priority in ispec */
				isp = pci_intx_get_ispec(pdip, rdip,
				    (int)hdlp->ih_inum);
				ispec = (struct intrspec *)isp;
				if (ispec)
					ispec->intrspec_pri = hdlp->ih_pri;
				++pcie_pci_intr_pri_counter;
			}

		} else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			/* Figure out if this device supports MASKING */
			pci_rval = pci_intx_get_cap(rdip, &pci_status);
			if (pci_rval == DDI_SUCCESS && pci_status)
				hdlp->ih_cap |= pci_status;
			*(int *)result = 1;	/* DDI_INTR_TYPE_FIXED */
		} else
			return (DDI_FAILURE);
		break;
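	/*
	 * Free previously allocated vectors; for MSI-X, tear down the
	 * per-device MSI-X state once the last vector is released.
	 */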
	case DDI_INTROP_FREE:
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL)) {
			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_FREE_VECTORS, NULL);

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				msix_p = i_ddi_get_msix(hdlp->ih_dip);
				if (msix_p &&
				    --msix_p->msix_intrs_in_use == 0) {
					pci_msix_fini(msix_p);
					i_ddi_set_msix(hdlp->ih_dip, NULL);
				}
			}
		}
		break;
	case DDI_INTROP_GETPRI:
		/* Get the priority */
		if (pci_get_priority(rdip, hdlp, &priority) != DDI_SUCCESS)
			return (DDI_FAILURE);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "priority = 0x%x\n", priority));
		*(int *)result = priority;
		break;
	case DDI_INTROP_SETPRI:
		/* Validate the interrupt priority passed */
		if (*(int *)result > LOCK_LEVEL)
			return (DDI_FAILURE);

		/* Ensure that PSM is initialized */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		/* Change the priority */
		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
		    PSM_FAILURE)
			return (DDI_FAILURE);

		/* update ispec */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec)
			ispec->intrspec_pri = *(int *)result;
		break;
	case DDI_INTROP_ADDISR:
		/* update ispec */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec)
			ispec->intrspec_func = hdlp->ih_cb_func;
		break;
	case DDI_INTROP_REMISR:
		/* Get the interrupt structure pointer */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec)
			ispec->intrspec_func = (uint_t (*)()) 0;
		break;
	case DDI_INTROP_GETCAP:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_cap(rdip, hdlp->ih_type,
			    &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_cap(rdip, &pci_status);

		/* next check with pcplusmp */
		if (psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_CAP, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned psm_rval = %x, "
		    "psm_status = %x, pci_rval = %x, pci_status = %x\n",
		    psm_rval, psm_status, pci_rval, pci_status));

		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval == PSM_SUCCESS)
			*(int *)result = psm_status;

		if (pci_rval == DDI_SUCCESS)
			*(int *)result |= pci_status;

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_SETCAP:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "SETCAP cap=0x%x\n", *(int *)result));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) {
			DDI_INTR_NEXDBG((CE_CONT, "SETCAP: psm_intr_ops"
			    " returned failure\n"));
			return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_ENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if (pci_enable_intr(pdip, rdip, hdlp, hdlp->ih_inum) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);

		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE "
		    "vector=0x%x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_DISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE "
		    "vector = %x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_BLOCKENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKENABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check that psm_intr_ops is present */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

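		/*
		 * Enable each vector in the block; on failure, unwind the
		 * vectors that were already enabled.
		 */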
		for (i = 0; i < hdlp->ih_scratch1; i++) {
			if (pci_enable_intr(pdip, rdip, hdlp,
			    hdlp->ih_inum + i) != DDI_SUCCESS) {
				DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: "
				    "pci_enable_intr failed for %d\n", i));
				for (j = 0; j < i; j++)
					pci_disable_intr(pdip, rdip, hdlp,
					    hdlp->ih_inum + j);
				return (DDI_FAILURE);
			}
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKENABLE inum %x done\n", hdlp->ih_inum + i));
		}
		break;
	case DDI_INTROP_BLOCKDISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKDISABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKDISABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is present */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		for (i = 0; i < hdlp->ih_scratch1; i++) {
			pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum + i);
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKDISABLE inum %x done\n", hdlp->ih_inum + i));
		}
		break;
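	/*
	 * Masking is attempted in PCI config space first; only fixed
	 * interrupts whose config-space masking did not succeed fall
	 * through to the PSM further below.
	 */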
	case DDI_INTROP_SETMASK:
	case DDI_INTROP_CLRMASK:
		/*
		 * First handle it in config space
		 */
		if (intr_op == DDI_INTROP_SETMASK) {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_set_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_set_mask(rdip);
		} else {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_clr_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_clr_mask(rdip);
		}

		/* For MSI/X, no need to check with pcplusmp */
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (pci_status);

		/* For fixed interrupts only: handle config space first */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED &&
		    pci_status == DDI_SUCCESS)
			break;

		/* For fixed interrupts only: confer with pcplusmp next */
		if (psm_intr_ops != NULL) {
			/* If the interrupt is shared, do nothing */
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_SHARED, &psm_status);

			if (psm_rval == PSM_FAILURE || psm_status == 1)
				return (pci_status);

			/* Now, pcplusmp should try to set/clear the mask */
			if (intr_op == DDI_INTROP_SETMASK)
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_SET_MASK, NULL);
			else
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_CLEAR_MASK, NULL);
		}
		return ((psm_rval == PSM_FAILURE) ? DDI_FAILURE : DDI_SUCCESS);
	case DDI_INTROP_GETPENDING:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_pending(rdip, hdlp->ih_type,
			    hdlp->ih_inum, &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_pending(rdip, &pci_status);

		/* On failure, next try with pcplusmp */
		if (pci_rval != DDI_SUCCESS && psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_PENDING, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned "
		    "psm_rval = %x, psm_status = %x, pci_rval = %x, "
		    "pci_status = %x\n", psm_rval, psm_status, pci_rval,
		    pci_status));
		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval != PSM_FAILURE)
			*(int *)result = psm_status;
		else if (pci_rval != DDI_FAILURE)
			*(int *)result = pci_status;
		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_NAVAIL:
		if ((psm_intr_ops != NULL) && (pci_get_priority(rdip,
		    hdlp, &priority) == DDI_SUCCESS)) {
			/* Priority in the handle is not initialized yet */
			hdlp->ih_pri = priority;
			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_NAVAIL_VECTORS, result);
		} else {
			*(int *)result = 1;
		}
		DDI_INTR_NEXDBG((CE_CONT, "pci: NAVAIL returned = %x\n",
		    *(int *)result));
		break;
	default:
		return (i_ddi_intr_ops(pdip, rdip, intr_op, hdlp, result));
	}

	return (DDI_SUCCESS);
}

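/*
 * Query the PSM for information about the interrupt identified by vecirq;
 * is_irq selects whether vecirq is interpreted as an IRQ or as a vector.
 */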
int
pci_get_intr_from_vecirq(apic_get_intr_t *intrinfo_p,
    int vecirq, boolean_t is_irq)
{
	ddi_intr_handle_impl_t	get_info_ii_hdl;

	if (is_irq)
		intrinfo_p->avgi_req_flags |= PSMGI_INTRBY_IRQ;

	/*
	 * For this locally-declared and used handle, ih_private will contain
	 * a pointer to apic_get_intr_t, not an ihdl_plat_t as used for
	 * global interrupt handling.
	 */
	get_info_ii_hdl.ih_private = intrinfo_p;
	get_info_ii_hdl.ih_vector = (ushort_t)vecirq;

	if ((*psm_intr_ops)(NULL, &get_info_ii_hdl,
	    PSM_INTR_OP_GET_INTR, NULL) == PSM_FAILURE)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}


int
pci_get_cpu_from_vecirq(int vecirq, boolean_t is_irq)
{
	int rval;

	apic_get_intr_t intrinfo;
	intrinfo.avgi_req_flags = PSMGI_REQ_CPUID;
	rval = pci_get_intr_from_vecirq(&intrinfo, vecirq, is_irq);

	if (rval == DDI_SUCCESS)
		return (intrinfo.avgi_cpu_id);
	else
		return (-1);
}


static int
pci_enable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	struct intrspec	*ispec;
	int		irq;
	int		cpu_id;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: hdlp %p inum %x\n",
	    (void *)hdlp, inum));

	/* Get the ispec and, for MSI/MSI-X, record the vector in it */
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) && ispec)
		ispec->intrspec_vec = inum;
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq);
	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: priority=%x irq=%x\n",
	    hdlp->ih_pri, irq));

	/* Add the interrupt handler */
	if (!add_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func,
	    DEVI(rdip)->devi_name, irq, hdlp->ih_cb_arg1,
	    hdlp->ih_cb_arg2, &ihdl_plat_datap->ip_ticks, rdip))
		return (DDI_FAILURE);

	/* Note this really is an irq. */
	hdlp->ih_vector = (ushort_t)irq;

	/* Don't create kstats for unmoveable interrupts */
	if (((cpu_id = pci_get_cpu_from_vecirq(irq, IS_IRQ)) != -1) &&
	    (!(cpu_id & PSMGI_CPU_USER_BOUND)))
		pci_kstat_create(&ihdl_plat_datap->ip_ksp, pdip, hdlp);

	return (DDI_SUCCESS);
}

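/*
 * Undo pci_enable_intr(): delete any interrupt kstat and remove the
 * autovect handler for this interrupt.
 */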
static void
pci_disable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	int		irq;
	struct intrspec	*ispec;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_disable_intr: \n"));
	if (ihdl_plat_datap->ip_ksp != NULL) {
		pci_kstat_delete(ihdl_plat_datap->ip_ksp);
		ihdl_plat_datap->ip_ksp = NULL;
	}
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) && ispec)
		ispec->intrspec_vec = inum;
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq);

	/* Remove the interrupt handler */
	rem_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func, irq);
	ihdl_plat_datap->ip_ispecp = NULL;
}

/*
 * Miscellaneous library function
 */
int
pci_common_get_reg_prop(dev_info_t *dip, pci_regspec_t *pci_rp)
{
	int		i;
	int		number;
	int		assigned_addr_len;
	uint_t		phys_hi = pci_rp->pci_phys_hi;
	pci_regspec_t	*assigned_addr;

	if (((phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) ||
	    (phys_hi & PCI_RELOCAT_B))
		return (DDI_SUCCESS);

	/*
	 * The "reg" property specifies a relocatable address; get and
	 * interpret the "assigned-addresses" property.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (int **)&assigned_addr,
	    (uint_t *)&assigned_addr_len) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Scan the "assigned-addresses" entries for one that matches the
	 * specified "reg" property entry.
	 */
	phys_hi &= PCI_CONF_ADDR_MASK;
	number = assigned_addr_len / (sizeof (pci_regspec_t) / sizeof (int));
	for (i = 0; i < number; i++) {
		if ((assigned_addr[i].pci_phys_hi & PCI_CONF_ADDR_MASK) ==
		    phys_hi) {
			pci_rp->pci_phys_mid = assigned_addr[i].pci_phys_mid;
			pci_rp->pci_phys_low = assigned_addr[i].pci_phys_low;
			ddi_prop_free(assigned_addr);
			return (DDI_SUCCESS);
		}
	}

	ddi_prop_free(assigned_addr);
	return (DDI_FAILURE);
}


/*
 * For pci_tools
 */

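/*
 * The minor number selects either the pcitool register node, the pcitool
 * interrupt node, or (by default) the hotplug/devctl path serviced by the
 * pcihp framework's cb_ioctl.
 */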
int
pci_common_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp)
{
	int	rv = ENOTTY;

	minor_t	minor = getminor(dev);

	switch (PCIHP_AP_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
	case PCI_TOOL_REG_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_REG:
		case PCITOOL_DEVICE_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_dev_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;

		case PCITOOL_NEXUS_SET_REG:
		case PCITOOL_NEXUS_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_bus_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;
		}
		break;

	case PCI_TOOL_INTR_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_INTR:

			/* Require PRIV_SYS_RES_CONFIG, same as psradm */
			if (secpolicy_ponline(credp)) {
				rv = EPERM;
				break;
			}

		/*FALLTHRU*/
		/* These require no special privileges. */
		case PCITOOL_DEVICE_GET_INTR:
		case PCITOOL_DEVICE_NUM_INTR:
			rv = pcitool_intr_admn(dip, (void *)arg, cmd, mode);
			break;
		}
		break;

	/*
	 * All non-PCItool ioctls go through here, including:
	 *   devctl ioctls with minor number PCIHP_DEVCTL_MINOR and
	 *   those for attachment points where the minor number is the
	 *   device number.
	 */
	default:
		rv = (pcihp_get_cb_ops())->cb_ioctl(dev, cmd, arg, mode,
		    credp, rvalp);
		break;
	}

	return (rv);
}


/*
 * These are the get and put functions to be shared with drivers.  The
 * mutex locking is done inside the functions referenced, rather than
 * here, and is thus shared across PCI child drivers and any other
 * consumers of PCI config space (such as the ACPI subsystem).
 *
 * The configuration space addresses come in as pointers.  This is fine on
 * a 32-bit system, where the VM space and configuration space are the same
 * size.  It's not such a good idea on a 64-bit system, where memory
 * addresses are twice as large as configuration space addresses.  At some
 * point in the call tree we need to take a stand and say "you are 32-bit
 * from this time forth", and this seems like a nice self-contained place.
 */

uint8_t
pci_config_rd8(ddi_acc_impl_t *hdlp, uint8_t *addr)
{
	pci_acc_cfblk_t	*cfp;
	uint8_t	rval;
	int	reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getb_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d);
}

uint16_t
pci_config_rd16(ddi_acc_impl_t *hdlp, uint16_t *addr)
{
	pci_acc_cfblk_t	*cfp;
	uint16_t rval;
	int	reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getw_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d);
}

uint32_t
pci_config_rd32(ddi_acc_impl_t *hdlp, uint32_t *addr)
{
	pci_acc_cfblk_t	*cfp;
	uint32_t rval;
	int	reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg);

	return (rval);
}

void
pci_config_rep_rd32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d);
}


void
pci_config_wr8(ddi_acc_impl_t *hdlp, uint8_t *addr, uint8_t value)
{
	pci_acc_cfblk_t	*cfp;
	int	reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putb_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d, *h++);
}

void
pci_config_wr16(ddi_acc_impl_t *hdlp, uint16_t *addr, uint16_t value)
{
	pci_acc_cfblk_t	*cfp;
	int	reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putw_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d, *h++);
}

void
pci_config_wr32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
{
	pci_acc_cfblk_t	*cfp;
	int	reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d, *h++);
}

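/*
 * The 64-bit accessors below are composed of two 32-bit config space
 * accesses, low doubleword first.
 */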
uint64_t
pci_config_rd64(ddi_acc_impl_t *hdlp, uint64_t *addr)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;
	uint64_t val;

	dp = (uint32_t *)addr;
	lw_val = pci_config_rd32(hdlp, dp);
	dp++;
	hi_val = pci_config_rd32(hdlp, dp);
	val = ((uint64_t)hi_val << 32) | lw_val;
	return (val);
}

void
pci_config_wr64(ddi_acc_impl_t *hdlp, uint64_t *addr, uint64_t value)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;

	dp = (uint32_t *)addr;
	lw_val = (uint32_t)(value & 0xffffffff);
	hi_val = (uint32_t)(value >> 32);
	pci_config_wr32(hdlp, dp, lw_val);
	dp++;
	pci_config_wr32(hdlp, dp, hi_val);
}

void
pci_config_rep_rd64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr++);
	} else {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr);
	}
}

void
pci_config_rep_wr64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, host_addr++, *dev_addr++);
	} else {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, host_addr++, *dev_addr);
	}
}


/*
 * Enable legacy PCI config space access for the following four north bridges
 * (returns 0 when dip is one of these AMD north bridge functions):
 *	Host bridge: AMD HyperTransport Technology Configuration
 *	Host bridge: AMD Address Map
 *	Host bridge: AMD DRAM Controller
 *	Host bridge: AMD Miscellaneous Control
 */
int
is_amd_northbridge(dev_info_t *dip)
{
	int vendor_id, device_id;

	vendor_id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	device_id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", -1);

	if (IS_AMD_NTBRIDGE(vendor_id, device_id))
		return (0);

	return (1);
}