1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25 /* 26 * This file contains various support routines. 
27 */ 28 29 #include <sys/scsi/adapters/pmcs/pmcs.h> 30 31 /* 32 * Local static data 33 */ 34 static int tgtmap_stable_usec = MICROSEC; /* 1 second */ 35 static int tgtmap_csync_usec = 10 * MICROSEC; /* 10 seconds */ 36 37 /* 38 * SAS Topology Configuration 39 */ 40 static void pmcs_new_tport(pmcs_hw_t *, pmcs_phy_t *); 41 static void pmcs_configure_expander(pmcs_hw_t *, pmcs_phy_t *, pmcs_iport_t *); 42 43 static void pmcs_check_expanders(pmcs_hw_t *, pmcs_phy_t *); 44 static void pmcs_check_expander(pmcs_hw_t *, pmcs_phy_t *); 45 static void pmcs_clear_expander(pmcs_hw_t *, pmcs_phy_t *, int); 46 47 static int pmcs_expander_get_nphy(pmcs_hw_t *, pmcs_phy_t *); 48 static int pmcs_expander_content_discover(pmcs_hw_t *, pmcs_phy_t *, 49 pmcs_phy_t *); 50 51 static int pmcs_smp_function_result(pmcs_hw_t *, smp_response_frame_t *); 52 static boolean_t pmcs_validate_devid(pmcs_phy_t *, pmcs_phy_t *, uint32_t); 53 static void pmcs_clear_phys(pmcs_hw_t *, pmcs_phy_t *); 54 static int pmcs_configure_new_devices(pmcs_hw_t *, pmcs_phy_t *); 55 static void pmcs_begin_observations(pmcs_hw_t *); 56 static void pmcs_flush_observations(pmcs_hw_t *); 57 static boolean_t pmcs_report_observations(pmcs_hw_t *); 58 static boolean_t pmcs_report_iport_observations(pmcs_hw_t *, pmcs_iport_t *, 59 pmcs_phy_t *); 60 static pmcs_phy_t *pmcs_find_phy_needing_work(pmcs_hw_t *, pmcs_phy_t *); 61 static int pmcs_kill_devices(pmcs_hw_t *, pmcs_phy_t *); 62 static void pmcs_lock_phy_impl(pmcs_phy_t *, int); 63 static void pmcs_unlock_phy_impl(pmcs_phy_t *, int); 64 static pmcs_phy_t *pmcs_clone_phy(pmcs_phy_t *); 65 static boolean_t pmcs_configure_phy(pmcs_hw_t *, pmcs_phy_t *); 66 static void pmcs_reap_dead_phy(pmcs_phy_t *); 67 static pmcs_iport_t *pmcs_get_iport_by_ua(pmcs_hw_t *, char *); 68 static boolean_t pmcs_phy_target_match(pmcs_phy_t *); 69 static void pmcs_iport_active(pmcs_iport_t *); 70 static void pmcs_tgtmap_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t, 71 void **); 72 
static boolean_t pmcs_tgtmap_deactivate_cb(void *, char *, 73 scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t); 74 static void pmcs_add_dead_phys(pmcs_hw_t *, pmcs_phy_t *); 75 static void pmcs_get_fw_version(pmcs_hw_t *); 76 static int pmcs_get_time_stamp(pmcs_hw_t *, uint64_t *, hrtime_t *); 77 78 /* 79 * Often used strings 80 */ 81 const char pmcs_nowrk[] = "%s: unable to get work structure"; 82 const char pmcs_nomsg[] = "%s: unable to get Inbound Message entry"; 83 const char pmcs_timeo[] = "%s: command timed out"; 84 85 extern const ddi_dma_attr_t pmcs_dattr; 86 extern kmutex_t pmcs_trace_lock; 87 88 /* 89 * Some Initial setup steps. 90 */ 91 92 int 93 pmcs_setup(pmcs_hw_t *pwp) 94 { 95 uint32_t barval = pwp->mpibar; 96 uint32_t i, scratch, regbar, regoff, barbar, baroff; 97 uint32_t new_ioq_depth, ferr = 0; 98 99 /* 100 * Check current state. If we're not at READY state, 101 * we can't go further. 102 */ 103 scratch = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1); 104 if ((scratch & PMCS_MSGU_AAP_STATE_MASK) == PMCS_MSGU_AAP_STATE_ERROR) { 105 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 106 "%s: AAP Error State (0x%x)", 107 __func__, pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 108 PMCS_MSGU_AAP_ERROR_MASK); 109 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE); 110 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 111 return (-1); 112 } 113 if ((scratch & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) { 114 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 115 "%s: AAP unit not ready (state 0x%x)", 116 __func__, scratch & PMCS_MSGU_AAP_STATE_MASK); 117 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE); 118 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 119 return (-1); 120 } 121 122 /* 123 * Read the offset from the Message Unit scratchpad 0 register. 124 * This allows us to read the MPI Configuration table. 125 * 126 * Check its signature for validity. 
127 */ 128 baroff = barval; 129 barbar = barval >> PMCS_MSGU_MPI_BAR_SHIFT; 130 baroff &= PMCS_MSGU_MPI_OFFSET_MASK; 131 132 regoff = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0); 133 regbar = regoff >> PMCS_MSGU_MPI_BAR_SHIFT; 134 regoff &= PMCS_MSGU_MPI_OFFSET_MASK; 135 136 if (regoff > baroff) { 137 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 138 "%s: bad MPI Table Length (register offset=0x%08x, " 139 "passed offset=0x%08x)", __func__, regoff, baroff); 140 return (-1); 141 } 142 if (regbar != barbar) { 143 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 144 "%s: bad MPI BAR (register BAROFF=0x%08x, " 145 "passed BAROFF=0x%08x)", __func__, regbar, barbar); 146 return (-1); 147 } 148 pwp->mpi_offset = regoff; 149 if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS) != PMCS_SIGNATURE) { 150 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 151 "%s: Bad MPI Configuration Table Signature 0x%x", __func__, 152 pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS)); 153 return (-1); 154 } 155 156 if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR) != PMCS_MPI_REVISION1) { 157 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 158 "%s: Bad MPI Configuration Revision 0x%x", __func__, 159 pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR)); 160 return (-1); 161 } 162 163 /* 164 * Generate offsets for the General System, Inbound Queue Configuration 165 * and Outbound Queue configuration tables. This way the macros to 166 * access those tables will work correctly. 
167 */ 168 pwp->mpi_gst_offset = 169 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_GSTO); 170 pwp->mpi_iqc_offset = 171 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IQCTO); 172 pwp->mpi_oqc_offset = 173 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_OQCTO); 174 175 pmcs_get_fw_version(pwp); 176 177 pwp->max_cmd = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MOIO); 178 pwp->max_dev = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO0) >> 16; 179 180 pwp->max_iq = PMCS_MNIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 181 pwp->max_oq = PMCS_MNOQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 182 pwp->nphy = PMCS_NPHY(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 183 if (pwp->max_iq <= PMCS_NIQ) { 184 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 185 "%s: not enough Inbound Queues supported " 186 "(need %d, max_oq=%d)", __func__, pwp->max_iq, PMCS_NIQ); 187 return (-1); 188 } 189 if (pwp->max_oq <= PMCS_NOQ) { 190 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 191 "%s: not enough Outbound Queues supported " 192 "(need %d, max_oq=%d)", __func__, pwp->max_oq, PMCS_NOQ); 193 return (-1); 194 } 195 if (pwp->nphy == 0) { 196 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 197 "%s: zero phys reported", __func__); 198 return (-1); 199 } 200 if (PMCS_HPIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1))) { 201 pwp->hipri_queue = (1 << PMCS_IQ_OTHER); 202 } 203 204 205 for (i = 0; i < pwp->nphy; i++) { 206 PMCS_MPI_EVQSET(pwp, PMCS_OQ_EVENTS, i); 207 PMCS_MPI_NCQSET(pwp, PMCS_OQ_EVENTS, i); 208 } 209 210 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_INFO2, 211 (PMCS_OQ_EVENTS << GENERAL_EVENT_OQ_SHIFT) | 212 (PMCS_OQ_EVENTS << DEVICE_HANDLE_REMOVED_SHIFT)); 213 214 /* 215 * Verify that ioq_depth is valid (> 0 and not so high that it 216 * would cause us to overrun the chip with commands). 217 */ 218 if (pwp->ioq_depth == 0) { 219 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 220 "%s: I/O queue depth set to 0. 
Setting to %d", 221 __func__, PMCS_NQENTRY); 222 pwp->ioq_depth = PMCS_NQENTRY; 223 } 224 225 if (pwp->ioq_depth < PMCS_MIN_NQENTRY) { 226 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 227 "%s: I/O queue depth set too low (%d). Setting to %d", 228 __func__, pwp->ioq_depth, PMCS_MIN_NQENTRY); 229 pwp->ioq_depth = PMCS_MIN_NQENTRY; 230 } 231 232 if (pwp->ioq_depth > (pwp->max_cmd / (PMCS_IO_IQ_MASK + 1))) { 233 new_ioq_depth = pwp->max_cmd / (PMCS_IO_IQ_MASK + 1); 234 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 235 "%s: I/O queue depth set too high (%d). Setting to %d", 236 __func__, pwp->ioq_depth, new_ioq_depth); 237 pwp->ioq_depth = new_ioq_depth; 238 } 239 240 /* 241 * Allocate consistent memory for OQs and IQs. 242 */ 243 pwp->iqp_dma_attr = pwp->oqp_dma_attr = pmcs_dattr; 244 pwp->iqp_dma_attr.dma_attr_align = 245 pwp->oqp_dma_attr.dma_attr_align = PMCS_QENTRY_SIZE; 246 247 /* 248 * The Rev C chip has the ability to do PIO to or from consistent 249 * memory anywhere in a 64 bit address space, but the firmware is 250 * not presently set up to do so. 
251 */ 252 pwp->iqp_dma_attr.dma_attr_addr_hi = 253 pwp->oqp_dma_attr.dma_attr_addr_hi = 0x000000FFFFFFFFFFull; 254 255 for (i = 0; i < PMCS_NIQ; i++) { 256 if (pmcs_dma_setup(pwp, &pwp->iqp_dma_attr, 257 &pwp->iqp_acchdls[i], 258 &pwp->iqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth, 259 (caddr_t *)&pwp->iqp[i], &pwp->iqaddr[i]) == B_FALSE) { 260 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 261 "Failed to setup DMA for iqp[%d]", i); 262 return (-1); 263 } 264 bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 265 } 266 267 for (i = 0; i < PMCS_NOQ; i++) { 268 if (pmcs_dma_setup(pwp, &pwp->oqp_dma_attr, 269 &pwp->oqp_acchdls[i], 270 &pwp->oqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth, 271 (caddr_t *)&pwp->oqp[i], &pwp->oqaddr[i]) == B_FALSE) { 272 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 273 "Failed to setup DMA for oqp[%d]", i); 274 return (-1); 275 } 276 bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 277 } 278 279 /* 280 * Install the IQ and OQ addresses (and null out the rest). 
281 */ 282 for (i = 0; i < pwp->max_iq; i++) { 283 pwp->iqpi_offset[i] = pmcs_rd_iqc_tbl(pwp, PMCS_IQPIOFFX(i)); 284 if (i < PMCS_NIQ) { 285 if (i != PMCS_IQ_OTHER) { 286 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 287 pwp->ioq_depth | (PMCS_QENTRY_SIZE << 16)); 288 } else { 289 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 290 (1 << 30) | pwp->ioq_depth | 291 (PMCS_QENTRY_SIZE << 16)); 292 } 293 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 294 DWORD1(pwp->iqaddr[i])); 295 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 296 DWORD0(pwp->iqaddr[i])); 297 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 298 DWORD1(pwp->ciaddr+IQ_OFFSET(i))); 299 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 300 DWORD0(pwp->ciaddr+IQ_OFFSET(i))); 301 } else { 302 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0); 303 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0); 304 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0); 305 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0); 306 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0); 307 } 308 } 309 310 for (i = 0; i < pwp->max_oq; i++) { 311 pwp->oqci_offset[i] = pmcs_rd_oqc_tbl(pwp, PMCS_OQCIOFFX(i)); 312 if (i < PMCS_NOQ) { 313 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), pwp->ioq_depth | 314 (PMCS_QENTRY_SIZE << 16) | OQIEX); 315 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 316 DWORD1(pwp->oqaddr[i])); 317 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 318 DWORD0(pwp->oqaddr[i])); 319 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 320 DWORD1(pwp->ciaddr+OQ_OFFSET(i))); 321 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 322 DWORD0(pwp->ciaddr+OQ_OFFSET(i))); 323 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 324 pwp->oqvec[i] << 24); 325 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0); 326 } else { 327 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0); 328 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0); 329 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0); 330 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0); 331 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0); 332 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0); 333 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0); 334 } 335 } 336 337 /* 338 * Set up logging, if defined. 
339 */ 340 if (pwp->fwlog) { 341 uint64_t logdma = pwp->fwaddr; 342 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAH, DWORD1(logdma)); 343 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAL, DWORD0(logdma)); 344 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBS, PMCS_FWLOG_SIZE >> 1); 345 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELSEV, pwp->fwlog); 346 logdma += (PMCS_FWLOG_SIZE >> 1); 347 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAH, DWORD1(logdma)); 348 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAL, DWORD0(logdma)); 349 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBS, PMCS_FWLOG_SIZE >> 1); 350 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELSEV, pwp->fwlog); 351 } 352 353 /* 354 * Interrupt vectors, outbound queues, and odb_auto_clear 355 * 356 * MSI/MSI-X: 357 * If we got 4 interrupt vectors, we'll assign one to each outbound 358 * queue as well as the fatal interrupt, and auto clear can be set 359 * for each. 360 * 361 * If we only got 2 vectors, one will be used for I/O completions 362 * and the other for the other two vectors. In this case, auto_ 363 * clear can only be set for I/Os, which is fine. The fatal 364 * interrupt will be mapped to the PMCS_FATAL_INTERRUPT bit, which 365 * is not an interrupt vector. 366 * 367 * MSI/MSI-X/INT-X: 368 * If we only got 1 interrupt vector, auto_clear must be set to 0, 369 * and again the fatal interrupt will be mapped to the 370 * PMCS_FATAL_INTERRUPT bit (again, not an interrupt vector). 
371 */ 372 373 switch (pwp->int_type) { 374 case PMCS_INT_MSIX: 375 case PMCS_INT_MSI: 376 switch (pwp->intr_cnt) { 377 case 1: 378 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 379 (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 380 pwp->odb_auto_clear = 0; 381 break; 382 case 2: 383 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 384 (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 385 pwp->odb_auto_clear = (1 << PMCS_FATAL_INTERRUPT) | 386 (1 << PMCS_MSIX_IODONE); 387 break; 388 case 4: 389 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 390 (PMCS_MSIX_FATAL << PMCS_FERIV_SHIFT)); 391 pwp->odb_auto_clear = (1 << PMCS_MSIX_FATAL) | 392 (1 << PMCS_MSIX_GENERAL) | (1 << PMCS_MSIX_IODONE) | 393 (1 << PMCS_MSIX_EVENTS); 394 break; 395 } 396 break; 397 398 case PMCS_INT_FIXED: 399 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 400 PMCS_FERRIE | (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 401 pwp->odb_auto_clear = 0; 402 break; 403 } 404 405 /* 406 * If the open retry interval is non-zero, set it. 407 */ 408 if (pwp->open_retry_interval != 0) { 409 int phynum; 410 411 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 412 "%s: Setting open retry interval to %d usecs", __func__, 413 pwp->open_retry_interval); 414 for (phynum = 0; phynum < pwp->nphy; phynum ++) { 415 pmcs_wr_gsm_reg(pwp, OPEN_RETRY_INTERVAL(phynum), 416 pwp->open_retry_interval); 417 } 418 } 419 420 /* 421 * Enable Interrupt Reassertion 422 * Default Delay 1000us 423 */ 424 ferr = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FERR); 425 if ((ferr & PMCS_MPI_IRAE) == 0) { 426 ferr &= ~(PMCS_MPI_IRAU | PMCS_MPI_IRAD_MASK); 427 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, ferr | PMCS_MPI_IRAE); 428 } 429 430 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, pwp->odb_auto_clear); 431 pwp->mpi_table_setup = 1; 432 return (0); 433 } 434 435 /* 436 * Start the Message Passing protocol with the PMC chip. 
 */
int
pmcs_start_mpi(pmcs_hw_t *pwp)
{
	int i;

	/*
	 * Ring the inbound doorbell with MPIINI, then poll (1ms per
	 * iteration, up to 1 second) for the chip to clear the bit.
	 */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPIINI);
	for (i = 0; i < 1000; i++) {
		if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
		    PMCS_MSGU_IBDB_MPIINI) == 0) {
			break;
		}
		drv_usecwait(1000);
	}
	/* Doorbell still set after the full poll window: launch failed */
	if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPIINI) {
		return (-1);
	}
	/* Give firmware time to settle before checking the MPI state */
	drv_usecwait(500000);

	/*
	 * Check to make sure we got to INIT state.
	 */
	if (PMCS_MPI_S(pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE)) !=
	    PMCS_MPI_STATE_INIT) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: MPI launch failed (GST 0x%x DBCLR 0x%x)", __func__,
		    pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE),
		    pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB_CLEAR));
		return (-1);
	}
	return (0);
}

/*
 * Stop the Message Passing protocol with the PMC chip.
 * Returns 0 on success, -1 if the chip fails to acknowledge termination.
 */
int
pmcs_stop_mpi(pmcs_hw_t *pwp)
{
	int i;

	/* Null out all inbound and outbound queue configuration first */
	for (i = 0; i < pwp->max_iq; i++) {
		pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0);
	}
	for (i = 0; i < pwp->max_oq; i++) {
		pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
	}
	pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 0);
	/*
	 * Ring the termination doorbell and poll (1ms per iteration, up to
	 * 2 seconds) for the chip to clear it.
	 */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPICTU);
	for (i = 0; i < 2000; i++) {
		if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
		    PMCS_MSGU_IBDB_MPICTU) == 0) {
			break;
		}
		drv_usecwait(1000);
	}
	if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPICTU) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: MPI stop failed", __func__);
		return (-1);
	}
	return (0);
}

/*
 * Do a sequence of ECHO messages to test for MPI functionality,
 * all inbound and outbound queue functionality and interrupts.
 */
int
pmcs_echo_test(pmcs_hw_t *pwp)
{
	echo_test_t fred;
	struct pmcwork *pwrk;
	uint32_t *msg, count;
	int iqe = 0, iqo = 0, result, rval = 0;
	int iterations;
	hrtime_t echo_start, echo_end, echo_total;

	ASSERT(pwp->max_cmd > 0);

	/*
	 * We want iterations to be max_cmd * 3 to ensure that we run the
	 * echo test enough times to iterate through every inbound queue
	 * at least twice.
	 */
	iterations = pwp->max_cmd * 3;

	echo_total = 0;
	count = 0;

	/*
	 * NOTE(review): nothing in this loop body visibly increments
	 * count; fred.ptr = &count is handed to the echo payload, so
	 * presumably the echo completion path advances count through
	 * that pointer — verify against the completion handler.
	 */
	while (count < iterations) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
		if (pwrk == NULL) {
			pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
			    pmcs_nowrk, __func__);
			rval = -1;
			break;
		}

		mutex_enter(&pwp->iqp_lock[iqe]);
		msg = GET_IQ_ENTRY(pwp, iqe);
		if (msg == NULL) {
			mutex_exit(&pwp->iqp_lock[iqe]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
			    pmcs_nomsg, __func__);
			rval = -1;
			break;
		}

		bzero(msg, PMCS_QENTRY_SIZE);

		if (iqe == PMCS_IQ_OTHER) {
			/* This is on the high priority queue */
			msg[0] = LE_32(PMCS_HIPRI(pwp, iqo, PMCIN_ECHO));
		} else {
			msg[0] = LE_32(PMCS_IOMB_IN_SAS(iqo, PMCIN_ECHO));
		}
		msg[1] = LE_32(pwrk->htag);
		fred.signature = 0xdeadbeef;
		fred.count = count;
		fred.ptr = &count;
		(void) memcpy(&msg[2], &fred, sizeof (fred));
		pwrk->state = PMCS_WORK_STATE_ONCHIP;

		INC_IQ_ENTRY(pwp, iqe);

		echo_start = gethrtime();
		DTRACE_PROBE2(pmcs__echo__test__wait__start,
		    hrtime_t, echo_start, uint32_t, pwrk->htag);

		/* Rotate through all inbound and outbound queues */
		if (++iqe == PMCS_NIQ) {
			iqe = 0;
		}
		if (++iqo == PMCS_NOQ) {
			iqo = 0;
		}

		WAIT_FOR(pwrk, 250, result);

		echo_end = gethrtime();
		DTRACE_PROBE2(pmcs__echo__test__wait__end,
		    hrtime_t, echo_end, int, result);

		echo_total += (echo_end - echo_start);

		pmcs_pwork(pwp, pwrk);
		if (result) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: command timed out on echo test #%d",
			    __func__, count);
			rval = -1;
			break;
		}
	}

	/*
	 * The intr_threshold is adjusted by PMCS_INTR_THRESHOLD in order to
	 * remove the overhead of things like the delay in getting signaled
	 * for completion.
	 */
	if (echo_total != 0) {
		pwp->io_intr_coal.intr_latency =
		    (echo_total / iterations) / 2;
		pwp->io_intr_coal.intr_threshold =
		    PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
		    pwp->io_intr_coal.intr_latency);
	}

	return (rval);
}

/*
 * Start the (real) phys
 *
 * Issues a PHY_START IOMB for root phy "phynum" with the given link
 * mode and speed; on success marks the phy started in pwp->phys_started.
 * Returns 0 (even on command timeout — only a missing work structure or
 * inbound queue entry returns -1).
 */
int
pmcs_start_phy(pmcs_hw_t *pwp, int phynum, int linkmode, int speed)
{
	int result;
	uint32_t *msg;
	struct pmcwork *pwrk;
	pmcs_phy_t *pptr;
	sas_identify_af_t sap;

	mutex_enter(&pwp->lock);
	pptr = pwp->root_phys + phynum;
	/*
	 * NOTE(review): with pointer arithmetic this is only NULL if
	 * root_phys itself is NULL.
	 */
	if (pptr == NULL) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: cannot find port %d", __func__, phynum);
		return (0);
	}

	/* Take the phy lock before dropping the softstate lock */
	pmcs_lock_phy(pptr);
	mutex_exit(&pwp->lock);

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_unlock_phy(pptr);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (-1);
	}

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	if (msg == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_unlock_phy(pptr);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (-1);
	}
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_START));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(linkmode | speed | phynum);
	/* Build the SAS IDENTIFY address frame carried in the IOMB */
	bzero(&sap, sizeof (sap));
	sap.device_type = SAS_IF_DTYPE_ENDPOINT;
	sap.ssp_ini_port = 1;

	/* Per-phy WWN when each phy is its own port, else the shared WWN */
	if (pwp->separate_ports) {
		pmcs_wwn2barray(pwp->sas_wwns[phynum], sap.sas_address);
	} else {
		pmcs_wwn2barray(pwp->sas_wwns[0], sap.sas_address);
	}

	ASSERT(phynum < SAS2_PHYNUM_MAX);
	sap.phy_identifier = phynum & SAS2_PHYNUM_MASK;
	(void) memcpy(&msg[3], &sap, sizeof (sas_identify_af_t));
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Record programmed and hardware link-rate bounds for this phy */
	pptr->state.prog_min_rate = (lowbit((ulong_t)speed) - 1);
	pptr->state.prog_max_rate = (highbit((ulong_t)speed) - 1);
	pptr->state.hw_min_rate = PMCS_HW_MIN_LINK_RATE;
	pptr->state.hw_max_rate = PMCS_HW_MAX_LINK_RATE;

	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
	} else {
		mutex_enter(&pwp->lock);
		pwp->phys_started |= (1 << phynum);
		mutex_exit(&pwp->lock);
	}

	return (0);
}

/*
 * Start all phys that are not blocked by phyid_block_mask, clear their
 * diagnostic counters, and capture the firmware timestamp baseline.
 * Returns -1 only if a phy fails to start; timestamp failure is logged
 * but not fatal.
 */
int
pmcs_start_phys(pmcs_hw_t *pwp)
{
	int i, rval;

	for (i = 0; i < pwp->nphy; i++) {
		if ((pwp->phyid_block_mask & (1 << i)) == 0) {
			if (pmcs_start_phy(pwp, i,
			    (pwp->phymode << PHY_MODE_SHIFT),
			    pwp->physpeed << PHY_LINK_SHIFT)) {
				return (-1);
			}
			if (pmcs_clear_diag_counters(pwp, i)) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
				    "%s: failed to reset counters on PHY (%d)",
				    __func__, i);
			}
		}
	}

	rval = pmcs_get_time_stamp(pwp, &pwp->fw_timestamp, &pwp->hrtimestamp);
	if (rval) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Failed to obtain firmware timestamp", __func__);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Firmware timestamp: 0x%" PRIx64, pwp->fw_timestamp);
	}

	return (0);
}

/*
 * Called with PHY locked
 *
 * Issue a link or hard reset (per "type") to the given phy: an SMP
 * PHY CONTROL request through the parent expander for non-root phys,
 * or a LOCAL_PHY_CONTROL IOMB for root phys.  The phy lock is dropped
 * while waiting for completion and reacquired before return.
 * Returns 0 on success, ENOMEM/EIO on failure, or the non-OK firmware
 * status value.
 */
int
pmcs_reset_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t type)
{
	uint32_t *msg;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	const char *mbar;
	uint32_t amt;
	uint32_t pdevid;
	uint32_t stsoff;
	uint32_t status;
	int result, level, phynum;
	struct pmcwork *pwrk;
	uint32_t htag;

	ASSERT(mutex_owned(&pptr->phy_lock));

	bzero(iomb, PMCS_QENTRY_SIZE);
	phynum = pptr->phynum;
	level = pptr->level;
	if (level > 0) {
		/* Non-root phy: address the SMP request to the parent */
		pdevid = pptr->parent->device_id;
	} else if ((level == 0) && (pptr->dtype == EXPANDER)) {
		/* Never reset an HBA root phy attached to an expander */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Not resetting HBA PHY @ %s", __func__, pptr->path);
		return (0);
	}

	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		return (0);
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);

	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	pwrk->arg = iomb;

	/*
	 * If level > 0, we need to issue an SMP_REQUEST with a PHY_CONTROL
	 * function to do either a link reset or hard reset. If level == 0,
	 * then we do a LOCAL_PHY_CONTROL IOMB to do link/hard reset to the
	 * root (local) PHY
	 */
	if (level) {
		stsoff = 2;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_SMP_REQUEST));
		iomb[1] = LE_32(pwrk->htag);
		iomb[2] = LE_32(pdevid);
		iomb[3] = LE_32(40 << SMP_REQUEST_LENGTH_SHIFT);
		/*
		 * Send SMP PHY CONTROL/HARD or LINK RESET
		 */
		iomb[4] = BE_32(0x40910000);
		iomb[5] = 0;

		if (type == PMCS_PHYOP_HARD_RESET) {
			mbar = "SMP PHY CONTROL/HARD RESET";
			iomb[6] = BE_32((phynum << 24) |
			    (PMCS_PHYOP_HARD_RESET << 16));
		} else {
			mbar = "SMP PHY CONTROL/LINK RESET";
			iomb[6] = BE_32((phynum << 24) |
			    (PMCS_PHYOP_LINK_RESET << 16));
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s for phy 0x%x",
		    __func__, mbar, pptr->parent->path, pptr->phynum);
		amt = 7;
	} else {
		/*
		 * Unlike most other Outbound messages, status for
		 * a local phy operation is in DWORD 3.
		 */
		stsoff = 3;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_LOCAL_PHY_CONTROL));
		iomb[1] = LE_32(pwrk->htag);
		if (type == PMCS_PHYOP_LINK_RESET) {
			mbar = "LOCAL PHY LINK RESET";
			iomb[2] = LE_32((PMCS_PHYOP_LINK_RESET << 8) | phynum);
		} else {
			mbar = "LOCAL PHY HARD RESET";
			iomb[2] = LE_32((PMCS_PHYOP_HARD_RESET << 8) | phynum);
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s", __func__, mbar, pptr->path);
		amt = 3;
	}

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (msg == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}
	COPY_MESSAGE(msg, iomb, amt);
	htag = pwrk->htag;

	/* SMP serialization */
	pmcs_smp_acquire(pptr->iport);

	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the phy lock across the wait; reacquired below */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	/* Release SMP lock before reacquiring PHY lock */
	pmcs_smp_release(pptr->iport);
	pmcs_lock_phy(pptr);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);

		/* Timed out: try to abort the outstanding request */
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP abort for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		return (EIO);
	}
	/* Status dword position depends on which command was sent */
	status = LE_32(iomb[stsoff]);

	if (status != PMCOUT_STATUS_OK) {
		char buf[32];
		const char *es = pmcs_status_str(status);
		if (es == NULL) {
			(void) snprintf(buf, sizeof (buf), "Status 0x%x",
			    status);
			es = buf;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: %s action returned %s for %s", __func__, mbar, es,
		    pptr->path);
		return (status);
	}

	return (0);
}

/*
 * Stop the (real) phys. No PHY or softstate locks are required as this only
 * happens during detach.
 */
void
pmcs_stop_phy(pmcs_hw_t *pwp, int phynum)
{
	int result;
	pmcs_phy_t *pptr;
	uint32_t *msg;
	struct pmcwork *pwrk;

	pptr = pwp->root_phys + phynum;
	if (pptr == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: unable to find port %d", __func__, phynum);
		return;
	}

	/* Only issue PHY_STOP for phys we actually started */
	if (pwp->phys_started & (1 << phynum)) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);

		if (pwrk == NULL) {
			pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
			    pmcs_nowrk, __func__);
			return;
		}

		mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

		if (msg == NULL) {
			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
			    pmcs_nomsg, __func__);
			return;
		}

		msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_STOP));
		msg[1] = LE_32(pwrk->htag);
		msg[2] = LE_32(phynum);
		pwrk->state = PMCS_WORK_STATE_ONCHIP;
		/*
		 * Make this unconfigured now.
		 */
		INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
		WAIT_FOR(pwrk, 1000, result);

		pmcs_pwork(pwp, pwrk);
		if (result) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    pptr, NULL, pmcs_timeo, __func__);
		}

		pwp->phys_started &= ~(1 << phynum);
	}

	pptr->configured = 0;
}

/*
 * No locks should be required as this is only called during detach
 */
void
pmcs_stop_phys(pmcs_hw_t *pwp)
{
	int i;
	for (i = 0; i < pwp->nphy; i++) {
		if ((pwp->phyid_block_mask & (1 << i)) == 0) {
			pmcs_stop_phy(pwp, i);
		}
	}
}

/*
 * Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed.
963 * ERR_CNT_RESET: return status of cmd 964 * DIAG_REPORT_GET: return value of the counter 965 */ 966 int 967 pmcs_sas_diag_execute(pmcs_hw_t *pwp, uint32_t cmd, uint32_t cmd_desc, 968 uint8_t phynum) 969 { 970 uint32_t htag, *ptr, status, msg[PMCS_MSG_SIZE << 1]; 971 int result; 972 struct pmcwork *pwrk; 973 974 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL); 975 if (pwrk == NULL) { 976 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__); 977 return (DDI_FAILURE); 978 } 979 pwrk->arg = msg; 980 htag = pwrk->htag; 981 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_SAS_DIAG_EXECUTE)); 982 msg[1] = LE_32(htag); 983 msg[2] = LE_32((cmd << PMCS_DIAG_CMD_SHIFT) | 984 (cmd_desc << PMCS_DIAG_CMD_DESC_SHIFT) | phynum); 985 986 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 987 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 988 if (ptr == NULL) { 989 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 990 pmcs_pwork(pwp, pwrk); 991 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__); 992 return (DDI_FAILURE); 993 } 994 COPY_MESSAGE(ptr, msg, 3); 995 pwrk->state = PMCS_WORK_STATE_ONCHIP; 996 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 997 998 WAIT_FOR(pwrk, 1000, result); 999 1000 pmcs_pwork(pwp, pwrk); 1001 1002 if (result) { 1003 pmcs_timed_out(pwp, htag, __func__); 1004 return (DDI_FAILURE); 1005 } 1006 1007 status = LE_32(msg[3]); 1008 1009 /* Return for counter reset */ 1010 if (cmd == PMCS_ERR_CNT_RESET) 1011 return (status); 1012 1013 /* Return for counter value */ 1014 if (status) { 1015 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1016 "%s: failed, status (0x%x)", __func__, status); 1017 return (DDI_FAILURE); 1018 } 1019 return (LE_32(msg[4])); 1020 } 1021 1022 /* Get the current value of the counter for desc on phynum and return it. */ 1023 int 1024 pmcs_get_diag_report(pmcs_hw_t *pwp, uint32_t desc, uint8_t phynum) 1025 { 1026 return (pmcs_sas_diag_execute(pwp, PMCS_DIAG_REPORT_GET, desc, phynum)); 1027 } 1028 1029 /* Clear all of the counters for phynum. 
Returns the status of the command. */
/*
 * Reset all four per-phy diagnostic error counters (invalid dword,
 * disparity error, lost dword sync, reset failed) on 'phynum'.
 * Returns DDI_FAILURE as soon as any individual reset fails,
 * DDI_SUCCESS when all four succeed.
 */
int
pmcs_clear_diag_counters(pmcs_hw_t *pwp, uint8_t phynum)
{
    uint32_t cmd = PMCS_ERR_CNT_RESET;
    uint32_t cmd_desc;

    cmd_desc = PMCS_INVALID_DWORD_CNT;
    if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
        return (DDI_FAILURE);

    cmd_desc = PMCS_DISPARITY_ERR_CNT;
    if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
        return (DDI_FAILURE);

    cmd_desc = PMCS_LOST_DWORD_SYNC_CNT;
    if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
        return (DDI_FAILURE);

    cmd_desc = PMCS_RESET_FAILED_CNT;
    if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
        return (DDI_FAILURE);

    return (DDI_SUCCESS);
}

/*
 * Get firmware timestamp
 *
 * Issues a GET_TIME_STAMP IOMB and, on success, records (under
 * pmcs_trace_lock so the three timestamps are captured consistently):
 *   *sys_hr_ts        - current hrtime
 *   pwp->sys_timestamp - current wall-clock time
 *   *fw_ts            - 64-bit firmware timestamp from the reply
 * Returns 0 on success, -1 on any failure (no work structure, no queue
 * entry, or timeout).
 */
static int
pmcs_get_time_stamp(pmcs_hw_t *pwp, uint64_t *fw_ts, hrtime_t *sys_hr_ts)
{
    uint32_t htag, *ptr, msg[PMCS_MSG_SIZE << 1];
    int result;
    struct pmcwork *pwrk;

    pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
    if (pwrk == NULL) {
        pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__);
        return (-1);
    }
    pwrk->arg = msg;
    htag = pwrk->htag;
    msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_GET_TIME_STAMP));
    msg[1] = LE_32(pwrk->htag);

    mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
    ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
    if (ptr == NULL) {
        mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
        pmcs_pwork(pwp, pwrk);
        pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__);
        return (-1);
    }
    COPY_MESSAGE(ptr, msg, 2);
    pwrk->state = PMCS_WORK_STATE_ONCHIP;
    INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

    WAIT_FOR(pwrk, 1000, result);

    pmcs_pwork(pwp, pwrk);

    if (result) {
        pmcs_timed_out(pwp, htag, __func__);
        return (-1);
    }

    /* Capture system and firmware timestamps atomically w.r.t. tracing. */
    mutex_enter(&pmcs_trace_lock);
    *sys_hr_ts = gethrtime();
    gethrestime(&pwp->sys_timestamp);
    *fw_ts = LE_32(msg[2]) | (((uint64_t)LE_32(msg[3])) << 32);
    mutex_exit(&pmcs_trace_lock);
    return (0);
}

/*
 * Dump all pertinent registers
 */

void
pmcs_register_dump(pmcs_hw_t *pwp)
{
    int i;
    uint32_t val;

    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump start",
        ddi_get_instance(pwp->dip));
    /* Message unit doorbell and scratch registers */
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
        "OBDB (intr): 0x%08x (mask): 0x%08x (clear): 0x%08x",
        pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB),
        pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_MASK),
        pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR));
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH0: 0x%08x",
        pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0));
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH1: 0x%08x",
        pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1));
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH2: 0x%08x",
        pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2));
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH3: 0x%08x",
        pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH3));
    /* Inbound/outbound queue consumer and producer indices */
    for (i = 0; i < PMCS_NIQ; i++) {
        pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "IQ %d: CI %u PI %u",
            i, pmcs_rd_iqci(pwp, i), pmcs_rd_iqpi(pwp, i));
    }
    for (i = 0; i < PMCS_NOQ; i++) {
        pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "OQ %d: CI %u PI %u",
            i, pmcs_rd_oqci(pwp, i), pmcs_rd_oqpi(pwp, i));
    }
    /* General status table */
    val = pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE);
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
        "GST TABLE BASE: 0x%08x (STATE=0x%x QF=%d GSTLEN=%d HMI_ERR=0x%x)",
        val, PMCS_MPI_S(val), PMCS_QF(val), PMCS_GSTLEN(val) * 4,
        PMCS_HMI_ERR(val));
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ0: 0x%08x",
        pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ0));
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ1: 0x%08x",
        pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ1));
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
"GST TABLE MSGU TICK: 0x%08x",
        pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK));
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IOP TICK: 0x%08x",
        pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK));
    /* Per-phy started/link state and raw error info from the GST */
    for (i = 0; i < pwp->nphy; i++) {
        uint32_t rerrf, pinfo, started = 0, link = 0;
        pinfo = pmcs_rd_gst_tbl(pwp, PMCS_GST_PHY_INFO(i));
        if (pinfo & 1) {
            started = 1;
            link = pinfo & 2;
        }
        rerrf = pmcs_rd_gst_tbl(pwp, PMCS_GST_RERR_INFO(i));
        pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
            "GST TABLE PHY%d STARTED=%d LINK=%d RERR=0x%08x",
            i, started, link, rerrf);
    }
    pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump end",
        ddi_get_instance(pwp->dip));
}

/*
 * Handle SATA Abort and other error processing
 *
 * Walks the entire phy tree iteratively (using pnext_uplevel[] as an
 * explicit stack of pending siblings) and, for each phy with a pending
 * abort, issues the abort.  SATA phys flagged need_rl_ext first get an
 * NCQ abort (falling back to a link reset if that fails).  Phys on an
 * inactive iport additionally get their target state cleared.
 * Always returns 0.
 */
int
pmcs_abort_handler(pmcs_hw_t *pwp)
{
    pmcs_phy_t *pptr, *pnext, *pnext_uplevel[PMCS_MAX_XPND];
    pmcs_xscsi_t *tgt;
    int r, level = 0;

    pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s", __func__);

    mutex_enter(&pwp->lock);
    pptr = pwp->root_phys;
    mutex_exit(&pwp->lock);

    while (pptr) {
        /*
         * XXX: Need to make sure this doesn't happen
         * XXX: when non-NCQ commands are running.
         */
        pmcs_lock_phy(pptr);
        if (pptr->need_rl_ext) {
            ASSERT(pptr->dtype == SATA);
            /* ENOMEM-style failures just defer this phy to a later pass */
            if (pmcs_acquire_scratch(pwp, B_FALSE)) {
                goto next_phy;
            }
            r = pmcs_sata_abort_ncq(pwp, pptr);
            pmcs_release_scratch(pwp);
            if (r == ENOMEM) {
                goto next_phy;
            }
            if (r) {
                r = pmcs_reset_phy(pwp, pptr,
                    PMCS_PHYOP_LINK_RESET);
                if (r == ENOMEM) {
                    goto next_phy;
                }
                /* what if other failures happened? */
                pptr->abort_pending = 1;
                pptr->abort_sent = 0;
            }
        }
        if (pptr->abort_pending == 0 || pptr->abort_sent) {
            goto next_phy;
        }
        pptr->abort_pending = 0;
        if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) == ENOMEM) {
            /* Couldn't issue the abort now; keep it pending */
            pptr->abort_pending = 1;
            goto next_phy;
        }
        pptr->abort_sent = 1;

        /*
         * If the iport is no longer active, flush the queues
         */
        if ((pptr->iport == NULL) ||
            (pptr->iport->ua_state != UA_ACTIVE)) {
            tgt = pptr->target;
            if (tgt) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt,
                    "%s: Clearing target 0x%p, inactive iport",
                    __func__, (void *) tgt);
                mutex_enter(&tgt->statlock);
                pmcs_clear_xp(pwp, tgt);
                mutex_exit(&tgt->statlock);
            }
        }

next_phy:
        /* Depth-first step: descend to children, else next sibling/uplevel */
        if (pptr->children) {
            pnext = pptr->children;
            pnext_uplevel[level++] = pptr->sibling;
        } else {
            pnext = pptr->sibling;
            while ((pnext == NULL) && (level > 0)) {
                pnext = pnext_uplevel[--level];
            }
        }

        pmcs_unlock_phy(pptr);
        pptr = pnext;
    }

    return (0);
}

/*
 * Register a device (get a device handle for it).
 * Called with PHY lock held.
 */
/*
 * Issue a REGISTER_DEVICE IOMB for pptr and record the resulting device
 * handle in pptr->device_id on success.
 *
 * Returns:
 *   0         - success (device_id/valid_device_id updated)
 *   ENOMEM    - no queue entry or work structure; caller may retry
 *   ETIMEDOUT - no reply within 250ms
 *   EEXIST    - firmware says already registered but devid doesn't validate
 *   EIO       - firmware error status, or bogus 0xffffffff devid (F/W bug)
 *
 * The PHY lock is dropped across the wait and reacquired before return.
 */
int
pmcs_register_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
    struct pmcwork *pwrk;
    int result = 0;
    uint32_t *msg;
    uint32_t tmp, status;
    uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];

    mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
    msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

    if (msg == NULL ||
        (pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
        mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
        result = ENOMEM;
        goto out;
    }

    pwrk->arg = iomb;
    pwrk->dtype = pptr->dtype;

    msg[1] = LE_32(pwrk->htag);
    msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_REGISTER_DEVICE));
    tmp = PMCS_DEVREG_TLR |
        (pptr->link_rate << PMCS_DEVREG_LINK_RATE_SHIFT);
    /* Root phys encode their phy number alongside the port id. */
    if (IS_ROOT_PHY(pptr)) {
        msg[2] = LE_32(pptr->portid |
            (pptr->phynum << PMCS_PHYID_SHIFT));
    } else {
        msg[2] = LE_32(pptr->portid);
    }
    /* Device type: direct-attached SATA, expander-attached SATA, or SAS. */
    if (pptr->dtype == SATA) {
        if (IS_ROOT_PHY(pptr)) {
            tmp |= PMCS_DEVREG_TYPE_SATA_DIRECT;
        } else {
            tmp |= PMCS_DEVREG_TYPE_SATA;
        }
    } else {
        tmp |= PMCS_DEVREG_TYPE_SAS;
    }
    msg[3] = LE_32(tmp);
    msg[4] = LE_32(PMCS_DEVREG_IT_NEXUS_TIMEOUT);
    (void) memcpy(&msg[5], pptr->sas_address, 8);

    CLEAN_MESSAGE(msg, 7);
    pwrk->state = PMCS_WORK_STATE_ONCHIP;
    INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

    /* Drop the PHY lock while waiting for the chip's reply. */
    pmcs_unlock_phy(pptr);
    WAIT_FOR(pwrk, 250, result);
    pmcs_lock_phy(pptr);
    pmcs_pwork(pwp, pwrk);

    if (result) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
        result = ETIMEDOUT;
        goto out;
    }
    status = LE_32(iomb[2]);
    tmp = LE_32(iomb[3]);
    switch (status) {
    case PMCS_DEVREG_OK:
    case PMCS_DEVREG_DEVICE_ALREADY_REGISTERED:
    case PMCS_DEVREG_PHY_ALREADY_REGISTERED:
        if (pmcs_validate_devid(pwp->root_phys, pptr, tmp) == B_FALSE) {
            result = EEXIST;
            goto out;
        } else if (status != PMCS_DEVREG_OK) {
            if (tmp == 0xffffffff) {	/* F/W bug */
                pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
                    "%s: phy %s already has bogus devid 0x%x",
                    __func__, pptr->path, tmp);
                result = EIO;
                goto out;
            } else {
                pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
                    "%s: phy %s already has a device id 0x%x",
                    __func__, pptr->path, tmp);
            }
        }
        break;
    default:
        pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
            "%s: status 0x%x when trying to register device %s",
            __func__, status, pptr->path);
        result = EIO;
        goto out;
    }
    pptr->device_id = tmp;
    pptr->valid_device_id = 1;
    pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Phy %s/" SAS_ADDR_FMT
        " registered with device_id 0x%x (portid %d)", pptr->path,
        SAS_ADDR_PRT(pptr->sas_address), tmp, pptr->portid);
out:
    return (result);
}

/*
 * Deregister a device (remove a device handle).
 * Called with PHY locked.
 *
 * Best-effort: all failures (no work structure, no queue entry, timeout)
 * simply return without changing phy state.  On firmware success the
 * phy's device handle and configured state are cleared.  The PHY lock is
 * dropped across the wait and reacquired before return.
 */
void
pmcs_deregister_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
    struct pmcwork *pwrk;
    uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
    uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
    int result;

    pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
    if (pwrk == NULL) {
        return;
    }

    pwrk->arg = iomb;
    pwrk->dtype = pptr->dtype;
    mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
    ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
    if (ptr == NULL) {
        mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
        pmcs_pwork(pwp, pwrk);
        return;
    }
    msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
        PMCIN_DEREGISTER_DEVICE_HANDLE));
    msg[1] = LE_32(pwrk->htag);
    msg[2] = LE_32(pptr->device_id);
    pwrk->state = PMCS_WORK_STATE_ONCHIP;
    COPY_MESSAGE(ptr, msg, 3);
    INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

    pmcs_unlock_phy(pptr);
    WAIT_FOR(pwrk, 250, result);
    pmcs_pwork(pwp, pwrk);
    pmcs_lock_phy(pptr);

    if
(result) { 1395 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__); 1396 return; 1397 } 1398 status = LE_32(iomb[2]); 1399 if (status != PMCOUT_STATUS_OK) { 1400 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 1401 "%s: status 0x%x when trying to deregister device %s", 1402 __func__, status, pptr->path); 1403 } else { 1404 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 1405 "%s: device %s deregistered", __func__, pptr->path); 1406 pptr->valid_device_id = 0; 1407 pptr->device_id = PMCS_INVALID_DEVICE_ID; 1408 pptr->configured = 0; 1409 pptr->deregister_wait = 0; 1410 } 1411 } 1412 1413 /* 1414 * Deregister all registered devices. 1415 */ 1416 void 1417 pmcs_deregister_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 1418 { 1419 /* 1420 * Start at the maximum level and walk back to level 0. This only 1421 * gets done during detach after all threads and timers have been 1422 * destroyed. 1423 */ 1424 while (phyp) { 1425 if (phyp->children) { 1426 pmcs_deregister_devices(pwp, phyp->children); 1427 } 1428 pmcs_lock_phy(phyp); 1429 if (phyp->valid_device_id) { 1430 pmcs_deregister_device(pwp, phyp); 1431 } 1432 pmcs_unlock_phy(phyp); 1433 phyp = phyp->sibling; 1434 } 1435 } 1436 1437 /* 1438 * Perform a 'soft' reset on the PMC chip 1439 */ 1440 int 1441 pmcs_soft_reset(pmcs_hw_t *pwp, boolean_t no_restart) 1442 { 1443 uint32_t s2, sfrbits, gsm, rapchk, wapchk, wdpchk, spc, tsmode; 1444 pmcs_phy_t *pptr; 1445 char *msg = NULL; 1446 int i; 1447 1448 /* 1449 * Disable interrupts 1450 */ 1451 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1452 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff); 1453 1454 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%s", __func__); 1455 1456 if (pwp->locks_initted) { 1457 mutex_enter(&pwp->lock); 1458 } 1459 pwp->blocked = 1; 1460 1461 /* 1462 * Clear our softstate copies of the MSGU and IOP heartbeats. 
1463 */ 1464 pwp->last_msgu_tick = pwp->last_iop_tick = 0; 1465 1466 /* 1467 * Step 1 1468 */ 1469 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2); 1470 if ((s2 & PMCS_MSGU_HOST_SOFT_RESET_READY) == 0) { 1471 pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE); 1472 pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE); 1473 for (i = 0; i < 100; i++) { 1474 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) & 1475 PMCS_MSGU_HOST_SOFT_RESET_READY; 1476 if (s2) { 1477 break; 1478 } 1479 drv_usecwait(10000); 1480 } 1481 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) & 1482 PMCS_MSGU_HOST_SOFT_RESET_READY; 1483 if (s2 == 0) { 1484 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1485 "%s: PMCS_MSGU_HOST_SOFT_RESET_READY never came " 1486 "ready", __func__); 1487 pmcs_register_dump(pwp); 1488 if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 1489 PMCS_MSGU_CPU_SOFT_RESET_READY) == 0 || 1490 (pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) & 1491 PMCS_MSGU_CPU_SOFT_RESET_READY) == 0) { 1492 pwp->state = STATE_DEAD; 1493 pwp->blocked = 0; 1494 if (pwp->locks_initted) { 1495 mutex_exit(&pwp->lock); 1496 } 1497 return (-1); 1498 } 1499 } 1500 } 1501 1502 /* 1503 * Step 2 1504 */ 1505 pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_IOP, 0); 1506 drv_usecwait(10); 1507 pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_AAP1, 0); 1508 drv_usecwait(10); 1509 pmcs_wr_topunit(pwp, PMCS_EVENT_INT_ENABLE, 0); 1510 drv_usecwait(10); 1511 pmcs_wr_topunit(pwp, PMCS_EVENT_INT_STAT, 1512 pmcs_rd_topunit(pwp, PMCS_EVENT_INT_STAT)); 1513 drv_usecwait(10); 1514 pmcs_wr_topunit(pwp, PMCS_ERROR_INT_ENABLE, 0); 1515 drv_usecwait(10); 1516 pmcs_wr_topunit(pwp, PMCS_ERROR_INT_STAT, 1517 pmcs_rd_topunit(pwp, PMCS_ERROR_INT_STAT)); 1518 drv_usecwait(10); 1519 1520 sfrbits = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 1521 PMCS_MSGU_AAP_SFR_PROGRESS; 1522 sfrbits ^= PMCS_MSGU_AAP_SFR_PROGRESS; 1523 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "PMCS_MSGU_HOST_SCRATCH0 " 1524 "%08x -> %08x", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0), 1525 
HST_SFT_RESET_SIG); 1526 pmcs_wr_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0, HST_SFT_RESET_SIG); 1527 1528 /* 1529 * Step 3 1530 */ 1531 gsm = pmcs_rd_gsm_reg(pwp, 0, GSM_CFG_AND_RESET); 1532 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm, 1533 gsm & ~PMCS_SOFT_RESET_BITS); 1534 pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm & ~PMCS_SOFT_RESET_BITS); 1535 1536 /* 1537 * Step 4 1538 */ 1539 rapchk = pmcs_rd_gsm_reg(pwp, 0, READ_ADR_PARITY_CHK_EN); 1540 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN " 1541 "%08x -> %08x", rapchk, 0); 1542 pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, 0); 1543 wapchk = pmcs_rd_gsm_reg(pwp, 0, WRITE_ADR_PARITY_CHK_EN); 1544 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN " 1545 "%08x -> %08x", wapchk, 0); 1546 pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, 0); 1547 wdpchk = pmcs_rd_gsm_reg(pwp, 0, WRITE_DATA_PARITY_CHK_EN); 1548 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN " 1549 "%08x -> %08x", wdpchk, 0); 1550 pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, 0); 1551 1552 /* 1553 * Step 5 1554 */ 1555 drv_usecwait(100); 1556 1557 /* 1558 * Step 5.5 (Temporary workaround for 1.07.xx Beta) 1559 */ 1560 tsmode = pmcs_rd_gsm_reg(pwp, 0, PMCS_GPIO_TRISTATE_MODE_ADDR); 1561 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GPIO TSMODE %08x -> %08x", 1562 tsmode, tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1)); 1563 pmcs_wr_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR, 1564 tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1)); 1565 drv_usecwait(10); 1566 1567 /* 1568 * Step 6 1569 */ 1570 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1571 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1572 spc, spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1573 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, 1574 spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1575 drv_usecwait(10); 1576 1577 /* 1578 * Step 7 1579 */ 1580 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1581 
pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1582 spc, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB)); 1583 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB)); 1584 1585 /* 1586 * Step 8 1587 */ 1588 drv_usecwait(100); 1589 1590 /* 1591 * Step 9 1592 */ 1593 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1594 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1595 spc, spc | (BDMA_CORE_RSTB|OSSP_RSTB)); 1596 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc | (BDMA_CORE_RSTB|OSSP_RSTB)); 1597 1598 /* 1599 * Step 10 1600 */ 1601 drv_usecwait(100); 1602 1603 /* 1604 * Step 11 1605 */ 1606 gsm = pmcs_rd_gsm_reg(pwp, 0, GSM_CFG_AND_RESET); 1607 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm, 1608 gsm | PMCS_SOFT_RESET_BITS); 1609 pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm | PMCS_SOFT_RESET_BITS); 1610 drv_usecwait(10); 1611 1612 /* 1613 * Step 12 1614 */ 1615 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN " 1616 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, READ_ADR_PARITY_CHK_EN), 1617 rapchk); 1618 pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, rapchk); 1619 drv_usecwait(10); 1620 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN " 1621 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, WRITE_ADR_PARITY_CHK_EN), 1622 wapchk); 1623 pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, wapchk); 1624 drv_usecwait(10); 1625 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN " 1626 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, WRITE_DATA_PARITY_CHK_EN), 1627 wapchk); 1628 pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, wdpchk); 1629 drv_usecwait(10); 1630 1631 /* 1632 * Step 13 1633 */ 1634 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1635 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1636 spc, spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1637 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, 1638 spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1639 1640 /* 1641 * Step 14 1642 */ 1643 
drv_usecwait(100); 1644 1645 /* 1646 * Step 15 1647 */ 1648 for (spc = 0, i = 0; i < 1000; i++) { 1649 drv_usecwait(1000); 1650 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1); 1651 if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) == sfrbits) { 1652 break; 1653 } 1654 } 1655 1656 if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) != sfrbits) { 1657 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1658 "SFR didn't toggle (sfr 0x%x)", spc); 1659 pwp->state = STATE_DEAD; 1660 pwp->blocked = 0; 1661 if (pwp->locks_initted) { 1662 mutex_exit(&pwp->lock); 1663 } 1664 return (-1); 1665 } 1666 1667 /* 1668 * Step 16 1669 */ 1670 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1671 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff); 1672 1673 /* 1674 * Wait for up to 5 seconds for AAP state to come either ready or error. 1675 */ 1676 for (i = 0; i < 50; i++) { 1677 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 1678 PMCS_MSGU_AAP_STATE_MASK; 1679 if (spc == PMCS_MSGU_AAP_STATE_ERROR || 1680 spc == PMCS_MSGU_AAP_STATE_READY) { 1681 break; 1682 } 1683 drv_usecwait(100000); 1684 } 1685 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1); 1686 if ((spc & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) { 1687 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1688 "soft reset failed (state 0x%x)", spc); 1689 pwp->state = STATE_DEAD; 1690 pwp->blocked = 0; 1691 if (pwp->locks_initted) { 1692 mutex_exit(&pwp->lock); 1693 } 1694 return (-1); 1695 } 1696 1697 /* Clear the firmware log */ 1698 if (pwp->fwlogp) { 1699 bzero(pwp->fwlogp, PMCS_FWLOG_SIZE); 1700 } 1701 1702 /* Reset our queue indices and entries */ 1703 bzero(pwp->shadow_iqpi, sizeof (pwp->shadow_iqpi)); 1704 bzero(pwp->last_iqci, sizeof (pwp->last_iqci)); 1705 bzero(pwp->last_htag, sizeof (pwp->last_htag)); 1706 for (i = 0; i < PMCS_NIQ; i++) { 1707 if (pwp->iqp[i]) { 1708 bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 1709 pmcs_wr_iqpi(pwp, i, 0); 1710 pmcs_wr_iqci(pwp, i, 0); 1711 } 1712 } 1713 for (i = 0; i < PMCS_NOQ; i++) { 1714 
if (pwp->oqp[i]) { 1715 bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 1716 pmcs_wr_oqpi(pwp, i, 0); 1717 pmcs_wr_oqci(pwp, i, 0); 1718 } 1719 1720 } 1721 1722 if (pwp->state == STATE_DEAD || pwp->state == STATE_UNPROBING || 1723 pwp->state == STATE_PROBING || pwp->locks_initted == 0) { 1724 pwp->blocked = 0; 1725 if (pwp->locks_initted) { 1726 mutex_exit(&pwp->lock); 1727 } 1728 return (0); 1729 } 1730 1731 /* 1732 * Return at this point if we dont need to startup. 1733 */ 1734 if (no_restart) { 1735 return (0); 1736 } 1737 1738 ASSERT(pwp->locks_initted != 0); 1739 1740 /* 1741 * Flush the target queues and clear each target's PHY 1742 */ 1743 if (pwp->targets) { 1744 for (i = 0; i < pwp->max_dev; i++) { 1745 pmcs_xscsi_t *xp = pwp->targets[i]; 1746 1747 if (xp == NULL) { 1748 continue; 1749 } 1750 1751 mutex_enter(&xp->statlock); 1752 pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES); 1753 xp->phy = NULL; 1754 mutex_exit(&xp->statlock); 1755 } 1756 } 1757 1758 /* 1759 * Zero out the ports list, free non root phys, clear root phys 1760 */ 1761 bzero(pwp->ports, sizeof (pwp->ports)); 1762 pmcs_free_all_phys(pwp, pwp->root_phys); 1763 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 1764 pmcs_lock_phy(pptr); 1765 pmcs_clear_phy(pwp, pptr); 1766 pptr->target = NULL; 1767 pmcs_unlock_phy(pptr); 1768 } 1769 1770 /* 1771 * Restore Interrupt Mask 1772 */ 1773 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask); 1774 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff); 1775 1776 pwp->mpi_table_setup = 0; 1777 mutex_exit(&pwp->lock); 1778 1779 /* 1780 * Set up MPI again. 
 */
    if (pmcs_setup(pwp)) {
        msg = "unable to setup MPI tables again";
        goto fail_restart;
    }
    pmcs_report_fwversion(pwp);

    /*
     * Restart MPI
     */
    if (pmcs_start_mpi(pwp)) {
        msg = "unable to restart MPI again";
        goto fail_restart;
    }

    mutex_enter(&pwp->lock);
    SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
    mutex_exit(&pwp->lock);

    /*
     * Run any completions
     */
    PMCS_CQ_RUN(pwp);

    /*
     * Delay
     */
    drv_usecwait(1000000);
    return (0);

fail_restart:
    /* MPI could not be brought back; mark the HBA dead. */
    mutex_enter(&pwp->lock);
    pwp->state = STATE_DEAD;
    mutex_exit(&pwp->lock);
    pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
        "%s: Failed: %s", __func__, msg);
    return (-1);
}


/*
 * Perform a 'hot' reset, which will soft reset the chip and
 * restore the state back to pre-reset context. Called with pwp
 * lock held.
 *
 * Returns DDI_SUCCESS/DDI_FAILURE; the pwp lock is held again on
 * return in either case.
 */
int
pmcs_hot_reset(pmcs_hw_t *pwp)
{
    pmcs_iport_t *iport;

    ASSERT(mutex_owned(&pwp->lock));
    pwp->state = STATE_IN_RESET;

    /*
     * For any iports on this HBA, report empty target sets and
     * then tear them down.
     */
    rw_enter(&pwp->iports_lock, RW_READER);
    for (iport = list_head(&pwp->iports); iport != NULL;
        iport = list_next(&pwp->iports, iport)) {
        mutex_enter(&iport->lock);
        (void) scsi_hba_tgtmap_set_begin(iport->iss_tgtmap);
        (void) scsi_hba_tgtmap_set_end(iport->iss_tgtmap, 0);
        pmcs_iport_teardown_phys(iport);
        mutex_exit(&iport->lock);
    }
    rw_exit(&pwp->iports_lock);

    /* Grab a register dump, in the event that reset fails */
    pmcs_register_dump_int(pwp);
    mutex_exit(&pwp->lock);

    /* Ensure discovery is not running before we proceed */
    mutex_enter(&pwp->config_lock);
    while (pwp->configuring) {
        cv_wait(&pwp->config_cv, &pwp->config_lock);
    }
    mutex_exit(&pwp->config_lock);

    /* Issue soft reset and clean up related softstate */
    if (pmcs_soft_reset(pwp, B_FALSE)) {
        /*
         * Disable interrupts, in case we got far enough along to
         * enable them, then fire off ereport and service impact.
         */
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: failed soft reset", __func__);
        pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
        pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
        pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
        ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
        mutex_enter(&pwp->lock);
        pwp->state = STATE_DEAD;
        return (DDI_FAILURE);
    }

    mutex_enter(&pwp->lock);
    pwp->state = STATE_RUNNING;
    mutex_exit(&pwp->lock);

    /*
     * Finally, restart the phys, which will bring the iports back
     * up and eventually result in discovery running.
 */
    if (pmcs_start_phys(pwp)) {
        /* We should be up and running now, so retry */
        if (pmcs_start_phys(pwp)) {
            /* Apparently unable to restart PHYs, fail */
            pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                "%s: failed to restart PHYs after soft reset",
                __func__);
            mutex_enter(&pwp->lock);
            return (DDI_FAILURE);
        }
    }

    mutex_enter(&pwp->lock);
    return (DDI_SUCCESS);
}

/*
 * Reset a device or a logical unit.
 *
 * For SAS devices issues a LOGICAL_UNIT_RESET TMF (lun == -1 is mapped
 * to LUN 0 - see the XXX below); for SATA devices (LUN 0 only) issues a
 * link reset; SMP devices are rejected with EINVAL.  Regardless of the
 * reset outcome, an ABORT-all is then issued to harvest any commands
 * killed by the reset.  Returns 0 on success or an errno value.
 */
int
pmcs_reset_dev(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint64_t lun)
{
    int rval = 0;

    if (pptr == NULL) {
        return (ENXIO);
    }

    pmcs_lock_phy(pptr);
    if (pptr->dtype == SAS) {
        /*
         * Some devices do not support SAS_I_T_NEXUS_RESET as
         * it is not a mandatory (in SAM4) task management
         * function, while LOGIC_UNIT_RESET is mandatory.
         *
         * The problem here is that we need to iterate over
         * all known LUNs to emulate the semantics of
         * "RESET_TARGET".
         *
         * XXX: FIX ME
         */
        if (lun == (uint64_t)-1) {
            lun = 0;
        }
        rval = pmcs_ssp_tmf(pwp, pptr, SAS_LOGICAL_UNIT_RESET, 0, lun,
            NULL);
    } else if (pptr->dtype == SATA) {
        if (lun != 0ull) {
            pmcs_unlock_phy(pptr);
            return (EINVAL);
        }
        rval = pmcs_reset_phy(pwp, pptr, PMCS_PHYOP_LINK_RESET);
    } else {
        pmcs_unlock_phy(pptr);
        pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
            "%s: cannot reset a SMP device yet (%s)",
            __func__, pptr->path);
        return (EINVAL);
    }

    /*
     * Now harvest any commands killed by this action
     * by issuing an ABORT for all commands on this device.
     *
     * We do this even if the the tmf or reset fails (in case there
     * are any dead commands around to be harvested *anyway*).
     * We don't have to await for the abort to complete.
     */
    if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
        pptr->abort_pending = 1;
        SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
    }

    pmcs_unlock_phy(pptr);
    return (rval);
}

/*
 * Called with PHY locked.
 *
 * Ensure pptr has a valid firmware device handle, registering it if
 * necessary.  Returns 0 if the phy has (or now has) a valid handle,
 * -1 on any failure; retryable failures (ENOMEM, ETIMEDOUT within the
 * config_stop window) mark the phy changed and restart discovery.
 */
static int
pmcs_get_device_handle(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
    if (pptr->valid_device_id == 0) {
        int result = pmcs_register_device(pwp, pptr);

        /*
         * If we changed while registering, punt
         */
        if (pptr->changed) {
            RESTART_DISCOVERY(pwp);
            return (-1);
        }

        /*
         * If we had a failure to register, check against errors.
         * An ENOMEM error means we just retry (temp resource shortage).
         */
        if (result == ENOMEM) {
            PHY_CHANGED(pwp, pptr);
            RESTART_DISCOVERY(pwp);
            return (-1);
        }

        /*
         * An ETIMEDOUT error means we retry (if our counter isn't
         * exhausted)
         */
        if (result == ETIMEDOUT) {
            if (ddi_get_lbolt() < pptr->config_stop) {
                PHY_CHANGED(pwp, pptr);
                RESTART_DISCOVERY(pwp);
            } else {
                pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
                    "%s: Retries exhausted for %s, killing",
                    __func__, pptr->path);
                pptr->config_stop = 0;
                pmcs_kill_changed(pwp, pptr, 0);
            }
            return (-1);
        }
        /*
         * Other errors or no valid device id is fatal, but don't
         * preclude a future action.
 */
        if (result || pptr->valid_device_id == 0) {
            pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
                "%s: %s could not be registered", __func__,
                pptr->path);
            return (-1);
        }
    }
    return (0);
}

/*
 * Create the SCSI target map for an iport.  Returns B_TRUE on success,
 * B_FALSE on failure.  Uses the module-static csync/stable intervals.
 */
int
pmcs_iport_tgtmap_create(pmcs_iport_t *iport)
{
    ASSERT(iport);
    if (iport == NULL)
        return (B_FALSE);

    pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);

    /* create target map */
    if (scsi_hba_tgtmap_create(iport->dip, SCSI_TM_FULLSET,
        tgtmap_csync_usec, tgtmap_stable_usec, (void *)iport,
        pmcs_tgtmap_activate_cb, pmcs_tgtmap_deactivate_cb,
        &iport->iss_tgtmap) != DDI_SUCCESS) {
        pmcs_prt(iport->pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: failed to create tgtmap", __func__);
        return (B_FALSE);
    }
    return (B_TRUE);
}

/*
 * Destroy an iport's SCSI target map.  Returns B_TRUE on success,
 * B_FALSE if the iport or its map is absent.
 */
int
pmcs_iport_tgtmap_destroy(pmcs_iport_t *iport)
{
    ASSERT(iport && iport->iss_tgtmap);
    if ((iport == NULL) || (iport->iss_tgtmap == NULL))
        return (B_FALSE);

    pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);

    /* destroy target map */
    scsi_hba_tgtmap_destroy(iport->iss_tgtmap);
    return (B_TRUE);
}

/*
 * Remove all phys from an iport's phymap and empty it's phylist.
 * Called when a port has been reset by the host (see pmcs_intr.c)
 * or prior to issuing a soft reset if we detect a stall on the chip
 * (see pmcs_attach.c).
 */
void
pmcs_iport_teardown_phys(pmcs_iport_t *iport)
{
    pmcs_hw_t *pwp;
    sas_phymap_phys_t *phys;
    int phynum;

    ASSERT(iport);
    ASSERT(mutex_owned(&iport->lock));
    pwp = iport->pwp;
    ASSERT(pwp);

    /*
     * Remove all phys from the iport handle's phy list, unset its
     * primary phy and update its state.
     */
    pmcs_remove_phy_from_iport(iport, NULL);
    iport->pptr = NULL;
    iport->ua_state = UA_PEND_DEACTIVATE;

    /* Remove all phys from the phymap */
    phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
    if (phys) {
        while ((phynum = sas_phymap_phys_next(phys)) != -1) {
            (void) sas_phymap_phy_rem(pwp->hss_phymap, phynum);
        }
        sas_phymap_phys_free(phys);
    }
}

/*
 * Query the phymap and populate the iport handle passed in.
 * Called with iport lock held.
 */
int
pmcs_iport_configure_phys(pmcs_iport_t *iport)
{
    pmcs_hw_t *pwp;
    pmcs_phy_t *pptr;
    sas_phymap_phys_t *phys;
    int phynum;
    int inst;

    ASSERT(iport);
    ASSERT(mutex_owned(&iport->lock));
    pwp = iport->pwp;
    ASSERT(pwp);
    inst = ddi_get_instance(iport->dip);

    mutex_enter(&pwp->lock);
    ASSERT(pwp->root_phys != NULL);

    /*
     * Query the phymap regarding the phys in this iport and populate
     * the iport's phys list. Hereafter this list is maintained via
     * port up and down events in pmcs_intr.c
     */
    ASSERT(list_is_empty(&iport->phys));
    phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
    ASSERT(phys != NULL);
    while ((phynum = sas_phymap_phys_next(phys)) != -1) {
        /* Grab the phy pointer from root_phys */
        pptr = pwp->root_phys + phynum;
        ASSERT(pptr);
        pmcs_lock_phy(pptr);
        ASSERT(pptr->phynum == phynum);

        /*
         * Set a back pointer in the phy to this iport.
         */
        pptr->iport = iport;

        /*
         * If this phy is the primary, set a pointer to it on our
         * iport handle, and set our portid from it.
 */
        if (!pptr->subsidiary) {
            iport->pptr = pptr;
            iport->portid = pptr->portid;
        }

        /*
         * Finally, insert the phy into our list
         */
        pmcs_unlock_phy(pptr);
        pmcs_add_phy_to_iport(iport, pptr);

        pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: found "
            "phy %d [0x%p] on iport%d, refcnt(%d)", __func__, phynum,
            (void *)pptr, inst, iport->refcnt);
    }
    mutex_exit(&pwp->lock);
    sas_phymap_phys_free(phys);
    RESTART_DISCOVERY(pwp);
    return (DDI_SUCCESS);
}

/*
 * Return the iport that ua is associated with, or NULL. If an iport is
 * returned, it will be held and the caller must release the hold.
 */
static pmcs_iport_t *
pmcs_get_iport_by_ua(pmcs_hw_t *pwp, char *ua)
{
    pmcs_iport_t *iport = NULL;

    rw_enter(&pwp->iports_lock, RW_READER);
    for (iport = list_head(&pwp->iports);
        iport != NULL;
        iport = list_next(&pwp->iports, iport)) {
        mutex_enter(&iport->lock);
        if (strcmp(iport->ua, ua) == 0) {
            mutex_exit(&iport->lock);
            /* Found it; take a hold for the caller. */
            mutex_enter(&iport->refcnt_lock);
            iport->refcnt++;
            mutex_exit(&iport->refcnt_lock);
            break;
        }
        mutex_exit(&iport->lock);
    }
    rw_exit(&pwp->iports_lock);

    return (iport);
}

/*
 * Return the iport that pptr is associated with, or NULL.
 * If an iport is returned, there is a hold that the caller must release.
 *
 * Looks up the unit address for 'wwn' in the phymap, then resolves that
 * to an iport and marks it active.
 */
pmcs_iport_t *
pmcs_get_iport_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
{
    pmcs_iport_t *iport = NULL;
    char *ua;

    ua = sas_phymap_lookup_ua(pwp->hss_phymap, pwp->sas_wwns[0], wwn);
    if (ua) {
        iport = pmcs_get_iport_by_ua(pwp, ua);
        if (iport) {
            mutex_enter(&iport->lock);
            pmcs_iport_active(iport);
            pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
                "found iport [0x%p] on ua (%s), refcnt (%d)",
                __func__, (void *)iport, ua, iport->refcnt);
            mutex_exit(&iport->lock);
        }
    }

    return (iport);
}

/*
 * Promote the next phy on this port to primary, and return it.
 * Called when the primary PHY on a port is going down, but the port
 * remains up (see pmcs_intr.c).
 *
 * Returns the newly-promoted phy, or NULL if no other phy shares the
 * port.  The new primary inherits the old primary's children, target,
 * device handle and port attributes; the old primary is demoted to a
 * subsidiary with no children or target.
 */
pmcs_phy_t *
pmcs_promote_next_phy(pmcs_phy_t *prev_primary)
{
    pmcs_hw_t *pwp;
    pmcs_iport_t *iport;
    pmcs_phy_t *pptr, *child;
    int portid;

    pmcs_lock_phy(prev_primary);
    portid = prev_primary->portid;
    iport = prev_primary->iport;
    pwp = prev_primary->pwp;

    /* Use the first available phy in this port */
    for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
        if ((pptr->portid == portid) && (pptr != prev_primary)) {
            mutex_enter(&pptr->phy_lock);
            break;
        }
    }

    if (pptr == NULL) {
        pmcs_unlock_phy(prev_primary);
        return (NULL);
    }

    if (iport) {
        mutex_enter(&iport->lock);
        iport->pptr = pptr;
        mutex_exit(&iport->lock);
    }

    /* Update the phy handle with the data from the previous primary */
    pptr->children = prev_primary->children;
    child = pptr->children;
    while (child) {
        child->parent = pptr;
        child = child->sibling;
    }
    pptr->ncphy = prev_primary->ncphy;
    pptr->width = prev_primary->width;
    pptr->dtype = prev_primary->dtype;
    pptr->pend_dtype = prev_primary->pend_dtype;
    pptr->tolerates_sas2 =
prev_primary->tolerates_sas2; 2261 pptr->atdt = prev_primary->atdt; 2262 pptr->portid = prev_primary->portid; 2263 pptr->link_rate = prev_primary->link_rate; 2264 pptr->configured = prev_primary->configured; 2265 pptr->iport = prev_primary->iport; 2266 pptr->target = prev_primary->target; 2267 if (pptr->target) { 2268 pptr->target->phy = pptr; 2269 } 2270 2271 /* Update the phy mask properties for the affected PHYs */ 2272 /* Clear the current values... */ 2273 pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp, 2274 pptr->tgt_port_pm_tmp, B_FALSE); 2275 /* ...replace with the values from prev_primary... */ 2276 pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm_tmp, 2277 prev_primary->tgt_port_pm_tmp, B_TRUE); 2278 /* ...then clear prev_primary's PHY values from the new primary */ 2279 pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm, 2280 prev_primary->tgt_port_pm, B_FALSE); 2281 /* Clear the prev_primary's values */ 2282 pmcs_update_phy_pm_props(prev_primary, prev_primary->att_port_pm_tmp, 2283 prev_primary->tgt_port_pm_tmp, B_FALSE); 2284 2285 pptr->subsidiary = 0; 2286 2287 prev_primary->subsidiary = 1; 2288 prev_primary->children = NULL; 2289 prev_primary->target = NULL; 2290 pptr->device_id = prev_primary->device_id; 2291 pptr->valid_device_id = 1; 2292 pmcs_unlock_phy(prev_primary); 2293 2294 /* 2295 * We call pmcs_unlock_phy() on pptr because it now contains the 2296 * list of children. 2297 */ 2298 pmcs_unlock_phy(pptr); 2299 2300 return (pptr); 2301 } 2302 2303 void 2304 pmcs_rele_iport(pmcs_iport_t *iport) 2305 { 2306 /* 2307 * Release a refcnt on this iport. If this is the last reference, 2308 * signal the potential waiter in pmcs_iport_unattach(). 
2309 */ 2310 ASSERT(iport->refcnt > 0); 2311 mutex_enter(&iport->refcnt_lock); 2312 iport->refcnt--; 2313 mutex_exit(&iport->refcnt_lock); 2314 if (iport->refcnt == 0) { 2315 cv_signal(&iport->refcnt_cv); 2316 } 2317 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: iport " 2318 "[0x%p] refcnt (%d)", __func__, (void *)iport, iport->refcnt); 2319 } 2320 2321 void 2322 pmcs_phymap_activate(void *arg, char *ua, void **privp) 2323 { 2324 _NOTE(ARGUNUSED(privp)); 2325 pmcs_hw_t *pwp = arg; 2326 pmcs_iport_t *iport = NULL; 2327 2328 mutex_enter(&pwp->lock); 2329 if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD) || 2330 (pwp->state == STATE_IN_RESET)) { 2331 mutex_exit(&pwp->lock); 2332 return; 2333 } 2334 pwp->phymap_active++; 2335 mutex_exit(&pwp->lock); 2336 2337 if (scsi_hba_iportmap_iport_add(pwp->hss_iportmap, ua, NULL) != 2338 DDI_SUCCESS) { 2339 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to " 2340 "add iport handle on unit address [%s]", __func__, ua); 2341 } else { 2342 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: " 2343 "phymap_active count (%d), added iport handle on unit " 2344 "address [%s]", __func__, pwp->phymap_active, ua); 2345 } 2346 2347 /* Set the HBA softstate as our private data for this unit address */ 2348 *privp = (void *)pwp; 2349 2350 /* 2351 * We are waiting on attach for this iport node, unless it is still 2352 * attached. This can happen if a consumer has an outstanding open 2353 * on our iport node, but the port is down. If this is the case, we 2354 * need to configure our iport here for reuse. 
2355 */ 2356 iport = pmcs_get_iport_by_ua(pwp, ua); 2357 if (iport) { 2358 mutex_enter(&iport->lock); 2359 if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) { 2360 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: " 2361 "failed to configure phys on iport [0x%p] at " 2362 "unit address (%s)", __func__, (void *)iport, ua); 2363 } 2364 pmcs_iport_active(iport); 2365 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 2366 &iport->nphy); 2367 mutex_exit(&iport->lock); 2368 pmcs_rele_iport(iport); 2369 } 2370 2371 } 2372 2373 void 2374 pmcs_phymap_deactivate(void *arg, char *ua, void *privp) 2375 { 2376 _NOTE(ARGUNUSED(privp)); 2377 pmcs_hw_t *pwp = arg; 2378 pmcs_iport_t *iport; 2379 2380 mutex_enter(&pwp->lock); 2381 pwp->phymap_active--; 2382 mutex_exit(&pwp->lock); 2383 2384 if (scsi_hba_iportmap_iport_remove(pwp->hss_iportmap, ua) != 2385 DDI_SUCCESS) { 2386 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to " 2387 "remove iport handle on unit address [%s]", __func__, ua); 2388 } else { 2389 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: " 2390 "phymap_active count (%d), removed iport handle on unit " 2391 "address [%s]", __func__, pwp->phymap_active, ua); 2392 } 2393 2394 iport = pmcs_get_iport_by_ua(pwp, ua); 2395 2396 if (iport == NULL) { 2397 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: failed " 2398 "lookup of iport handle on unit addr (%s)", __func__, ua); 2399 return; 2400 } 2401 2402 mutex_enter(&iport->lock); 2403 iport->ua_state = UA_INACTIVE; 2404 iport->portid = PMCS_IPORT_INVALID_PORT_ID; 2405 pmcs_remove_phy_from_iport(iport, NULL); 2406 mutex_exit(&iport->lock); 2407 pmcs_rele_iport(iport); 2408 } 2409 2410 /* 2411 * Top-level discovery function 2412 */ 2413 void 2414 pmcs_discover(pmcs_hw_t *pwp) 2415 { 2416 pmcs_phy_t *pptr; 2417 pmcs_phy_t *root_phy; 2418 2419 DTRACE_PROBE2(pmcs__discover__entry, ulong_t, pwp->work_flags, 2420 boolean_t, pwp->config_changed); 2421 2422 mutex_enter(&pwp->lock); 2423 2424 
if (pwp->state != STATE_RUNNING) { 2425 mutex_exit(&pwp->lock); 2426 return; 2427 } 2428 2429 /* Ensure we have at least one phymap active */ 2430 if (pwp->phymap_active == 0) { 2431 mutex_exit(&pwp->lock); 2432 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2433 "%s: phymap inactive, exiting", __func__); 2434 return; 2435 } 2436 2437 mutex_exit(&pwp->lock); 2438 2439 /* 2440 * If no iports have attached, but we have PHYs that are up, we 2441 * are waiting for iport attach to complete. Restart discovery. 2442 */ 2443 rw_enter(&pwp->iports_lock, RW_READER); 2444 if (!pwp->iports_attached) { 2445 rw_exit(&pwp->iports_lock); 2446 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2447 "%s: no iports attached, retry discovery", __func__); 2448 SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER); 2449 return; 2450 } 2451 rw_exit(&pwp->iports_lock); 2452 2453 mutex_enter(&pwp->config_lock); 2454 if (pwp->configuring) { 2455 mutex_exit(&pwp->config_lock); 2456 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2457 "%s: configuration already in progress", __func__); 2458 return; 2459 } 2460 2461 if (pmcs_acquire_scratch(pwp, B_FALSE)) { 2462 mutex_exit(&pwp->config_lock); 2463 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2464 "%s: cannot allocate scratch", __func__); 2465 SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER); 2466 return; 2467 } 2468 2469 pwp->configuring = 1; 2470 pwp->config_changed = B_FALSE; 2471 mutex_exit(&pwp->config_lock); 2472 2473 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery begin"); 2474 2475 /* 2476 * First, tell SCSA that we're beginning set operations. 2477 */ 2478 pmcs_begin_observations(pwp); 2479 2480 /* 2481 * The order of the following traversals is important. 2482 * 2483 * The first one checks for changed expanders. 2484 * 2485 * The second one aborts commands for dead devices and deregisters them. 
2486 * 2487 * The third one clears the contents of dead expanders from the tree 2488 * 2489 * The fourth one clears now dead devices in expanders that remain. 2490 */ 2491 2492 /* 2493 * 1. Check expanders marked changed (but not dead) to see if they still 2494 * have the same number of phys and the same SAS address. Mark them, 2495 * their subsidiary phys (if wide) and their descendents dead if 2496 * anything has changed. Check the devices they contain to see if 2497 * *they* have changed. If they've changed from type NOTHING we leave 2498 * them marked changed to be configured later (picking up a new SAS 2499 * address and link rate if possible). Otherwise, any change in type, 2500 * SAS address or removal of target role will cause us to mark them 2501 * (and their descendents) as dead (and cause any pending commands 2502 * and associated devices to be removed). 2503 * 2504 * NOTE: We don't want to bail on discovery if the config has 2505 * changed until *after* we run pmcs_kill_devices. 2506 */ 2507 root_phy = pwp->root_phys; 2508 pmcs_check_expanders(pwp, root_phy); 2509 2510 /* 2511 * 2. Descend the tree looking for dead devices and kill them 2512 * by aborting all active commands and then deregistering them. 2513 */ 2514 if (pmcs_kill_devices(pwp, root_phy)) { 2515 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2516 "%s: pmcs_kill_devices failed!", __func__); 2517 } 2518 2519 /* 2520 * 3. Check for dead expanders and remove their children from the tree. 2521 * By the time we get here, the devices and commands for them have 2522 * already been terminated and removed. 2523 * 2524 * We do this independent of the configuration count changing so we can 2525 * free any dead device PHYs that were discovered while checking 2526 * expanders. We ignore any subsidiary phys as pmcs_clear_expander 2527 * will take care of those. 
2528 * 2529 * NOTE: pmcs_clear_expander requires softstate lock 2530 */ 2531 mutex_enter(&pwp->lock); 2532 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 2533 /* 2534 * Call pmcs_clear_expander for every root PHY. It will 2535 * recurse and determine which (if any) expanders actually 2536 * need to be cleared. 2537 */ 2538 pmcs_lock_phy(pptr); 2539 pmcs_clear_expander(pwp, pptr, 0); 2540 pmcs_unlock_phy(pptr); 2541 } 2542 mutex_exit(&pwp->lock); 2543 2544 /* 2545 * 4. Check for dead devices and nullify them. By the time we get here, 2546 * the devices and commands for them have already been terminated 2547 * and removed. This is different from step 2 in that this just nulls 2548 * phys that are part of expanders that are still here but used to 2549 * be something but are no longer something (e.g., after a pulled 2550 * disk drive). Note that dead expanders had their contained phys 2551 * removed from the tree- here, the expanders themselves are 2552 * nullified (unless they were removed by being contained in another 2553 * expander phy). 2554 */ 2555 pmcs_clear_phys(pwp, root_phy); 2556 2557 /* 2558 * 5. Now check for and configure new devices. 2559 */ 2560 if (pmcs_configure_new_devices(pwp, root_phy)) { 2561 goto restart; 2562 } 2563 2564 out: 2565 DTRACE_PROBE2(pmcs__discover__exit, ulong_t, pwp->work_flags, 2566 boolean_t, pwp->config_changed); 2567 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery end"); 2568 2569 mutex_enter(&pwp->config_lock); 2570 2571 if (pwp->config_changed == B_FALSE) { 2572 /* 2573 * Observation is stable, report what we currently see to 2574 * the tgtmaps for delta processing. Start by setting 2575 * BEGIN on all tgtmaps. 2576 */ 2577 mutex_exit(&pwp->config_lock); 2578 if (pmcs_report_observations(pwp) == B_FALSE) { 2579 goto restart; 2580 } 2581 mutex_enter(&pwp->config_lock); 2582 } else { 2583 /* 2584 * If config_changed is TRUE, we need to reschedule 2585 * discovery now. 
2586 */ 2587 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2588 "%s: Config has changed, will re-run discovery", __func__); 2589 SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER); 2590 } 2591 2592 pmcs_release_scratch(pwp); 2593 if (!pwp->quiesced) { 2594 pwp->blocked = 0; 2595 } 2596 pwp->configuring = 0; 2597 cv_signal(&pwp->config_cv); 2598 mutex_exit(&pwp->config_lock); 2599 2600 #ifdef DEBUG 2601 pptr = pmcs_find_phy_needing_work(pwp, pwp->root_phys); 2602 if (pptr != NULL) { 2603 if (!WORK_IS_SCHEDULED(pwp, PMCS_WORK_DISCOVER)) { 2604 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 2605 "PHY %s dead=%d changed=%d configured=%d " 2606 "but no work scheduled", pptr->path, pptr->dead, 2607 pptr->changed, pptr->configured); 2608 } 2609 pmcs_unlock_phy(pptr); 2610 } 2611 #endif 2612 2613 return; 2614 2615 restart: 2616 /* Clean up and restart discovery */ 2617 pmcs_release_scratch(pwp); 2618 pmcs_flush_observations(pwp); 2619 mutex_enter(&pwp->config_lock); 2620 pwp->configuring = 0; 2621 cv_signal(&pwp->config_cv); 2622 RESTART_DISCOVERY_LOCKED(pwp); 2623 mutex_exit(&pwp->config_lock); 2624 } 2625 2626 /* 2627 * Return any PHY that needs to have scheduled work done. The PHY is returned 2628 * locked. 2629 */ 2630 static pmcs_phy_t * 2631 pmcs_find_phy_needing_work(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 2632 { 2633 pmcs_phy_t *cphyp, *pnext; 2634 2635 while (pptr) { 2636 pmcs_lock_phy(pptr); 2637 2638 if (pptr->changed || (pptr->dead && pptr->valid_device_id)) { 2639 return (pptr); 2640 } 2641 2642 pnext = pptr->sibling; 2643 2644 if (pptr->children) { 2645 cphyp = pptr->children; 2646 pmcs_unlock_phy(pptr); 2647 cphyp = pmcs_find_phy_needing_work(pwp, cphyp); 2648 if (cphyp) { 2649 return (cphyp); 2650 } 2651 } else { 2652 pmcs_unlock_phy(pptr); 2653 } 2654 2655 pptr = pnext; 2656 } 2657 2658 return (NULL); 2659 } 2660 2661 /* 2662 * We may (or may not) report observations to SCSA. This is prefaced by 2663 * issuing a set_begin for each iport target map. 
 */
static void
pmcs_begin_observations(pmcs_hw_t *pwp)
{
	pmcs_iport_t		*iport;
	scsi_hba_tgtmap_t	*tgtmap;

	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		/*
		 * Unless we have at least one phy up, skip this iport.
		 * Note we don't need to lock the iport for report_skip
		 * since it is only used here.  We are doing the skip so that
		 * the phymap and iportmap stabilization times are honored -
		 * giving us the ability to recover port operation within the
		 * stabilization time without unconfiguring targets using the
		 * port.
		 */
		if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) {
			iport->report_skip = 1;
			continue;		/* skip set_begin */
		}
		iport->report_skip = 0;

		tgtmap = iport->iss_tgtmap;
		ASSERT(tgtmap);
		if (scsi_hba_tgtmap_set_begin(tgtmap) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
			    "%s: cannot set_begin tgtmap ", __func__);
			rw_exit(&pwp->iports_lock);
			return;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
		    "%s: set begin on tgtmap [0x%p]", __func__, (void *)tgtmap);
	}
	rw_exit(&pwp->iports_lock);
}

/*
 * Tell SCSA to flush the observations we've already sent (if any), as they
 * are no longer valid.
 */
static void
pmcs_flush_observations(pmcs_hw_t *pwp)
{
	pmcs_iport_t		*iport;
	scsi_hba_tgtmap_t	*tgtmap;

	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		/*
		 * Skip this iport if it has no PHYs up.
		 */
		if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) {
			continue;
		}

		tgtmap = iport->iss_tgtmap;
		ASSERT(tgtmap);
		if (scsi_hba_tgtmap_set_flush(tgtmap) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
			    "%s: Failed set_flush on tgtmap 0x%p", __func__,
			    (void *)tgtmap);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
			    "%s: set flush on tgtmap 0x%p", __func__,
			    (void *)tgtmap);
		}
	}
	rw_exit(&pwp->iports_lock);
}

/*
 * Report current observations to SCSA.
 */
static boolean_t
pmcs_report_observations(pmcs_hw_t *pwp)
{
	pmcs_iport_t		*iport;
	scsi_hba_tgtmap_t	*tgtmap;
	char			*ap;
	pmcs_phy_t		*pptr;
	uint64_t		wwn;

	/*
	 * Observation is stable, report what we currently see to the tgtmaps
	 * for delta processing.
	 */
	pptr = pwp->root_phys;

	while (pptr) {
		pmcs_lock_phy(pptr);

		/*
		 * Skip PHYs that have nothing attached or are dead.
		 */
		if ((pptr->dtype == NOTHING) || pptr->dead) {
			pmcs_unlock_phy(pptr);
			pptr = pptr->sibling;
			continue;
		}

		/* A change mid-report invalidates the whole observation */
		if (pptr->changed) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: oops, PHY %s changed; restart discovery",
			    __func__, pptr->path);
			pmcs_unlock_phy(pptr);
			return (B_FALSE);
		}

		/*
		 * Get the iport for this root PHY, then call the helper
		 * to report observations for this iport's targets
		 */
		wwn = pmcs_barray2wwn(pptr->sas_address);
		pmcs_unlock_phy(pptr);
		iport = pmcs_get_iport_by_wwn(pwp, wwn);
		if (iport == NULL) {
			/* No iport for this tgt */
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "%s: no iport for this target", __func__);
			pptr = pptr->sibling;
			continue;
		}

		pmcs_lock_phy(pptr);
		if (!iport->report_skip) {
			if (pmcs_report_iport_observations(
			    pwp, iport, pptr) == B_FALSE) {
				pmcs_rele_iport(iport);
				pmcs_unlock_phy(pptr);
				return (B_FALSE);
			}
		}
		pmcs_rele_iport(iport);
		pmcs_unlock_phy(pptr);
		pptr = pptr->sibling;
	}

	/*
	 * The observation is complete, end sets. Note we will skip any
	 * iports that are active, but have no PHYs in them (i.e. awaiting
	 * unconfigure). Set to restart discovery if we find this.
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports);
	    iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {

		if (iport->report_skip)
			continue;		/* skip set_end */

		tgtmap = iport->iss_tgtmap;
		ASSERT(tgtmap);
		if (scsi_hba_tgtmap_set_end(tgtmap, 0) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
			    "%s: cannot set_end tgtmap ", __func__);
			rw_exit(&pwp->iports_lock);
			return (B_FALSE);
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
		    "%s: set end on tgtmap [0x%p]", __func__, (void *)tgtmap);
	}

	/*
	 * Now that discovery is complete, set up the necessary
	 * DDI properties on each iport node.
	 */
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		/* Set up the 'attached-port' property on the iport */
		ap = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
		mutex_enter(&iport->lock);
		pptr = iport->pptr;
		mutex_exit(&iport->lock);
		if (pptr == NULL) {
			/*
			 * This iport is down, but has not been
			 * removed from our list (unconfigured).
			 * Set our value to '0'.
			 *
			 * NOTE(review): with a size argument of 1, snprintf
			 * can store only the terminating NUL, so ap ends up
			 * as the empty string "" rather than "0" — confirm
			 * whether consumers of this property expect "0".
			 */
			(void) snprintf(ap, 1, "%s", "0");
		} else {
			/* Otherwise, set it to remote phy's wwn */
			pmcs_lock_phy(pptr);
			wwn = pmcs_barray2wwn(pptr->sas_address);
			(void) scsi_wwn_to_wwnstr(wwn, 1, ap);
			pmcs_unlock_phy(pptr);
		}
		if (ndi_prop_update_string(DDI_DEV_T_NONE, iport->dip,
		    SCSI_ADDR_PROP_ATTACHED_PORT,  ap) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed "
			    "to set prop ("SCSI_ADDR_PROP_ATTACHED_PORT")",
			    __func__);
		}
		kmem_free(ap, PMCS_MAX_UA_SIZE);
	}
	rw_exit(&pwp->iports_lock);

	return (B_TRUE);
}

/*
 * Report observations into a particular iport's target map
 *
 * Called with phyp (and all descendents) locked
 */
static boolean_t
pmcs_report_iport_observations(pmcs_hw_t *pwp, pmcs_iport_t *iport,
    pmcs_phy_t *phyp)
{
	pmcs_phy_t		*lphyp;
	scsi_hba_tgtmap_t	*tgtmap;
	scsi_tgtmap_tgt_type_t	tgt_type;
	char			*ua;
	uint64_t		wwn;

	tgtmap = iport->iss_tgtmap;
	ASSERT(tgtmap);

	lphyp = phyp;
	while (lphyp) {
		switch (lphyp->dtype) {
		default:		/* Skip unknown PHYs. */
			/* for non-root phys, skip to sibling */
			goto next_phy;

		case SATA:
		case SAS:
			tgt_type = SCSI_TGT_SCSI_DEVICE;
			break;

		case EXPANDER:
			tgt_type = SCSI_TGT_SMP_DEVICE;
			break;
		}

		if (lphyp->dead || !lphyp->configured) {
			goto next_phy;
		}

		/*
		 * Validate the PHY's SAS address
		 */
		if (((lphyp->sas_address[0] & 0xf0) >> 4) != NAA_IEEE_REG) {
			pmcs_prt(pwp, PMCS_PRT_ERR, lphyp, NULL,
			    "PHY 0x%p (%s) has invalid SAS address; "
			    "will not enumerate", (void *)lphyp, lphyp->path);
			goto next_phy;
		}

		wwn = pmcs_barray2wwn(lphyp->sas_address);
		ua = scsi_wwn_to_wwnstr(wwn, 1, NULL);

		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, lphyp, NULL,
		    "iport_observation: adding %s on tgtmap [0x%p] phy [0x%p]",
		    ua, (void *)tgtmap, (void*)lphyp);

		if (scsi_hba_tgtmap_set_add(tgtmap, tgt_type, ua, NULL) !=
		    DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP,  NULL, NULL,
			    "%s: failed to add address %s", __func__, ua);
			scsi_free_wwnstr(ua);
			return (B_FALSE);
		}
		scsi_free_wwnstr(ua);

		/* Recurse into the expander's children, if any */
		if (lphyp->children) {
			if (pmcs_report_iport_observations(pwp, iport,
			    lphyp->children) == B_FALSE) {
				return (B_FALSE);
			}
		}

		/* for non-root phys, report siblings too */
next_phy:
		if (IS_ROOT_PHY(lphyp)) {
			lphyp = NULL;
		} else {
			lphyp = lphyp->sibling;
		}
	}

	return (B_TRUE);
}

/*
 * Check for and configure new devices.
 *
 * If the changed device is a SATA device, add a SATA device.
 *
 * If the changed device is a SAS device, add a SAS device.
 *
 * If the changed device is an EXPANDER device, do a REPORT
 * GENERAL SMP command to find out the number of contained phys.
 *
 * For each number of contained phys, allocate a phy, do a
 * DISCOVERY SMP command to find out what kind of device it
 * is and add it to the linked list of phys on the *next* level.
 *
 * NOTE: pptr passed in by the caller will be a root PHY
 */
static int
pmcs_configure_new_devices(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int rval = 0;
	pmcs_iport_t *iport;
	pmcs_phy_t *pnext, *orig_pptr = pptr, *root_phy, *pchild;
	uint64_t wwn;

	/*
	 * First, walk through each PHY at this level
	 */
	while (pptr) {
		pmcs_lock_phy(pptr);
		pnext = pptr->sibling;

		/*
		 * Set the new dtype if it has changed
		 */
		if ((pptr->pend_dtype != NEW) &&
		    (pptr->pend_dtype != pptr->dtype)) {
			pptr->dtype = pptr->pend_dtype;
		}

		if (pptr->changed == 0 || pptr->dead || pptr->configured) {
			goto next_phy;
		}

		/*
		 * Confirm that this target's iport is configured
		 */
		root_phy = pmcs_get_root_phy(pptr);
		wwn = pmcs_barray2wwn(root_phy->sas_address);
		/*
		 * The PHY lock is dropped across the iport lookup and
		 * re-taken below; pnext was captured above so the walk
		 * remains consistent.
		 */
		pmcs_unlock_phy(pptr);
		iport = pmcs_get_iport_by_wwn(pwp, wwn);
		if (iport == NULL) {
			/* No iport for this tgt, restart */
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "%s: iport not yet configured, "
			    "retry discovery", __func__);
			pnext = NULL;
			rval = -1;
			pmcs_lock_phy(pptr);
			goto next_phy;
		}

		pmcs_lock_phy(pptr);
		switch (pptr->dtype) {
		case NOTHING:
			pptr->changed = 0;
			break;
		case SATA:
		case SAS:
			pptr->iport = iport;
			pmcs_new_tport(pwp, pptr);
			break;
		case EXPANDER:
			pmcs_configure_expander(pwp, pptr, iport);
			break;
		}
		pmcs_rele_iport(iport);

		/* If the config changed under us, stop the walk */
		mutex_enter(&pwp->config_lock);
		if (pwp->config_changed) {
			mutex_exit(&pwp->config_lock);
			pnext = NULL;
			goto next_phy;
		}
		mutex_exit(&pwp->config_lock);

next_phy:
		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	if (rval != 0) {
		return (rval);
	}

	/*
	 * Now walk through each PHY again, recalling ourselves if they
	 * have children
	 */
	pptr = orig_pptr;
	while (pptr) {
		pmcs_lock_phy(pptr);
		pnext = pptr->sibling;
		pchild = pptr->children;
		pmcs_unlock_phy(pptr);

		if (pchild) {
			rval = pmcs_configure_new_devices(pwp, pchild);
			if (rval != 0) {
				break;
			}
		}

		pptr = pnext;
	}

	return (rval);
}

/*
 * Set all phys and descendent phys as changed if changed == B_TRUE, otherwise
 * mark them all as not changed.
 *
 * Called with parent PHY locked.
 */
void
pmcs_set_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, boolean_t changed,
    int level)
{
	pmcs_phy_t *pptr;

	if (level == 0) {
		/* At the top level, touch only the passed-in PHY... */
		if (changed) {
			PHY_CHANGED(pwp, parent);
		} else {
			parent->changed = 0;
		}
		if (parent->dtype == EXPANDER && parent->level) {
			parent->width = 1;
		}
		if (parent->children) {
			pmcs_set_changed(pwp, parent->children, changed,
			    level + 1);
		}
	} else {
		/* ...below that, walk the whole sibling list as well */
		pptr = parent;
		while (pptr) {
			if (changed) {
				PHY_CHANGED(pwp, pptr);
			} else {
				pptr->changed = 0;
			}
			if (pptr->dtype == EXPANDER && pptr->level) {
				pptr->width = 1;
			}
			if (pptr->children) {
				pmcs_set_changed(pwp, pptr->children, changed,
				    level + 1);
			}
			pptr = pptr->sibling;
		}
	}
}

/*
 * Take the passed phy mark it and its descendants as dead.
 * Fire up reconfiguration to abort commands and bury it.
 *
 * Called with the parent PHY locked.
3120 */ 3121 void 3122 pmcs_kill_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, int level) 3123 { 3124 pmcs_phy_t *pptr = parent; 3125 3126 while (pptr) { 3127 pptr->link_rate = 0; 3128 pptr->abort_sent = 0; 3129 pptr->abort_pending = 1; 3130 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 3131 pptr->need_rl_ext = 0; 3132 3133 if (pptr->dead == 0) { 3134 PHY_CHANGED(pwp, pptr); 3135 RESTART_DISCOVERY(pwp); 3136 } 3137 3138 pptr->dead = 1; 3139 3140 if (pptr->children) { 3141 pmcs_kill_changed(pwp, pptr->children, level + 1); 3142 } 3143 3144 /* 3145 * Only kill siblings at level > 0 3146 */ 3147 if (level == 0) { 3148 return; 3149 } 3150 3151 pptr = pptr->sibling; 3152 } 3153 } 3154 3155 /* 3156 * Go through every PHY and clear any that are dead (unless they're expanders) 3157 */ 3158 static void 3159 pmcs_clear_phys(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 3160 { 3161 pmcs_phy_t *pnext, *phyp; 3162 3163 phyp = pptr; 3164 while (phyp) { 3165 if (IS_ROOT_PHY(phyp)) { 3166 pmcs_lock_phy(phyp); 3167 } 3168 3169 if ((phyp->dtype != EXPANDER) && phyp->dead) { 3170 pmcs_clear_phy(pwp, phyp); 3171 } 3172 3173 if (phyp->children) { 3174 pmcs_clear_phys(pwp, phyp->children); 3175 } 3176 3177 pnext = phyp->sibling; 3178 3179 if (IS_ROOT_PHY(phyp)) { 3180 pmcs_unlock_phy(phyp); 3181 } 3182 3183 phyp = pnext; 3184 } 3185 } 3186 3187 /* 3188 * Clear volatile parts of a phy. Called with PHY locked. 
 */
void
pmcs_clear_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	/*
	 * Reset the volatile (per-connection) state of a PHY while
	 * preserving its identity and position in the topology tree.
	 */
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: %s",
	    __func__, pptr->path);
	ASSERT(mutex_owned(&pptr->phy_lock));
	/* keep sibling */
	/* keep children */
	/* keep parent */
	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	/* keep hw_event_ack */
	pptr->ncphy = 0;
	/* keep phynum */
	pptr->width = 0;
	pptr->ds_recovery_retries = 0;
	pptr->ds_prev_good_recoveries = 0;
	pptr->last_good_recovery = 0;
	pptr->prev_recovery = 0;

	/* keep dtype */
	pptr->config_stop = 0;
	pptr->spinup_hold = 0;
	pptr->atdt = 0;
	/* keep portid */
	pptr->link_rate = 0;
	pptr->valid_device_id = 0;
	pptr->abort_sent = 0;
	pptr->abort_pending = 0;
	pptr->need_rl_ext = 0;
	pptr->subsidiary = 0;
	pptr->configured = 0;
	pptr->deregister_wait = 0;
	pptr->reenumerate = 0;
	/* Only mark dead if it's not a root PHY and its dtype isn't NOTHING */
	/* XXX: What about directly attached disks? */
	if (!IS_ROOT_PHY(pptr) && (pptr->dtype != NOTHING))
		pptr->dead = 1;
	pptr->changed = 0;
	/* keep SAS address */
	/* keep path */
	/* keep ref_count */
	/* Don't clear iport on root PHYs - they are handled in pmcs_intr.c */
	if (!IS_ROOT_PHY(pptr)) {
		/* Remember the last iport for deregistration bookkeeping */
		pptr->last_iport = pptr->iport;
		pptr->iport = NULL;
	}
	/* keep target */
}

/*
 * Allocate softstate for this target if there isn't already one.  If there
 * is, just redo our internal configuration.  If it is actually "new", we'll
 * soon get a tran_tgt_init for it.
 *
 * Called with PHY locked.
3245 */ 3246 static void 3247 pmcs_new_tport(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 3248 { 3249 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: phy 0x%p @ %s", 3250 __func__, (void *)pptr, pptr->path); 3251 3252 if (pmcs_configure_phy(pwp, pptr) == B_FALSE) { 3253 /* 3254 * If the config failed, mark the PHY as changed. 3255 */ 3256 PHY_CHANGED(pwp, pptr); 3257 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 3258 "%s: pmcs_configure_phy failed for phy 0x%p", __func__, 3259 (void *)pptr); 3260 return; 3261 } 3262 3263 /* Mark PHY as no longer changed */ 3264 pptr->changed = 0; 3265 3266 /* 3267 * If the PHY has no target pointer: 3268 * 3269 * If it's a root PHY, see if another PHY in the iport holds the 3270 * target pointer (primary PHY changed). If so, move it over. 3271 * 3272 * If it's not a root PHY, see if there's a PHY on the dead_phys 3273 * list that matches. 3274 */ 3275 if (pptr->target == NULL) { 3276 if (IS_ROOT_PHY(pptr)) { 3277 pmcs_phy_t *rphy = pwp->root_phys; 3278 3279 while (rphy) { 3280 if (rphy == pptr) { 3281 rphy = rphy->sibling; 3282 continue; 3283 } 3284 3285 mutex_enter(&rphy->phy_lock); 3286 if ((rphy->iport == pptr->iport) && 3287 (rphy->target != NULL)) { 3288 mutex_enter(&rphy->target->statlock); 3289 pptr->target = rphy->target; 3290 rphy->target = NULL; 3291 pptr->target->phy = pptr; 3292 /* The target is now on pptr */ 3293 mutex_exit(&pptr->target->statlock); 3294 mutex_exit(&rphy->phy_lock); 3295 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, 3296 pptr, pptr->target, 3297 "%s: Moved target from %s to %s", 3298 __func__, rphy->path, pptr->path); 3299 break; 3300 } 3301 mutex_exit(&rphy->phy_lock); 3302 3303 rphy = rphy->sibling; 3304 } 3305 } else { 3306 pmcs_reap_dead_phy(pptr); 3307 } 3308 } 3309 3310 /* 3311 * Only assign the device if there is a target for this PHY with a 3312 * matching SAS address. 
If an iport is disconnected from one piece 3313 * of storage and connected to another within the iport stabilization 3314 * time, we can get the PHY/target mismatch situation. 3315 * 3316 * Otherwise, it'll get done in tran_tgt_init. 3317 */ 3318 if (pptr->target) { 3319 mutex_enter(&pptr->target->statlock); 3320 if (pmcs_phy_target_match(pptr) == B_FALSE) { 3321 mutex_exit(&pptr->target->statlock); 3322 if (!IS_ROOT_PHY(pptr)) { 3323 pmcs_dec_phy_ref_count(pptr); 3324 } 3325 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 3326 "%s: Not assigning existing tgt %p for PHY %p " 3327 "(WWN mismatch)", __func__, (void *)pptr->target, 3328 (void *)pptr); 3329 pptr->target = NULL; 3330 return; 3331 } 3332 3333 if (!pmcs_assign_device(pwp, pptr->target)) { 3334 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target, 3335 "%s: pmcs_assign_device failed for target 0x%p", 3336 __func__, (void *)pptr->target); 3337 } 3338 mutex_exit(&pptr->target->statlock); 3339 } 3340 } 3341 3342 /* 3343 * Called with PHY lock held. 3344 */ 3345 static boolean_t 3346 pmcs_configure_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 3347 { 3348 char *dtype; 3349 3350 ASSERT(mutex_owned(&pptr->phy_lock)); 3351 3352 /* 3353 * Mark this device as no longer changed. 3354 */ 3355 pptr->changed = 0; 3356 3357 /* 3358 * If we don't have a device handle, get one. 
3359 */ 3360 if (pmcs_get_device_handle(pwp, pptr)) { 3361 return (B_FALSE); 3362 } 3363 3364 pptr->configured = 1; 3365 3366 switch (pptr->dtype) { 3367 case SAS: 3368 dtype = "SAS"; 3369 break; 3370 case SATA: 3371 dtype = "SATA"; 3372 break; 3373 case EXPANDER: 3374 dtype = "SMP"; 3375 break; 3376 default: 3377 dtype = "???"; 3378 } 3379 3380 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "config_dev: %s " 3381 "dev %s " SAS_ADDR_FMT " dev id 0x%x lr 0x%x", dtype, pptr->path, 3382 SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate); 3383 3384 return (B_TRUE); 3385 } 3386 3387 /* 3388 * Called with PHY locked 3389 */ 3390 static void 3391 pmcs_configure_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, pmcs_iport_t *iport) 3392 { 3393 pmcs_phy_t *ctmp, *clist = NULL, *cnext; 3394 int result, i, nphy = 0; 3395 boolean_t root_phy = B_FALSE; 3396 3397 ASSERT(iport); 3398 3399 /* 3400 * Step 1- clear our "changed" bit. If we need to retry/restart due 3401 * to resource shortages, we'll set it again. While we're doing 3402 * configuration, other events may set it again as well. If the PHY 3403 * is a root PHY and is currently marked as having changed, reset the 3404 * config_stop timer as well. 3405 */ 3406 if (IS_ROOT_PHY(pptr) && pptr->changed) { 3407 pptr->config_stop = ddi_get_lbolt() + 3408 drv_usectohz(PMCS_MAX_CONFIG_TIME); 3409 } 3410 pptr->changed = 0; 3411 3412 /* 3413 * Step 2- make sure we don't overflow 3414 */ 3415 if (pptr->level == PMCS_MAX_XPND-1) { 3416 pmcs_prt(pwp, PMCS_PRT_WARN, pptr, NULL, 3417 "%s: SAS expansion tree too deep", __func__); 3418 return; 3419 } 3420 3421 /* 3422 * Step 3- Check if this expander is part of a wide phy that has 3423 * already been configured. 3424 * 3425 * This is known by checking this level for another EXPANDER device 3426 * with the same SAS address and isn't already marked as a subsidiary 3427 * phy and a parent whose SAS address is the same as our SAS address 3428 * (if there are parents). 
	 */
	if (!IS_ROOT_PHY(pptr)) {
		/*
		 * No need to lock the parent here because we're in discovery
		 * and the only time a PHY's children pointer can change is
		 * in discovery; either in pmcs_clear_expander (which has
		 * already been called) or here, down below. Plus, trying to
		 * grab the parent's lock here can cause deadlock.
		 */
		ctmp = pptr->parent->children;
	} else {
		ctmp = pwp->root_phys;
		root_phy = B_TRUE;
	}

	while (ctmp) {
		/*
		 * If we've checked all PHYs up to pptr, we stop. Otherwise,
		 * we'll be checking for a primary PHY with a higher PHY
		 * number than pptr, which will never happen. The primary
		 * PHY on non-root expanders will ALWAYS be the lowest
		 * numbered PHY.
		 */
		if (ctmp == pptr) {
			break;
		}

		/*
		 * If pptr and ctmp are root PHYs, just grab the mutex on
		 * ctmp. No need to lock the entire tree. If they are not
		 * root PHYs, there is no need to lock since a non-root PHY's
		 * SAS address and other characteristics can only change in
		 * discovery anyway.
		 */
		if (root_phy) {
			mutex_enter(&ctmp->phy_lock);
		}

		if (ctmp->dtype == EXPANDER && ctmp->width &&
		    memcmp(ctmp->sas_address, pptr->sas_address, 8) == 0) {
			int widephy = 0;
			/*
			 * If these phys are not root PHYs, compare their SAS
			 * addresses too.
			 */
			if (!root_phy) {
				if (memcmp(ctmp->parent->sas_address,
				    pptr->parent->sas_address, 8) == 0) {
					widephy = 1;
				}
			} else {
				widephy = 1;
			}
			if (widephy) {
				/*
				 * pptr is a subsidiary of the already-
				 * configured primary PHY ctmp; just bump the
				 * width and fold in port-pm info.
				 */
				ctmp->width++;
				pptr->subsidiary = 1;

				/*
				 * Update the primary PHY's attached-port-pm
				 * and target-port-pm information with the info
				 * from this subsidiary
				 */
				pmcs_update_phy_pm_props(ctmp,
				    pptr->att_port_pm_tmp,
				    pptr->tgt_port_pm_tmp, B_TRUE);

				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: PHY %s part of wide PHY %s "
				    "(now %d wide)", __func__, pptr->path,
				    ctmp->path, ctmp->width);
				if (root_phy) {
					mutex_exit(&ctmp->phy_lock);
				}
				return;
			}
		}

		cnext = ctmp->sibling;
		if (root_phy) {
			mutex_exit(&ctmp->phy_lock);
		}
		ctmp = cnext;
	}

	/*
	 * Step 4- If we don't have a device handle, get one. Since this
	 * is the primary PHY, make sure subsidiary is cleared.
	 */
	pptr->subsidiary = 0;
	pptr->iport = iport;
	if (pmcs_get_device_handle(pwp, pptr)) {
		goto out;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Config expander %s "
	    SAS_ADDR_FMT " dev id 0x%x lr 0x%x", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);

	/*
	 * Step 5- figure out how many phys are in this expander.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		/*
		 * 0 means a resource shortage: retry while the config_stop
		 * deadline hasn't passed; otherwise give up and kill the
		 * subtree.
		 */
		if (nphy == 0 && ddi_get_lbolt() < pptr->config_stop) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pptr->config_stop = 0;
			pmcs_kill_changed(pwp, pptr, 0);
		}
		goto out;
	}

	/*
	 * Step 6- Allocate a list of phys for this expander and figure out
	 * what each one is.
	 */
	for (i = 0; i < nphy; i++) {
		ctmp = kmem_cache_alloc(pwp->phy_cache, KM_SLEEP);
		bzero(ctmp, sizeof (pmcs_phy_t));
		ctmp->device_id = PMCS_INVALID_DEVICE_ID;
		ctmp->sibling = clist;
		ctmp->pend_dtype = NEW;	/* Init pending dtype */
		ctmp->config_stop = ddi_get_lbolt() +
		    drv_usectohz(PMCS_MAX_CONFIG_TIME);
		clist = ctmp;
	}

	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		/*
		 * Clean up the newly allocated PHYs and return
		 */
		while (clist) {
			ctmp = clist->sibling;
			kmem_cache_free(pwp->phy_cache, clist);
			clist = ctmp;
		}
		return;
	}
	mutex_exit(&pwp->config_lock);

	/*
	 * Step 7- Now fill in the rest of the static portions of the phy.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		ctmp->parent = pptr;
		ctmp->pwp = pwp;
		ctmp->level = pptr->level+1;
		ctmp->portid = pptr->portid;
		if (ctmp->tolerates_sas2) {
			ASSERT(i < SAS2_PHYNUM_MAX);
			ctmp->phynum = i & SAS2_PHYNUM_MASK;
		} else {
			ASSERT(i < SAS_PHYNUM_MAX);
			ctmp->phynum = i & SAS_PHYNUM_MASK;
		}
		pmcs_phy_name(pwp, ctmp, ctmp->path, sizeof (ctmp->path));
		pmcs_lock_phy(ctmp);
	}

	/*
	 * Step 8- Discover things about each phy in the expander.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		result = pmcs_expander_content_discover(pwp, pptr, ctmp);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}
			goto out;
		}

		/* Set pend_dtype to dtype for 1st time initialization */
		ctmp->pend_dtype = ctmp->dtype;
	}

	/*
	 * Step 9: Install the new list on the next level. There should
	 * typically be no children pointer on this PHY. There is one known
	 * case where this can happen, though. If a root PHY goes down and
	 * comes back up before discovery can run, we will fail to remove the
	 * children from that PHY since it will no longer be marked dead.
	 * However, in this case, all children should also be marked dead. If
	 * we see that, take those children and put them on the dead_phys list.
	 */
	if (pptr->children != NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Expander @ %s still has children: Clean up",
		    __func__, pptr->path);
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	/*
	 * Set the new children pointer for this expander
	 */
	pptr->children = clist;
	clist = NULL;
	pptr->ncphy = nphy;
	pptr->configured = 1;

	/*
	 * We only set width if we're greater than level 0.
	 */
	if (pptr->level) {
		pptr->width = 1;
	}

	/*
	 * Now tell the rest of the world about us, as an SMP node.
	 */
	pptr->iport = iport;
	pmcs_new_tport(pwp, pptr);

out:
	/* Free any allocated-but-uninstalled child PHYs */
	while (clist) {
		ctmp = clist->sibling;
		pmcs_unlock_phy(clist);
		kmem_cache_free(pwp->phy_cache, clist);
		clist = ctmp;
	}
}

/*
 * 2. Check expanders marked changed (but not dead) to see if they still have
 * the same number of phys and the same SAS address. Mark them, their subsidiary
 * phys (if wide) and their descendants dead if anything has changed. Check
 * the devices they contain to see if *they* have changed. If they've changed
 * from type NOTHING we leave them marked changed to be configured later
 * (picking up a new SAS address and link rate if possible). Otherwise, any
 * change in type, SAS address or removal of target role will cause us to
 * mark them (and their descendants) as dead and cause any pending commands
 * and associated devices to be removed.
 *
 * Called with PHY (pptr) locked.
 */

static void
pmcs_check_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int nphy, result;
	pmcs_phy_t *ctmp, *local, *local_list = NULL, *local_tail = NULL;
	boolean_t kill_changed, changed;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: check %s", __func__, pptr->path);

	/*
	 * Step 1: Mark phy as not changed. We will mark it changed if we need
	 * to retry.
	 */
	pptr->changed = 0;

	/*
	 * Reset the config_stop time. Although we're not actually configuring
	 * anything here, we do want some indication of when to give up trying
	 * if we can't communicate with the expander.
	 */
	pptr->config_stop = ddi_get_lbolt() +
	    drv_usectohz(PMCS_MAX_CONFIG_TIME);

	/*
	 * Step 2: Figure out how many phys are in this expander. If
	 * pmcs_expander_get_nphy returns 0 we ran out of resources,
	 * so reschedule and try later. If it returns another error,
	 * just return.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		if ((nphy == 0) && (ddi_get_lbolt() < pptr->config_stop)) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pptr->config_stop = 0;
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pmcs_kill_changed(pwp, pptr, 0);
		}
		return;
	}

	/*
	 * Step 3: If the number of phys don't agree, kill the old sub-tree.
	 */
	if (nphy != pptr->ncphy) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: number of contained phys for %s changed from %d to %d",
		    __func__, pptr->path, pptr->ncphy, nphy);
		/*
		 * Force a rescan of this expander after dead contents
		 * are cleared and removed.
		 */
		pmcs_kill_changed(pwp, pptr, 0);
		return;
	}

	/*
	 * Step 4: if we're at the bottom of the stack, we're done
	 * (we can't have any levels below us)
	 */
	if (pptr->level == PMCS_MAX_XPND-1) {
		return;
	}

	/*
	 * Step 5: Discover things about each phy in this expander.  We do
	 * this by walking the current list of contained phys and doing a
	 * content discovery for it to a local phy.
	 */
	ctmp = pptr->children;
	ASSERT(ctmp);
	if (ctmp == NULL) {
		/* Defensive: non-DEBUG builds fall through the ASSERT */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: No children attached to expander @ %s?", __func__,
		    pptr->path);
		return;
	}

	while (ctmp) {
		/*
		 * Allocate a local PHY to contain the proposed new contents
		 * and link it to the rest of the local PHYs so that they
		 * can all be freed later.
		 */
		local = pmcs_clone_phy(ctmp);

		if (local_list == NULL) {
			local_list = local;
			local_tail = local;
		} else {
			local_tail->sibling = local;
			local_tail = local;
		}

		/*
		 * Need to lock the local PHY since pmcs_expander_content_
		 * discovery may call pmcs_clear_phy on it, which expects
		 * the PHY to be locked.
		 */
		pmcs_lock_phy(local);
		result = pmcs_expander_content_discover(pwp, pptr, local);
		pmcs_unlock_phy(local);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}

			/*
			 * Release all the local PHYs that we allocated.
			 */
			pmcs_free_phys(pwp, local_list);
			return;
		}

		ctmp = ctmp->sibling;
	}

	/*
	 * Step 6: Compare the local PHY's contents to our current PHY. If
	 * there are changes, take the appropriate action.
	 * This is done in two steps (step 5 above, and 6 here) so that if we
	 * have to bail during this process (e.g. pmcs_expander_content_discover
	 * fails), we haven't actually changed the state of any of the real
	 * PHYs. Next time we come through here, we'll be starting over from
	 * scratch. This keeps us from marking a changed PHY as no longer
	 * changed, but then having to bail only to come back next time and
	 * think that the PHY hadn't changed. If this were to happen, we
	 * would fail to properly configure the device behind this PHY.
	 */
	local = local_list;
	ctmp = pptr->children;

	while (ctmp) {
		changed = B_FALSE;
		kill_changed = B_FALSE;

		/*
		 * We set local to local_list prior to this loop so that we
		 * can simply walk the local_list while we walk this list. The
		 * two lists should be completely in sync.
		 *
		 * Clear the changed flag here.
		 */
		ctmp->changed = 0;

		if (ctmp->dtype != local->dtype) {
			if (ctmp->dtype != NOTHING) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from %s to %s "
				    "(killing)", __func__, ctmp->path,
				    PHY_TYPE(ctmp), PHY_TYPE(local));
				/*
				 * Force a rescan of this expander after dead
				 * contents are cleared and removed.
				 */
				changed = B_TRUE;
				kill_changed = B_TRUE;
			} else {
				changed = B_TRUE;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from NOTHING to %s",
				    __func__, ctmp->path, PHY_TYPE(local));
				/*
				 * Since this PHY was nothing and is now
				 * something, reset the config_stop timer.
				 */
				ctmp->config_stop = ddi_get_lbolt() +
				    drv_usectohz(PMCS_MAX_CONFIG_TIME);
			}

		} else if (ctmp->atdt != local->atdt) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL, "%s: "
			    "%s attached device type changed from %d to %d "
			    "(killing)", __func__, ctmp->path, ctmp->atdt,
			    local->atdt);
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;

			if (local->atdt == 0) {
				kill_changed = B_TRUE;
			}
		} else if (ctmp->link_rate != local->link_rate) {
			pmcs_prt(pwp, PMCS_PRT_INFO, ctmp, NULL, "%s: %s "
			    "changed speed from %s to %s", __func__, ctmp->path,
			    pmcs_get_rate(ctmp->link_rate),
			    pmcs_get_rate(local->link_rate));
			/* If the speed changed from invalid, force rescan */
			if (!PMCS_VALID_LINK_RATE(ctmp->link_rate)) {
				changed = B_TRUE;
				RESTART_DISCOVERY(pwp);
			} else {
				/* Just update to the new link rate */
				ctmp->link_rate = local->link_rate;
			}

			if (!PMCS_VALID_LINK_RATE(local->link_rate)) {
				kill_changed = B_TRUE;
			}
		} else if (memcmp(ctmp->sas_address, local->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			/*
			 * NOTE(review): the format below likely renders as
			 * "...<addr>to <addr>" -- missing space before "to".
			 */
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: SAS Addr for %s changed from " SAS_ADDR_FMT
			    "to " SAS_ADDR_FMT " (kill old tree)", __func__,
			    ctmp->path, SAS_ADDR_PRT(ctmp->sas_address),
			    SAS_ADDR_PRT(local->sas_address));
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: %s looks the same (type %s)",
			    __func__, ctmp->path, PHY_TYPE(ctmp));
			/*
			 * If EXPANDER, still mark it changed so we
			 * re-evaluate its contents. If it's not an expander,
			 * but it hasn't been configured, also mark it as
			 * changed so that it will undergo configuration.
			 */
			if (ctmp->dtype == EXPANDER) {
				changed = B_TRUE;
			} else if ((ctmp->dtype != NOTHING) &&
			    !ctmp->configured) {
				ctmp->changed = 1;
			} else {
				/* It simply hasn't changed */
				ctmp->changed = 0;
			}
		}

		/*
		 * If the PHY changed, call pmcs_kill_changed if indicated,
		 * update its contents to reflect its current state and mark it
		 * as changed.
		 */
		if (changed) {
			/*
			 * pmcs_kill_changed will mark the PHY as changed, so
			 * only do PHY_CHANGED if we did not do kill_changed.
			 */
			if (kill_changed) {
				pmcs_kill_changed(pwp, ctmp, 0);
			} else {
				/*
				 * If we're not killing the device, it's not
				 * dead. Mark the PHY as changed.
				 */
				PHY_CHANGED(pwp, ctmp);

				if (ctmp->dead) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    ctmp, NULL, "%s: Unmarking PHY %s "
					    "dead, restarting discovery",
					    __func__, ctmp->path);
					ctmp->dead = 0;
					RESTART_DISCOVERY(pwp);
				}
			}

			/*
			 * If the dtype of this PHY is now NOTHING, mark it as
			 * unconfigured. Set pend_dtype to what the new dtype
			 * is. It'll get updated at the end of the discovery
			 * process.
			 */
			if (local->dtype == NOTHING) {
				bzero(ctmp->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = 0;
				ctmp->link_rate = 0;
				ctmp->pend_dtype = NOTHING;
				ctmp->configured = 0;
			} else {
				(void) memcpy(ctmp->sas_address,
				    local->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = local->atdt;
				ctmp->link_rate = local->link_rate;
				ctmp->pend_dtype = local->dtype;
			}
		}

		local = local->sibling;
		ctmp = ctmp->sibling;
	}

	/*
	 * If we got to here, that means we were able to see all the PHYs
	 * and we can now update all of the real PHYs with the information
	 * we got on the local PHYs. Once that's done, free all the local
	 * PHYs.
	 */

	pmcs_free_phys(pwp, local_list);
}

/*
 * Top level routine to check expanders. We call pmcs_check_expander for
 * each expander. Since we're not doing any configuration right now, it
 * doesn't matter if this is breadth-first.
 */
static void
pmcs_check_expanders(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_phy_t *phyp, *pnext, *pchild;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: %s", __func__, pptr->path);

	/*
	 * Check each expander at this level
	 */
	phyp = pptr;
	while (phyp) {
		pmcs_lock_phy(phyp);

		/*
		 * Only check configured, live, primary (non-subsidiary)
		 * expanders that are marked changed.
		 */
		if ((phyp->dtype == EXPANDER) && phyp->changed &&
		    !phyp->dead && !phyp->subsidiary &&
		    phyp->configured) {
			pmcs_check_expander(pwp, phyp);
		}

		pnext = phyp->sibling;
		pmcs_unlock_phy(phyp);
		phyp = pnext;
	}

	/*
	 * Now check the children
	 */
	phyp = pptr;
	while (phyp) {
		/* Snapshot sibling/children pointers under the PHY lock */
		pmcs_lock_phy(phyp);
		pnext = phyp->sibling;
		pchild = phyp->children;
		pmcs_unlock_phy(phyp);

		if (pchild) {
			pmcs_check_expanders(pwp, pchild);
		}

		phyp = pnext;
	}
}

/*
 * Recursively clear an expander: mark a dead expander's children dead,
 * move them to the dead_phys list, and clear any subsidiary phys of the
 * same wide port.
 *
 * Called with softstate and PHY locked.
 */
static void
pmcs_clear_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, int level)
{
	pmcs_phy_t *ctmp;

	ASSERT(mutex_owned(&pwp->lock));
	ASSERT(mutex_owned(&pptr->phy_lock));
	ASSERT(pptr->level < PMCS_MAX_XPND - 1);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: checking %s", __func__, pptr->path);

	ctmp = pptr->children;
	while (ctmp) {
		/*
		 * If the expander is dead, mark its children dead
		 */
		if (pptr->dead) {
			ctmp->dead = 1;
		}
		if (ctmp->dtype == EXPANDER) {
			pmcs_clear_expander(pwp, ctmp, level + 1);
		}
		ctmp = ctmp->sibling;
	}

	/*
	 * If this expander is not dead, we're done here.
	 */
	if (!pptr->dead) {
		return;
	}

	/*
	 * Now snip out the list of children below us and release them
	 */
	if (pptr->children) {
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	pptr->children = NULL;

	/*
	 * Clear subsidiary phys as well. Getting the parent's PHY lock
	 * is only necessary if level == 0 since otherwise the parent is
	 * already locked.
	 */
	if (!IS_ROOT_PHY(pptr)) {
		if (level == 0) {
			mutex_enter(&pptr->parent->phy_lock);
		}
		ctmp = pptr->parent->children;
		if (level == 0) {
			mutex_exit(&pptr->parent->phy_lock);
		}
	} else {
		ctmp = pwp->root_phys;
	}

	while (ctmp) {
		if (ctmp == pptr) {
			ctmp = ctmp->sibling;
			continue;
		}
		/*
		 * We only need to lock subsidiary PHYs on the level 0
		 * expander. Any children of that expander, subsidiaries or
		 * not, will already be locked.
		 */
		if (level == 0) {
			pmcs_lock_phy(ctmp);
		}
		/* Skip anything that isn't a subsidiary of our wide port */
		if (ctmp->dtype != EXPANDER || ctmp->subsidiary == 0 ||
		    memcmp(ctmp->sas_address, pptr->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			if (level == 0) {
				pmcs_unlock_phy(ctmp);
			}
			ctmp = ctmp->sibling;
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
		    "%s: subsidiary %s", __func__, ctmp->path);
		pmcs_clear_phy(pwp, ctmp);
		if (level == 0) {
			pmcs_unlock_phy(ctmp);
		}
		ctmp = ctmp->sibling;
	}

	pmcs_clear_phy(pwp, pptr);
}

/*
 * Called with PHY locked and with scratch acquired. We return 0 if
 * we fail to allocate resources or notice that the configuration
 * count changed while we were running the command. We return
 * less than zero if we had an I/O error or received an unsupported
 * configuration. Otherwise we return the number of phys in the
 * expander.
 */
#define	DFM(m, y)	if (m == NULL) m = y
static int
pmcs_expander_get_nphy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	char buf[64];
	const uint_t rdoff = 0x100;	/* returned data offset */
	smp_response_frame_t *srf;
	smp_report_general_resp_t *srgr;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status, ival;
	int result = 0;

	/*
	 * SMP REPORT GENERAL request dword.  The 0x1100 portion is the
	 * SAS2-style allocated response length; it is stripped and the
	 * request retried below if the expander rejects it (SAS1.1).
	 */
	ival = 0x40001100;

again:
	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		goto out;
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		goto out;
	}
	(void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
	pwrk->arg = pwp->scratch;
	pwrk->dtype = pptr->dtype;
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, NULL,
		    "%s: GET_IQ_ENTRY failed", __func__);
		pmcs_pwork(pwp, pwrk);
		goto out;
	}

	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	msg[3] = LE_32((4 << SMP_REQUEST_LENGTH_SHIFT) | SMP_INDIRECT_RESPONSE);
	/*
	 * Send SMP REPORT GENERAL (of either SAS1.1 or SAS2 flavors).
	 */
	msg[4] = BE_32(ival);
	msg[5] = 0;
	msg[6] = 0;
	msg[7] = 0;
	msg[8] = 0;
	msg[9] = 0;
	msg[10] = 0;
	msg[11] = 0;
	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
	msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
	msg[15] = 0;

	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);

	/* SMP serialization */
	pmcs_smp_acquire(pptr->iport);

	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	htag = pwrk->htag;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while waiting for completion */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	/* Release SMP lock before reacquiring PHY lock */
	pmcs_smp_release(pptr->iport);
	pmcs_lock_phy(pptr);

	pmcs_pwork(pwp, pwrk);

	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		result = 0;
		goto out;
	}
	mutex_exit(&pwp->config_lock);

	if (result) {
		/*
		 * Command timed out -- abort it.
		 * NOTE(review): "Issuing SMP ABORT" is logged both here and
		 * again in the success arm below -- possibly redundant.
		 */
		pmcs_timed_out(pwp, htag, __func__);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: Issuing SMP ABORT for htag 0x%08x", __func__, htag);
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP ABORT for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		result = 0;
		goto out;
	}
	ptr = (void *)pwp->scratch;
	status = LE_32(ptr[2]);
	if (status == PMCOUT_STATUS_UNDERFLOW ||
	    status == PMCOUT_STATUS_OVERFLOW) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL,
		    "%s: over/underflow", __func__);
		status = PMCOUT_STATUS_OK;
	}
	srf = (smp_response_frame_t *)&((uint32_t *)pwp->scratch)[rdoff >> 2];
	srgr = (smp_report_general_resp_t *)
	    &((uint32_t *)pwp->scratch)[(rdoff >> 2)+1];

	if (status != PMCOUT_STATUS_OK) {
		char *nag = NULL;
		(void) snprintf(buf, sizeof (buf),
		    "%s: SMP op failed (0x%x)", __func__, status);
		/*
		 * The DFM fall-through chain below picks the message for the
		 * first matching status and retains it through the chain.
		 */
		switch (status) {
		case PMCOUT_STATUS_IO_PORT_IN_RESET:
			DFM(nag, "I/O Port In Reset");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
			DFM(nag, "Hardware Timeout");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
			DFM(nag, "Internal SMP Resource Failure");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
			DFM(nag, "PHY Not Ready");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
			DFM(nag, "Connection Rate Not Supported");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
			DFM(nag, "Open Retry Timeout");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
			DFM(nag, "HW Resource Busy");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
			DFM(nag, "Response Connection Error");
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: expander %s SMP operation failed (%s)",
			    __func__, pptr->path, nag);
			break;

		/*
		 * For the IO_DS_NON_OPERATIONAL case, we need to kick off
		 * device state recovery and return 0 so that the caller
		 * doesn't assume this expander is dead for good.
		 */
		case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: {
			pmcs_xscsi_t *xp = pptr->target;

			pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, pptr, xp,
			    "%s: expander %s device state non-operational",
			    __func__, pptr->path);

			if (xp == NULL) {
				/*
				 * Kick off recovery right now.
				 */
				SCHEDULE_WORK(pwp, PMCS_WORK_DS_ERR_RECOVERY);
				(void) ddi_taskq_dispatch(pwp->tq, pmcs_worker,
				    pwp, DDI_NOSLEEP);
			} else {
				mutex_enter(&xp->statlock);
				pmcs_start_dev_state_recovery(xp, pptr);
				mutex_exit(&xp->statlock);
			}

			break;
		}

		default:
			pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr);
			result = -EIO;
			break;
		}
	} else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response frame type 0x%x",
		    __func__, srf->srf_frame_type);
		result = -EINVAL;
	} else if (srf->srf_function != SMP_FUNC_REPORT_GENERAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response function 0x%x",
		    __func__, srf->srf_function);
		result = -EINVAL;
	} else if (srf->srf_result != 0) {
		/*
		 * Check to see if we have a value of 3 for failure and
		 * whether we were using a SAS2.0 allocation length value
		 * and retry without it.
		 */
		if (srf->srf_result == 3 && (ival & 0xff00)) {
			ival &= ~0xff00;
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: err 0x%x with SAS2 request- retry with SAS1",
			    __func__, srf->srf_result);
			goto again;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response 0x%x", __func__, srf->srf_result);
		result = -EINVAL;
	} else if (srgr->srgr_configuring) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: expander at phy %s is still configuring",
		    __func__, pptr->path);
		result = 0;
	} else {
		result = srgr->srgr_number_of_phys;
		if (ival & 0xff00) {
			pptr->tolerates_sas2 = 1;
		}
		/*
		 * Save off the REPORT_GENERAL response
		 */
		bcopy(srgr, &pptr->rg_resp, sizeof (smp_report_general_resp_t));
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s has %d phys and %s SAS2", pptr->path, result,
		    pptr->tolerates_sas2? "tolerates" : "does not tolerate");
	}
out:
	return (result);
}

/*
 * Called with expander locked (and thus, pptr) as well as all PHYs up to
 * the root, and scratch acquired. Return 0 if we fail to allocate resources
 * or notice that the configuration changed while we were running the command.
 *
 * We return less than zero if we had an I/O error or received an
 * unsupported configuration.
 */
static int
pmcs_expander_content_discover(pmcs_hw_t *pwp, pmcs_phy_t *expander,
    pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	char buf[64];
	uint8_t sas_address[8];
	uint8_t att_sas_address[8];
	smp_response_frame_t *srf;
	smp_discover_resp_t *sdr;
	const uint_t rdoff = 0x100;	/* returned data offset */
	uint8_t *roff;
	uint32_t status, *ptr, msg[PMCS_MSG_SIZE], htag;
	int result = 0;
	uint8_t ini_support;
	uint8_t tgt_support;

	if (!expander->iport || !expander->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, expander, expander->target,
		    "%s: Can't reach PHY %s", __func__, expander->path);
		goto out;
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, expander);
	if (pwrk == NULL) {
		goto out;
	}
	(void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
	pwrk->arg = pwp->scratch;
	pwrk->dtype = expander->dtype;
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(expander->device_id);
	msg[3] = LE_32((12 << SMP_REQUEST_LENGTH_SHIFT) |
	    SMP_INDIRECT_RESPONSE);
	/*
	 * Send SMP DISCOVER (of either SAS1.1 or SAS2 flavors).
4414 */ 4415 if (expander->tolerates_sas2) { 4416 msg[4] = BE_32(0x40101B00); 4417 } else { 4418 msg[4] = BE_32(0x40100000); 4419 } 4420 msg[5] = 0; 4421 msg[6] = BE_32((pptr->phynum << 16)); 4422 msg[7] = 0; 4423 msg[8] = 0; 4424 msg[9] = 0; 4425 msg[10] = 0; 4426 msg[11] = 0; 4427 msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff)); 4428 msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff)); 4429 msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff); 4430 msg[15] = 0; 4431 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4432 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4433 if (ptr == NULL) { 4434 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4435 goto out; 4436 } 4437 4438 COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE); 4439 4440 /* SMP serialization */ 4441 pmcs_smp_acquire(expander->iport); 4442 4443 pwrk->state = PMCS_WORK_STATE_ONCHIP; 4444 htag = pwrk->htag; 4445 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4446 4447 /* 4448 * Drop PHY lock while waiting so other completions aren't potentially 4449 * blocked. 4450 */ 4451 pmcs_unlock_phy(expander); 4452 WAIT_FOR(pwrk, 1000, result); 4453 /* Release SMP lock before reacquiring PHY lock */ 4454 pmcs_smp_release(expander->iport); 4455 pmcs_lock_phy(expander); 4456 4457 pmcs_pwork(pwp, pwrk); 4458 4459 mutex_enter(&pwp->config_lock); 4460 if (pwp->config_changed) { 4461 RESTART_DISCOVERY_LOCKED(pwp); 4462 mutex_exit(&pwp->config_lock); 4463 result = 0; 4464 goto out; 4465 } 4466 mutex_exit(&pwp->config_lock); 4467 4468 if (result) { 4469 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__); 4470 if (pmcs_abort(pwp, expander, htag, 0, 0)) { 4471 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4472 "%s: Unable to issue SMP ABORT for htag 0x%08x", 4473 __func__, htag); 4474 } else { 4475 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4476 "%s: Issuing SMP ABORT for htag 0x%08x", 4477 __func__, htag); 4478 } 4479 result = -ETIMEDOUT; 4480 goto out; 4481 } 4482 ptr = (void *)pwp->scratch; 4483 /* 4484 * Point roff to the DMA offset for returned data 4485 
*/ 4486 roff = pwp->scratch; 4487 roff += rdoff; 4488 srf = (smp_response_frame_t *)roff; 4489 sdr = (smp_discover_resp_t *)(roff+4); 4490 status = LE_32(ptr[2]); 4491 if (status == PMCOUT_STATUS_UNDERFLOW || 4492 status == PMCOUT_STATUS_OVERFLOW) { 4493 pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL, 4494 "%s: over/underflow", __func__); 4495 status = PMCOUT_STATUS_OK; 4496 } 4497 if (status != PMCOUT_STATUS_OK) { 4498 char *nag = NULL; 4499 (void) snprintf(buf, sizeof (buf), 4500 "%s: SMP op failed (0x%x)", __func__, status); 4501 switch (status) { 4502 case PMCOUT_STATUS_ERROR_HW_TIMEOUT: 4503 DFM(nag, "Hardware Timeout"); 4504 /* FALLTHROUGH */ 4505 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE: 4506 DFM(nag, "Internal SMP Resource Failure"); 4507 /* FALLTHROUGH */ 4508 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY: 4509 DFM(nag, "PHY Not Ready"); 4510 /* FALLTHROUGH */ 4511 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 4512 DFM(nag, "Connection Rate Not Supported"); 4513 /* FALLTHROUGH */ 4514 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT: 4515 DFM(nag, "Open Retry Timeout"); 4516 /* FALLTHROUGH */ 4517 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 4518 DFM(nag, "HW Resource Busy"); 4519 /* FALLTHROUGH */ 4520 case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR: 4521 DFM(nag, "Response Connection Error"); 4522 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4523 "%s: expander %s SMP operation failed (%s)", 4524 __func__, pptr->path, nag); 4525 break; 4526 default: 4527 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr); 4528 result = -EIO; 4529 break; 4530 } 4531 goto out; 4532 } else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) { 4533 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4534 "%s: bad response frame type 0x%x", 4535 __func__, srf->srf_frame_type); 4536 result = -EINVAL; 4537 goto out; 4538 } else if (srf->srf_function != SMP_FUNC_DISCOVER) { 4539 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4540 "%s: bad response function 0x%x", 4541 
__func__, srf->srf_function); 4542 result = -EINVAL; 4543 goto out; 4544 } else if (srf->srf_result != SMP_RES_FUNCTION_ACCEPTED) { 4545 result = pmcs_smp_function_result(pwp, srf); 4546 /* Need not fail if PHY is Vacant */ 4547 if (result != SMP_RES_PHY_VACANT) { 4548 result = -EINVAL; 4549 goto out; 4550 } 4551 } 4552 4553 /* 4554 * Save off the DISCOVER response 4555 */ 4556 bcopy(sdr, &pptr->disc_resp, sizeof (smp_discover_resp_t)); 4557 4558 ini_support = (sdr->sdr_attached_sata_host | 4559 (sdr->sdr_attached_smp_initiator << 1) | 4560 (sdr->sdr_attached_stp_initiator << 2) | 4561 (sdr->sdr_attached_ssp_initiator << 3)); 4562 4563 tgt_support = (sdr->sdr_attached_sata_device | 4564 (sdr->sdr_attached_smp_target << 1) | 4565 (sdr->sdr_attached_stp_target << 2) | 4566 (sdr->sdr_attached_ssp_target << 3)); 4567 4568 pmcs_wwn2barray(BE_64(sdr->sdr_sas_addr), sas_address); 4569 pmcs_wwn2barray(BE_64(sdr->sdr_attached_sas_addr), att_sas_address); 4570 4571 /* 4572 * Set the routing attribute regardless of the PHY type. 
4573 */ 4574 pptr->routing_attr = sdr->sdr_routing_attr; 4575 4576 switch (sdr->sdr_attached_device_type) { 4577 case SAS_IF_DTYPE_ENDPOINT: 4578 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4579 "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS=" 4580 SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x", 4581 pptr->path, 4582 sdr->sdr_attached_device_type, 4583 sdr->sdr_negotiated_logical_link_rate, 4584 ini_support, 4585 tgt_support, 4586 SAS_ADDR_PRT(sas_address), 4587 SAS_ADDR_PRT(att_sas_address), 4588 sdr->sdr_attached_phy_identifier); 4589 4590 if (sdr->sdr_attached_sata_device || 4591 sdr->sdr_attached_stp_target) { 4592 pptr->dtype = SATA; 4593 } else if (sdr->sdr_attached_ssp_target) { 4594 pptr->dtype = SAS; 4595 } else if (tgt_support || ini_support) { 4596 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4597 "%s: %s has tgt support=%x init support=(%x)", 4598 __func__, pptr->path, tgt_support, ini_support); 4599 } 4600 4601 switch (pptr->routing_attr) { 4602 case SMP_ROUTING_SUBTRACTIVE: 4603 case SMP_ROUTING_TABLE: 4604 case SMP_ROUTING_DIRECT: 4605 pptr->routing_method = SMP_ROUTING_DIRECT; 4606 break; 4607 default: 4608 pptr->routing_method = 0xff; /* Invalid method */ 4609 break; 4610 } 4611 pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum), 4612 (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE); 4613 break; 4614 case SAS_IF_DTYPE_EDGE: 4615 case SAS_IF_DTYPE_FANOUT: 4616 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4617 "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS=" 4618 SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x", 4619 pptr->path, 4620 sdr->sdr_attached_device_type, 4621 sdr->sdr_negotiated_logical_link_rate, 4622 ini_support, 4623 tgt_support, 4624 SAS_ADDR_PRT(sas_address), 4625 SAS_ADDR_PRT(att_sas_address), 4626 sdr->sdr_attached_phy_identifier); 4627 if (sdr->sdr_attached_smp_target) { 4628 /* 4629 * Avoid configuring phys that just point back 4630 * at a parent phy 4631 */ 4632 if (expander->parent && 4633 
memcmp(expander->parent->sas_address, 4634 att_sas_address, 4635 sizeof (expander->parent->sas_address)) == 0) { 4636 pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, NULL, 4637 "%s: skipping port back to parent " 4638 "expander (%s)", __func__, pptr->path); 4639 pptr->dtype = NOTHING; 4640 break; 4641 } 4642 pptr->dtype = EXPANDER; 4643 4644 } else if (tgt_support || ini_support) { 4645 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4646 "%s has tgt support=%x init support=(%x)", 4647 pptr->path, tgt_support, ini_support); 4648 pptr->dtype = EXPANDER; 4649 } 4650 if (pptr->routing_attr == SMP_ROUTING_DIRECT) { 4651 pptr->routing_method = 0xff; /* Invalid method */ 4652 } else { 4653 pptr->routing_method = pptr->routing_attr; 4654 } 4655 pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum), 4656 (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE); 4657 break; 4658 default: 4659 pptr->dtype = NOTHING; 4660 break; 4661 } 4662 if (pptr->dtype != NOTHING) { 4663 pmcs_phy_t *ctmp; 4664 4665 /* 4666 * If the attached device is a SATA device and the expander 4667 * is (possibly) a SAS2 compliant expander, check for whether 4668 * there is a NAA=5 WWN field starting at this offset and 4669 * use that for the SAS Address for this device. 4670 */ 4671 if (expander->tolerates_sas2 && pptr->dtype == SATA && 4672 (roff[SAS_ATTACHED_NAME_OFFSET] >> 8) == NAA_IEEE_REG) { 4673 (void) memcpy(pptr->sas_address, 4674 &roff[SAS_ATTACHED_NAME_OFFSET], 8); 4675 } else { 4676 (void) memcpy(pptr->sas_address, att_sas_address, 8); 4677 } 4678 pptr->atdt = (sdr->sdr_attached_device_type); 4679 /* 4680 * Now run up from the expander's parent up to the top to 4681 * make sure we only use the least common link_rate. 
 */
		for (ctmp = expander->parent; ctmp; ctmp = ctmp->parent) {
			if (ctmp->link_rate <
			    sdr->sdr_negotiated_logical_link_rate) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: derating link rate from %x to %x due "
				    "to %s being slower", pptr->path,
				    sdr->sdr_negotiated_logical_link_rate,
				    ctmp->link_rate,
				    ctmp->path);
				sdr->sdr_negotiated_logical_link_rate =
				    ctmp->link_rate;
			}
		}
		pptr->link_rate = sdr->sdr_negotiated_logical_link_rate;
		pptr->state.prog_min_rate = sdr->sdr_prog_min_phys_link_rate;
		pptr->state.hw_min_rate = sdr->sdr_hw_min_phys_link_rate;
		pptr->state.prog_max_rate = sdr->sdr_prog_max_phys_link_rate;
		pptr->state.hw_max_rate = sdr->sdr_hw_max_phys_link_rate;
		PHY_CHANGED(pwp, pptr);
	} else {
		pmcs_clear_phy(pwp, pptr);
	}

	result = 1;
out:
	return (result);
}

/*
 * Get a work structure and assign it a tag with type and serial number
 * If a structure is returned, it is returned locked.
 *
 * Returns NULL if both the free list and the pending-free list are empty.
 * If phyp is non-NULL, a reference is taken on the PHY; pmcs_pwork()
 * releases it when the work structure is freed.
 */
pmcwork_t *
pmcs_gwork(pmcs_hw_t *pwp, uint32_t tag_type, pmcs_phy_t *phyp)
{
	pmcwork_t *p;
	uint16_t snum;
	uint32_t off;

	mutex_enter(&pwp->wfree_lock);
	p = STAILQ_FIRST(&pwp->wf);
	if (p == NULL) {
		/*
		 * If we couldn't get a work structure, it's time to bite
		 * the bullet, grab the pfree_lock and copy over all the
		 * work structures from the pending free list to the actual
		 * free list (assuming it's not also empty).
		 */
		mutex_enter(&pwp->pfree_lock);
		if (STAILQ_FIRST(&pwp->pf) == NULL) {
			mutex_exit(&pwp->pfree_lock);
			mutex_exit(&pwp->wfree_lock);
			return (NULL);
		}
		/* Splice the entire pending-free list onto the free list */
		pwp->wf.stqh_first = pwp->pf.stqh_first;
		pwp->wf.stqh_last = pwp->pf.stqh_last;
		STAILQ_INIT(&pwp->pf);
		mutex_exit(&pwp->pfree_lock);

		p = STAILQ_FIRST(&pwp->wf);
		ASSERT(p != NULL);
	}
	STAILQ_REMOVE(&pwp->wf, p, pmcwork, next);
	/* Serial number is allocated under wfree_lock to keep it unique */
	snum = pwp->wserno++;
	mutex_exit(&pwp->wfree_lock);

	/* The tag's index field is this structure's offset in the work array */
	off = p - pwp->work;

	mutex_enter(&p->lock);
	ASSERT(p->state == PMCS_WORK_STATE_NIL);
	ASSERT(p->htag == PMCS_TAG_FREE);
	/* Compose the host tag: type | serial number | array index */
	p->htag = (tag_type << PMCS_TAG_TYPE_SHIFT) & PMCS_TAG_TYPE_MASK;
	p->htag |= ((snum << PMCS_TAG_SERNO_SHIFT) & PMCS_TAG_SERNO_MASK);
	p->htag |= ((off << PMCS_TAG_INDEX_SHIFT) & PMCS_TAG_INDEX_MASK);
	p->start = gethrtime();
	p->state = PMCS_WORK_STATE_READY;
	p->ssp_event = 0;
	p->dead = 0;

	if (phyp) {
		/* Hold the PHY for the life of this work structure */
		p->phy = phyp;
		pmcs_inc_phy_ref_count(phyp);
	}

	return (p);
}

/*
 * Called with pwrk lock held. Returned with lock released.
4771 */ 4772 void 4773 pmcs_pwork(pmcs_hw_t *pwp, pmcwork_t *p) 4774 { 4775 ASSERT(p != NULL); 4776 ASSERT(mutex_owned(&p->lock)); 4777 4778 p->last_ptr = p->ptr; 4779 p->last_arg = p->arg; 4780 p->last_phy = p->phy; 4781 p->last_xp = p->xp; 4782 p->last_htag = p->htag; 4783 p->last_state = p->state; 4784 p->finish = gethrtime(); 4785 4786 if (p->phy) { 4787 pmcs_dec_phy_ref_count(p->phy); 4788 } 4789 4790 p->state = PMCS_WORK_STATE_NIL; 4791 p->htag = PMCS_TAG_FREE; 4792 p->xp = NULL; 4793 p->ptr = NULL; 4794 p->arg = NULL; 4795 p->phy = NULL; 4796 p->abt_htag = 0; 4797 p->timer = 0; 4798 mutex_exit(&p->lock); 4799 4800 if (mutex_tryenter(&pwp->wfree_lock) == 0) { 4801 mutex_enter(&pwp->pfree_lock); 4802 STAILQ_INSERT_TAIL(&pwp->pf, p, next); 4803 mutex_exit(&pwp->pfree_lock); 4804 } else { 4805 STAILQ_INSERT_TAIL(&pwp->wf, p, next); 4806 mutex_exit(&pwp->wfree_lock); 4807 } 4808 } 4809 4810 /* 4811 * Find a work structure based upon a tag and make sure that the tag 4812 * serial number matches the work structure we've found. 4813 * If a structure is found, its lock is held upon return. 4814 * If lock_phy is B_TRUE, then lock the phy also when returning the work struct 4815 */ 4816 pmcwork_t * 4817 pmcs_tag2wp(pmcs_hw_t *pwp, uint32_t htag, boolean_t lock_phy) 4818 { 4819 pmcwork_t *p; 4820 uint32_t idx = PMCS_TAG_INDEX(htag); 4821 4822 p = &pwp->work[idx]; 4823 4824 mutex_enter(&p->lock); 4825 if (p->htag == htag) { 4826 if (lock_phy) { 4827 mutex_exit(&p->lock); 4828 mutex_enter(&p->phy->phy_lock); 4829 mutex_enter(&p->lock); 4830 } 4831 return (p); 4832 } 4833 mutex_exit(&p->lock); 4834 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 4835 "INDEX 0x%x HTAG 0x%x got p->htag 0x%x", idx, htag, p->htag); 4836 return (NULL); 4837 } 4838 4839 /* 4840 * Issue an abort for a command or for all commands. 4841 * 4842 * Since this can be called from interrupt context, 4843 * we don't wait for completion if wait is not set. 4844 * 4845 * Called with PHY lock held. 
 */
int
pmcs_abort(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint32_t tag, int all_cmds,
    int wait)
{
	pmcwork_t *pwrk;
	pmcs_xscsi_t *tgt;
	uint32_t msg[PMCS_MSG_SIZE], *ptr;
	int result, abt_type;
	uint32_t abt_htag, status;

	/* Only one ABORT_ALL may be outstanding per PHY at a time */
	if (pptr->abort_all_start) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "%s: ABORT_ALL for "
		    "(%s) already in progress.", __func__, pptr->path);
		return (EBUSY);
	}

	/* Select the abort opcode matching the device protocol */
	switch (pptr->dtype) {
	case SAS:
		abt_type = PMCIN_SSP_ABORT;
		break;
	case SATA:
		abt_type = PMCIN_SATA_ABORT;
		break;
	case EXPANDER:
		abt_type = PMCIN_SMP_ABORT;
		break;
	default:
		return (0);
	}

	pwrk = pmcs_gwork(pwp, wait ? PMCS_TAG_TYPE_WAIT : PMCS_TAG_TYPE_NONE,
	    pptr);

	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	pwrk->dtype = pptr->dtype;
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	if (wait) {
		pwrk->arg = msg;
	}
	if (pptr->valid_device_id == 0) {
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Invalid DeviceID", __func__);
		return (ENODEV);
	}
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, abt_type));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	if (all_cmds) {
		/* Abort everything on this device; tag field unused */
		msg[3] = 0;
		msg[4] = LE_32(1);
		pwrk->ptr = NULL;
		pptr->abort_all_start = gethrtime();
	} else {
		/* Abort the single command identified by 'tag' */
		msg[3] = LE_32(tag);
		msg[4] = 0;
		pwrk->abt_htag = tag;
	}
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}

	COPY_MESSAGE(ptr, msg, 5);
	/*
	 * NOTE(review): the debug messages below print msg[1], which is the
	 * LE_32-swapped htag — on a big-endian host this prints byte-swapped;
	 * presumably benign, but worth confirming.
	 */
	if (all_cmds) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: aborting all commands for %s device %s. (htag=0x%x)",
		    __func__, pmcs_get_typename(pptr->dtype), pptr->path,
		    msg[1]);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: aborting tag 0x%x for %s device %s. (htag=0x%x)",
		    __func__, tag, pmcs_get_typename(pptr->dtype), pptr->path,
		    msg[1]);
	}
	pwrk->state = PMCS_WORK_STATE_ONCHIP;

	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (!wait) {
		/* Fire-and-forget (e.g. from interrupt context) */
		mutex_exit(&pwrk->lock);
		return (0);
	}

	abt_htag = pwrk->htag;
	/* Drop the PHY lock while waiting so completions aren't blocked */
	pmcs_unlock_phy(pwrk->phy);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_lock_phy(pwrk->phy);

	/* Capture the target pointer before the work structure is freed */
	tgt = pwrk->xp;
	pmcs_pwork(pwp, pwrk);

	if (tgt != NULL) {
		/* Wait for the target's active queue to drain */
		mutex_enter(&tgt->aqlock);
		if (!STAILQ_EMPTY(&tgt->aq)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Abort complete (result=0x%x), but "
			    "aq not empty (tgt 0x%p), waiting",
			    __func__, result, (void *)tgt);
			cv_wait(&tgt->abort_cv, &tgt->aqlock);
		}
		mutex_exit(&tgt->aqlock);
	}

	if (all_cmds) {
		/* Allow the next ABORT_ALL on this PHY to proceed */
		pptr->abort_all_start = 0;
		cv_signal(&pptr->abort_all_cv);
	}

	if (result) {
		/* The abort request itself timed out; try DS recovery */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
		    "%s: Abort (htag 0x%08x) request timed out",
		    __func__, abt_htag);
		if (tgt != NULL) {
			mutex_enter(&tgt->statlock);
			if ((tgt->dev_state != PMCS_DEVICE_STATE_IN_RECOVERY) &&
			    (tgt->dev_state !=
			    PMCS_DEVICE_STATE_NON_OPERATIONAL)) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
				    "%s: Trying DS error recovery for tgt 0x%p",
				    __func__, (void *)tgt);
				(void) pmcs_send_err_recovery_cmd(pwp,
				    PMCS_DEVICE_STATE_IN_RECOVERY, pptr, tgt);
			}
			mutex_exit(&tgt->statlock);
		}
		return (ETIMEDOUT);
	}

	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		/*
		 * The only non-success status are IO_NOT_VALID &
		 * IO_ABORT_IN_PROGRESS.
		 * In case of IO_ABORT_IN_PROGRESS, the other ABORT cmd's
		 * status is of concern and this duplicate cmd status can
		 * be ignored.
		 * If IO_NOT_VALID, that's not an error per-se.
		 * For abort of single I/O complete the command anyway.
		 * If, however, we were aborting all, that is a problem
		 * as IO_NOT_VALID really means that the IO or device is
		 * not there. So, discovery process will take care of the
		 * cleanup.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
		    "%s: abort result 0x%x", __func__, LE_32(msg[2]));
		if (all_cmds) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			return (EINVAL);
		}

		return (0);
	}

	if (tgt != NULL) {
		/* Abort succeeded; bring the device back to OPERATIONAL */
		mutex_enter(&tgt->statlock);
		if (tgt->dev_state == PMCS_DEVICE_STATE_IN_RECOVERY) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Restoring OPERATIONAL dev_state for tgt 0x%p",
			    __func__, (void *)tgt);
			(void) pmcs_send_err_recovery_cmd(pwp,
			    PMCS_DEVICE_STATE_OPERATIONAL, pptr, tgt);
		}
		mutex_exit(&tgt->statlock);
	}

	return (0);
}

/*
 * Issue a task management function to an SSP device.
 *
 * Called with PHY lock held.
 * statlock CANNOT be held upon entry.
5029 */ 5030 int 5031 pmcs_ssp_tmf(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t tmf, uint32_t tag, 5032 uint64_t lun, uint32_t *response) 5033 { 5034 int result, ds; 5035 uint8_t local[PMCS_QENTRY_SIZE << 1], *xd; 5036 sas_ssp_rsp_iu_t *rptr = (void *)local; 5037 static const uint8_t ssp_rsp_evec[] = { 5038 0x58, 0x61, 0x56, 0x72, 0x00 5039 }; 5040 uint32_t msg[PMCS_MSG_SIZE], *ptr, status; 5041 struct pmcwork *pwrk; 5042 pmcs_xscsi_t *xp; 5043 5044 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 5045 if (pwrk == NULL) { 5046 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__); 5047 return (ENOMEM); 5048 } 5049 /* 5050 * NB: We use the PMCS_OQ_GENERAL outbound queue 5051 * NB: so as to not get entangled in normal I/O 5052 * NB: processing. 5053 */ 5054 pwrk->htag |= PMCS_TAG_NONIO_CMD; 5055 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 5056 PMCIN_SSP_INI_TM_START)); 5057 msg[1] = LE_32(pwrk->htag); 5058 msg[2] = LE_32(pptr->device_id); 5059 if (tmf == SAS_ABORT_TASK || tmf == SAS_QUERY_TASK) { 5060 msg[3] = LE_32(tag); 5061 } else { 5062 msg[3] = 0; 5063 } 5064 msg[4] = LE_32(tmf); 5065 msg[5] = BE_32((uint32_t)lun); 5066 msg[6] = BE_32((uint32_t)(lun >> 32)); 5067 msg[7] = LE_32(PMCIN_MESSAGE_REPORT); 5068 5069 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5070 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5071 if (ptr == NULL) { 5072 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5073 pmcs_pwork(pwp, pwrk); 5074 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__); 5075 return (ENOMEM); 5076 } 5077 COPY_MESSAGE(ptr, msg, 7); 5078 pwrk->arg = msg; 5079 pwrk->dtype = pptr->dtype; 5080 xp = pptr->target; 5081 pwrk->xp = xp; 5082 5083 if (xp != NULL) { 5084 mutex_enter(&xp->statlock); 5085 if (xp->dev_state == PMCS_DEVICE_STATE_NON_OPERATIONAL) { 5086 mutex_exit(&xp->statlock); 5087 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5088 pmcs_pwork(pwp, pwrk); 5089 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: Not " 5090 "sending '%s' because DS is '%s'", __func__, 
5091 pmcs_tmf2str(tmf), pmcs_status_str 5092 (PMCOUT_STATUS_IO_DS_NON_OPERATIONAL)); 5093 return (EIO); 5094 } 5095 mutex_exit(&xp->statlock); 5096 } 5097 5098 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5099 "%s: sending '%s' to %s (lun %llu) tag 0x%x", __func__, 5100 pmcs_tmf2str(tmf), pptr->path, (unsigned long long) lun, tag); 5101 pwrk->state = PMCS_WORK_STATE_ONCHIP; 5102 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5103 5104 pmcs_unlock_phy(pptr); 5105 /* 5106 * This is a command sent to the target device, so it can take 5107 * significant amount of time to complete when path & device is busy. 5108 * Set a timeout to 20 seconds 5109 */ 5110 WAIT_FOR(pwrk, 20000, result); 5111 pmcs_lock_phy(pptr); 5112 pmcs_pwork(pwp, pwrk); 5113 5114 if (result) { 5115 if (xp == NULL) { 5116 return (ETIMEDOUT); 5117 } 5118 5119 mutex_enter(&xp->statlock); 5120 pmcs_start_dev_state_recovery(xp, pptr); 5121 mutex_exit(&xp->statlock); 5122 return (ETIMEDOUT); 5123 } 5124 5125 status = LE_32(msg[2]); 5126 if (status != PMCOUT_STATUS_OK) { 5127 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5128 "%s: status %s for TMF %s action to %s, lun %llu", 5129 __func__, pmcs_status_str(status), pmcs_tmf2str(tmf), 5130 pptr->path, (unsigned long long) lun); 5131 if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) || 5132 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) || 5133 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) { 5134 ds = PMCS_DEVICE_STATE_NON_OPERATIONAL; 5135 } else if (status == PMCOUT_STATUS_IO_DS_IN_RECOVERY) { 5136 /* 5137 * If the status is IN_RECOVERY, it's an indication 5138 * that it's now time for us to request to have the 5139 * device state set to OPERATIONAL since we're the ones 5140 * that requested recovery to begin with. 
5141 */ 5142 ds = PMCS_DEVICE_STATE_OPERATIONAL; 5143 } else { 5144 ds = PMCS_DEVICE_STATE_IN_RECOVERY; 5145 } 5146 if (xp != NULL) { 5147 mutex_enter(&xp->statlock); 5148 if (xp->dev_state != ds) { 5149 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5150 "%s: Sending err recovery cmd" 5151 " for tgt 0x%p (status = %s)", 5152 __func__, (void *)xp, 5153 pmcs_status_str(status)); 5154 (void) pmcs_send_err_recovery_cmd(pwp, ds, 5155 pptr, xp); 5156 } 5157 mutex_exit(&xp->statlock); 5158 } 5159 return (EIO); 5160 } else { 5161 ds = PMCS_DEVICE_STATE_OPERATIONAL; 5162 if (xp != NULL) { 5163 mutex_enter(&xp->statlock); 5164 if (xp->dev_state != ds) { 5165 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5166 "%s: Sending err recovery cmd" 5167 " for tgt 0x%p (status = %s)", 5168 __func__, (void *)xp, 5169 pmcs_status_str(status)); 5170 (void) pmcs_send_err_recovery_cmd(pwp, ds, 5171 pptr, xp); 5172 } 5173 mutex_exit(&xp->statlock); 5174 } 5175 } 5176 if (LE_32(msg[3]) == 0) { 5177 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5178 "TMF completed with no response"); 5179 return (EIO); 5180 } 5181 pmcs_endian_transform(pwp, local, &msg[5], ssp_rsp_evec); 5182 xd = (uint8_t *)(&msg[5]); 5183 xd += SAS_RSP_HDR_SIZE; 5184 if (rptr->datapres != SAS_RSP_DATAPRES_RESPONSE_DATA) { 5185 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5186 "%s: TMF response not RESPONSE DATA (0x%x)", 5187 __func__, rptr->datapres); 5188 return (EIO); 5189 } 5190 if (rptr->response_data_length != 4) { 5191 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, 5192 "Bad SAS RESPONSE DATA LENGTH", msg); 5193 return (EIO); 5194 } 5195 (void) memcpy(&status, xd, sizeof (uint32_t)); 5196 status = BE_32(status); 5197 if (response != NULL) 5198 *response = status; 5199 /* 5200 * The status is actually in the low-order byte. The upper three 5201 * bytes contain additional information for the TMFs that support them. 5202 * However, at this time we do not issue any of those. 
In the other 5203 * cases, the upper three bytes are supposed to be 0, but it appears 5204 * they aren't always. Just mask them off. 5205 */ 5206 switch (status & 0xff) { 5207 case SAS_RSP_TMF_COMPLETE: 5208 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5209 "%s: TMF complete", __func__); 5210 result = 0; 5211 break; 5212 case SAS_RSP_TMF_SUCCEEDED: 5213 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5214 "%s: TMF succeeded", __func__); 5215 result = 0; 5216 break; 5217 case SAS_RSP_INVALID_FRAME: 5218 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5219 "%s: TMF returned INVALID FRAME", __func__); 5220 result = EIO; 5221 break; 5222 case SAS_RSP_TMF_NOT_SUPPORTED: 5223 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5224 "%s: TMF returned TMF NOT SUPPORTED", __func__); 5225 result = EIO; 5226 break; 5227 case SAS_RSP_TMF_FAILED: 5228 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5229 "%s: TMF returned TMF FAILED", __func__); 5230 result = EIO; 5231 break; 5232 case SAS_RSP_TMF_INCORRECT_LUN: 5233 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5234 "%s: TMF returned INCORRECT LUN", __func__); 5235 result = EIO; 5236 break; 5237 case SAS_RSP_OVERLAPPED_OIPTTA: 5238 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5239 "%s: TMF returned OVERLAPPED INITIATOR PORT TRANSFER TAG " 5240 "ATTEMPTED", __func__); 5241 result = EIO; 5242 break; 5243 default: 5244 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5245 "%s: TMF returned unknown code 0x%x", __func__, status); 5246 result = EIO; 5247 break; 5248 } 5249 return (result); 5250 } 5251 5252 /* 5253 * Called with PHY lock held and scratch acquired 5254 */ 5255 int 5256 pmcs_sata_abort_ncq(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 5257 { 5258 const char *utag_fail_fmt = "%s: untagged NCQ command failure"; 5259 const char *tag_fail_fmt = "%s: NCQ command failure (tag 0x%x)"; 5260 uint32_t msg[PMCS_QENTRY_SIZE], *ptr, result, status; 5261 uint8_t *fp = pwp->scratch, ds; 5262 fis_t fis; 5263 pmcwork_t *pwrk; 5264 pmcs_xscsi_t *tgt; 5265 5266 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 
5267 if (pwrk == NULL) { 5268 return (ENOMEM); 5269 } 5270 pwrk->htag |= PMCS_TAG_NONIO_CMD; 5271 msg[0] = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, 5272 PMCIN_SATA_HOST_IO_START)); 5273 msg[1] = LE_32(pwrk->htag); 5274 msg[2] = LE_32(pptr->device_id); 5275 msg[3] = LE_32(512); 5276 msg[4] = LE_32(SATA_PROTOCOL_PIO | PMCIN_DATADIR_2_INI); 5277 msg[5] = LE_32((READ_LOG_EXT << 16) | (C_BIT << 8) | FIS_REG_H2DEV); 5278 msg[6] = LE_32(0x10); 5279 msg[8] = LE_32(1); 5280 msg[9] = 0; 5281 msg[10] = 0; 5282 msg[11] = 0; 5283 msg[12] = LE_32(DWORD0(pwp->scratch_dma)); 5284 msg[13] = LE_32(DWORD1(pwp->scratch_dma)); 5285 msg[14] = LE_32(512); 5286 msg[15] = 0; 5287 5288 pwrk->arg = msg; 5289 pwrk->dtype = pptr->dtype; 5290 5291 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5292 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5293 if (ptr == NULL) { 5294 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5295 pmcs_pwork(pwp, pwrk); 5296 return (ENOMEM); 5297 } 5298 COPY_MESSAGE(ptr, msg, PMCS_QENTRY_SIZE); 5299 pwrk->state = PMCS_WORK_STATE_ONCHIP; 5300 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5301 5302 pmcs_unlock_phy(pptr); 5303 WAIT_FOR(pwrk, 250, result); 5304 pmcs_lock_phy(pptr); 5305 pmcs_pwork(pwp, pwrk); 5306 5307 tgt = pptr->target; 5308 if (result) { 5309 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, pmcs_timeo, __func__); 5310 return (EIO); 5311 } 5312 status = LE_32(msg[2]); 5313 if (status != PMCOUT_STATUS_OK || LE_32(msg[3])) { 5314 if (tgt == NULL) { 5315 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 5316 "%s: cannot find target for phy 0x%p for " 5317 "dev state recovery", __func__, (void *)pptr); 5318 return (EIO); 5319 } 5320 5321 mutex_enter(&tgt->statlock); 5322 5323 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, "READ LOG EXT", msg); 5324 if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) || 5325 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) || 5326 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) { 5327 ds = PMCS_DEVICE_STATE_NON_OPERATIONAL; 5328 } else { 5329 ds = 
PMCS_DEVICE_STATE_IN_RECOVERY; 5330 } 5331 if (tgt->dev_state != ds) { 5332 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, "%s: Trying " 5333 "SATA DS Recovery for tgt(0x%p) for status(%s)", 5334 __func__, (void *)tgt, pmcs_status_str(status)); 5335 (void) pmcs_send_err_recovery_cmd(pwp, ds, pptr, tgt); 5336 } 5337 5338 mutex_exit(&tgt->statlock); 5339 return (EIO); 5340 } 5341 fis[0] = (fp[4] << 24) | (fp[3] << 16) | (fp[2] << 8) | FIS_REG_D2H; 5342 fis[1] = (fp[8] << 24) | (fp[7] << 16) | (fp[6] << 8) | fp[5]; 5343 fis[2] = (fp[12] << 24) | (fp[11] << 16) | (fp[10] << 8) | fp[9]; 5344 fis[3] = (fp[16] << 24) | (fp[15] << 16) | (fp[14] << 8) | fp[13]; 5345 fis[4] = 0; 5346 if (fp[0] & 0x80) { 5347 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 5348 utag_fail_fmt, __func__); 5349 } else { 5350 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 5351 tag_fail_fmt, __func__, fp[0] & 0x1f); 5352 } 5353 pmcs_fis_dump(pwp, fis); 5354 pptr->need_rl_ext = 0; 5355 return (0); 5356 } 5357 5358 /* 5359 * Transform a structure from CPU to Device endian format, or 5360 * vice versa, based upon a transformation vector. 5361 * 5362 * A transformation vector is an array of bytes, each byte 5363 * of which is defined thusly: 5364 * 5365 * bit 7: from CPU to desired endian, otherwise from desired endian 5366 * to CPU format 5367 * bit 6: Big Endian, else Little Endian 5368 * bits 5-4: 5369 * 00 Undefined 5370 * 01 One Byte quantities 5371 * 02 Two Byte quantities 5372 * 03 Four Byte quantities 5373 * 5374 * bits 3-0: 5375 * 00 Undefined 5376 * Number of quantities to transform 5377 * 5378 * The vector is terminated by a 0 value. 
5379 */ 5380 5381 void 5382 pmcs_endian_transform(pmcs_hw_t *pwp, void *orig_out, void *orig_in, 5383 const uint8_t *xfvec) 5384 { 5385 uint8_t c, *out = orig_out, *in = orig_in; 5386 5387 if (xfvec == NULL) { 5388 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5389 "%s: null xfvec", __func__); 5390 return; 5391 } 5392 if (out == NULL) { 5393 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5394 "%s: null out", __func__); 5395 return; 5396 } 5397 if (in == NULL) { 5398 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5399 "%s: null in", __func__); 5400 return; 5401 } 5402 while ((c = *xfvec++) != 0) { 5403 int nbyt = (c & 0xf); 5404 int size = (c >> 4) & 0x3; 5405 int bige = (c >> 4) & 0x4; 5406 5407 switch (size) { 5408 case 1: 5409 { 5410 while (nbyt-- > 0) { 5411 *out++ = *in++; 5412 } 5413 break; 5414 } 5415 case 2: 5416 { 5417 uint16_t tmp; 5418 while (nbyt-- > 0) { 5419 (void) memcpy(&tmp, in, sizeof (uint16_t)); 5420 if (bige) { 5421 tmp = BE_16(tmp); 5422 } else { 5423 tmp = LE_16(tmp); 5424 } 5425 (void) memcpy(out, &tmp, sizeof (uint16_t)); 5426 out += sizeof (uint16_t); 5427 in += sizeof (uint16_t); 5428 } 5429 break; 5430 } 5431 case 3: 5432 { 5433 uint32_t tmp; 5434 while (nbyt-- > 0) { 5435 (void) memcpy(&tmp, in, sizeof (uint32_t)); 5436 if (bige) { 5437 tmp = BE_32(tmp); 5438 } else { 5439 tmp = LE_32(tmp); 5440 } 5441 (void) memcpy(out, &tmp, sizeof (uint32_t)); 5442 out += sizeof (uint32_t); 5443 in += sizeof (uint32_t); 5444 } 5445 break; 5446 } 5447 default: 5448 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5449 "%s: bad size", __func__); 5450 return; 5451 } 5452 } 5453 } 5454 5455 const char * 5456 pmcs_get_rate(unsigned int linkrt) 5457 { 5458 const char *rate; 5459 switch (linkrt) { 5460 case SAS_LINK_RATE_1_5GBIT: 5461 rate = "1.5"; 5462 break; 5463 case SAS_LINK_RATE_3GBIT: 5464 rate = "3.0"; 5465 break; 5466 case SAS_LINK_RATE_6GBIT: 5467 rate = "6.0"; 5468 break; 5469 default: 5470 rate = "???"; 5471 break; 5472 } 5473 return (rate); 5474 } 5475 5476 const 
char *
pmcs_get_typename(pmcs_dtype_t type)
{
	switch (type) {
	case NOTHING:
		return ("NIL");
	case SATA:
		return ("SATA");
	case SAS:
		return ("SSP");
	case EXPANDER:
		return ("EXPANDER");
	}
	return ("????");
}

/*
 * Return a printable name for a SAS task management function code;
 * "Unknown" for anything unrecognized.
 */
const char *
pmcs_tmf2str(int tmf)
{
	switch (tmf) {
	case SAS_ABORT_TASK:
		return ("Abort Task");
	case SAS_ABORT_TASK_SET:
		return ("Abort Task Set");
	case SAS_CLEAR_TASK_SET:
		return ("Clear Task Set");
	case SAS_LOGICAL_UNIT_RESET:
		return ("Logical Unit Reset");
	case SAS_I_T_NEXUS_RESET:
		return ("I_T Nexus Reset");
	case SAS_CLEAR_ACA:
		return ("Clear ACA");
	case SAS_QUERY_TASK:
		return ("Query Task");
	case SAS_QUERY_TASK_SET:
		return ("Query Task Set");
	case SAS_QUERY_UNIT_ATTENTION:
		return ("Query Unit Attention");
	default:
		return ("Unknown");
	}
}

/*
 * Return a printable name for a PMCOUT_STATUS_* completion code, or NULL
 * if the code is unrecognized (callers must be prepared for NULL).
 */
const char *
pmcs_status_str(uint32_t status)
{
	switch (status) {
	case PMCOUT_STATUS_OK:
		return ("OK");
	case PMCOUT_STATUS_ABORTED:
		return ("ABORTED");
	case PMCOUT_STATUS_OVERFLOW:
		return ("OVERFLOW");
	case PMCOUT_STATUS_UNDERFLOW:
		return ("UNDERFLOW");
	case PMCOUT_STATUS_FAILED:
		return ("FAILED");
	case PMCOUT_STATUS_ABORT_RESET:
		return ("ABORT_RESET");
	case PMCOUT_STATUS_IO_NOT_VALID:
		return ("IO_NOT_VALID");
	case PMCOUT_STATUS_NO_DEVICE:
		return ("NO_DEVICE");
	case PMCOUT_STATUS_ILLEGAL_PARAMETER:
		return ("ILLEGAL_PARAMETER");
	case PMCOUT_STATUS_LINK_FAILURE:
		return ("LINK_FAILURE");
	case PMCOUT_STATUS_PROG_ERROR:
		return ("PROG_ERROR");
	case PMCOUT_STATUS_EDC_IN_ERROR:
		return ("EDC_IN_ERROR");
	case PMCOUT_STATUS_EDC_OUT_ERROR:
		return ("EDC_OUT_ERROR");
	case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
		return ("ERROR_HW_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERR_BREAK:
		return ("XFER_ERR_BREAK");
	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
		return ("XFER_ERR_PHY_NOT_READY");
	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
		return ("OPEN_CNX_PROTOCOL_NOT_SUPPORTED");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
		return ("OPEN_CNX_ERROR_ZONE_VIOLATION");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
		return ("OPEN_CNX_ERROR_BREAK");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		return ("OPEN_CNX_ERROR_IT_NEXUS_LOSS");
	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
		return ("OPENCNX_ERROR_BAD_DESTINATION");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		return ("OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		return ("OPEN_CNX_ERROR_STP_RESOURCES_BUSY");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
		return ("OPEN_CNX_ERROR_WRONG_DESTINATION");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
		return ("OPEN_CNX_ERROR_UNKNOWN_ERROR");
	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
		return ("IO_XFER_ERROR_NAK_RECEIVED");
	case PMCOUT_STATUS_XFER_ERROR_ACK_NAK_TIMEOUT:
		return ("XFER_ERROR_ACK_NAK_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
		return ("XFER_ERROR_PEER_ABORTED");
	case PMCOUT_STATUS_XFER_ERROR_RX_FRAME:
		return ("XFER_ERROR_RX_FRAME");
	case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
		return ("IO_XFER_ERROR_DMA");
	case PMCOUT_STATUS_XFER_ERROR_CREDIT_TIMEOUT:
		return ("XFER_ERROR_CREDIT_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
		return ("XFER_ERROR_SATA_LINK_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_SATA:
		return ("XFER_ERROR_SATA");
	case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
		return ("XFER_ERROR_REJECTED_NCQ_MODE");
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
		return ("XFER_ERROR_ABORTED_DUE_TO_SRST");
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
		return ("XFER_ERROR_ABORTED_NCQ_MODE");
	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
		return ("IO_XFER_OPEN_RETRY_TIMEOUT");
	case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
		return ("SMP_RESP_CONNECTION_ERROR");
	case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
		return ("XFER_ERROR_UNEXPECTED_PHASE");
	case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
		return ("XFER_ERROR_RDY_OVERRUN");
	case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
		return ("XFER_ERROR_RDY_NOT_EXPECTED");
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
		return ("XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
		return ("XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK");
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
		return ("XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK");
	case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
		return ("XFER_ERROR_OFFSET_MISMATCH");
	case PMCOUT_STATUS_XFER_ERROR_ZERO_DATA_LEN:
		return ("XFER_ERROR_ZERO_DATA_LEN");
	case PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED:
		return ("XFER_CMD_FRAME_ISSUED");
	case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
		return ("ERROR_INTERNAL_SMP_RESOURCE");
	case PMCOUT_STATUS_IO_PORT_IN_RESET:
		return ("IO_PORT_IN_RESET");
	case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
		return ("DEVICE STATE NON-OPERATIONAL");
	case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
		return ("DEVICE STATE IN RECOVERY");
	case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
		return ("OPEN CNX ERR HW RESOURCE BUSY");
	default:
		return (NULL);
	}
}

/*
 * Convert an 8-byte big-endian SAS address array to a 64-bit WWN.
 */
uint64_t
pmcs_barray2wwn(uint8_t ba[8])
{
	uint64_t result = 0;
	int i;

	for (i = 0; i < 8; i++) {
		result <<= 8;
		result |= ba[i];
	}
	return (result);
}

/*
 * Convert a 64-bit WWN to an 8-byte big-endian SAS address array
 * (inverse of pmcs_barray2wwn).
 */
void
pmcs_wwn2barray(uint64_t wwn, uint8_t ba[8])
{
	int i;
	/* Fill from the least significant byte backward: ba[7] gets LSB. */
	for (i = 0; i < 8; i++) {
		ba[7 - i] = wwn & 0xff;
		wwn >>= 8;
	}
}

/*
 * Log the chip revision and firmware version (with its support level and
 * ILA revision) at INFO priority.
 */
void
pmcs_report_fwversion(pmcs_hw_t *pwp)
{
	const char *fwsupport;
	switch (PMCS_FW_TYPE(pwp)) {
	case PMCS_FW_TYPE_RELEASED:
		fwsupport = "Released";
		break;
	case PMCS_FW_TYPE_DEVELOPMENT:
		fwsupport = "Development";
		break;
	case PMCS_FW_TYPE_ALPHA:
		fwsupport = "Alpha";
		break;
	case PMCS_FW_TYPE_BETA:
		fwsupport = "Beta";
		break;
	default:
		fwsupport = "Special";
		break;
	}
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
	    "Chip Revision: %c; F/W Revision %x.%x.%x %s (ILA rev %08x)",
	    'A' + pwp->chiprev, PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp),
	    PMCS_FW_MICRO(pwp), fwsupport, pwp->ila_ver);
}

/*
 * Build a dotted path name for a PHY into obuf by recursing up through
 * its parents: "pp<root>.<phynum>.<phynum>..." from root to leaf.
 *
 * NOTE(review): the recursive step passes obuf both as the destination
 * and as a "%s" source argument of the same snprintf; overlapping
 * source/destination is technically undefined for snprintf — confirm.
 */
void
pmcs_phy_name(pmcs_hw_t *pwp, pmcs_phy_t *pptr, char *obuf, size_t olen)
{
	if (pptr->parent) {
		pmcs_phy_name(pwp, pptr->parent, obuf, olen);
		(void) snprintf(obuf, olen, "%s.%02x", obuf, pptr->phynum);
	} else {
		(void) snprintf(obuf, olen, "pp%02x", pptr->phynum);
	}
}

/*
 * This function is called as a sanity check to ensure that a newly registered
 * PHY doesn't have a device_id that exists with another registered PHY.
 * Walks the subtree rooted at "parent" (and recurses into children);
 * returns B_TRUE if the pairing is acceptable, B_FALSE on a conflict.
 */
static boolean_t
pmcs_validate_devid(pmcs_phy_t *parent, pmcs_phy_t *phyp, uint32_t device_id)
{
	pmcs_phy_t *pptr, *pchild;
	boolean_t rval;

	pptr = parent;

	while (pptr) {
		if (pptr->valid_device_id && (pptr != phyp) &&
		    (pptr->device_id == device_id)) {
			/*
			 * This can still be OK if both of these PHYs actually
			 * represent the same device (e.g. expander). It could
			 * be a case of a new "primary" PHY. If the SAS address
			 * is the same and they have the same parent, we'll
			 * accept this if the PHY to be registered is the
			 * primary.
			 */
			if ((phyp->parent == pptr->parent) &&
			    (memcmp(phyp->sas_address,
			    pptr->sas_address, 8) == 0) && (phyp->width > 1)) {
				/*
				 * Move children over to the new primary and
				 * update both PHYs
				 */
				pmcs_lock_phy(pptr);
				phyp->children = pptr->children;
				pchild = phyp->children;
				while (pchild) {
					pchild->parent = phyp;
					pchild = pchild->sibling;
				}
				phyp->subsidiary = 0;
				phyp->ncphy = pptr->ncphy;
				/*
				 * device_id, valid_device_id, and configured
				 * will be set by the caller
				 */
				pptr->children = NULL;
				pptr->subsidiary = 1;
				pptr->ncphy = 0;
				pmcs_unlock_phy(pptr);
				pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL,
				    "%s: Moving device_id %d from PHY %s to %s",
				    __func__, device_id, pptr->path,
				    phyp->path);
				return (B_TRUE);
			}
			/* Genuine conflict: same device_id, different device. */
			pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: phy %s already exists as %s with "
			    "device id 0x%x", __func__, phyp->path,
			    pptr->path, device_id);
			return (B_FALSE);
		}

		/* Recurse into this PHY's children before moving on. */
		if (pptr->children) {
			rval = pmcs_validate_devid(pptr->children, phyp,
			    device_id);
			if (rval == B_FALSE) {
				return (rval);
			}
		}

		pptr = pptr->sibling;
	}

	/* This PHY and device_id are valid */
	return (B_TRUE);
}

/*
 * If the PHY is found, it is returned locked.
 * Depth-first search of the subtree rooted at phyp for a PHY with a valid
 * device ID whose SAS address matches the 8-byte wwn argument.
 */
static pmcs_phy_t *
pmcs_find_phy_by_wwn_impl(pmcs_phy_t *phyp, uint8_t *wwn)
{
	pmcs_phy_t *matched_phy, *cphyp, *nphyp;

	ASSERT(!mutex_owned(&phyp->phy_lock));

	while (phyp) {
		pmcs_lock_phy(phyp);

		if (phyp->valid_device_id) {
			if (memcmp(phyp->sas_address, wwn, 8) == 0) {
				/* Match: return with phy_lock held. */
				return (phyp);
			}
		}

		if (phyp->children) {
			cphyp = phyp->children;
			/* Drop our lock before recursing into the children. */
			pmcs_unlock_phy(phyp);
			matched_phy = pmcs_find_phy_by_wwn_impl(cphyp, wwn);
			if (matched_phy) {
				ASSERT(mutex_owned(&matched_phy->phy_lock));
				return (matched_phy);
			}
			pmcs_lock_phy(phyp);
		}

		/*
		 * Only iterate through non-root PHYs
		 */
		if (IS_ROOT_PHY(phyp)) {
			pmcs_unlock_phy(phyp);
			phyp = NULL;
		} else {
			/* Grab the sibling pointer before dropping the lock. */
			nphyp = phyp->sibling;
			pmcs_unlock_phy(phyp);
			phyp = nphyp;
		}
	}

	return (NULL);
}

/*
 * Find the PHY matching the given 64-bit WWN, searching from each root
 * PHY in turn.  The matched PHY, if any, is returned locked.
 */
pmcs_phy_t *
pmcs_find_phy_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
{
	uint8_t ebstr[8];
	pmcs_phy_t *pptr, *matched_phy;

	/* Convert to the byte-array form the tree stores. */
	pmcs_wwn2barray(wwn, ebstr);

	pptr = pwp->root_phys;
	while (pptr) {
		matched_phy = pmcs_find_phy_by_wwn_impl(pptr, ebstr);
		if (matched_phy) {
			ASSERT(mutex_owned(&matched_phy->phy_lock));
			return (matched_phy);
		}

		pptr = pptr->sibling;
	}

	return (NULL);
}


/*
 * pmcs_find_phy_by_sas_address
 *
 * Find a PHY that both matches "sas_addr" and is on "iport".
 * If a matching PHY is found, it is returned locked.
 */
pmcs_phy_t *
pmcs_find_phy_by_sas_address(pmcs_hw_t *pwp, pmcs_iport_t *iport,
    pmcs_phy_t *root, char *sas_addr)
{
	int ua_form = 1;
	uint64_t wwn;
	char addr[PMCS_MAX_UA_SIZE];
	pmcs_phy_t *pptr, *pnext, *pchild;

	/* NULL root means "search from the root PHYs". */
	if (root == NULL) {
		pptr = pwp->root_phys;
	} else {
		pptr = root;
	}

	while (pptr) {
		pmcs_lock_phy(pptr);
		/*
		 * If the PHY is dead or does not have a valid device ID,
		 * skip it.
5863 */ 5864 if ((pptr->dead) || (!pptr->valid_device_id)) { 5865 goto next_phy; 5866 } 5867 5868 if (pptr->iport != iport) { 5869 goto next_phy; 5870 } 5871 5872 wwn = pmcs_barray2wwn(pptr->sas_address); 5873 (void *) scsi_wwn_to_wwnstr(wwn, ua_form, addr); 5874 if (strncmp(addr, sas_addr, strlen(addr)) == 0) { 5875 return (pptr); 5876 } 5877 5878 if (pptr->children) { 5879 pchild = pptr->children; 5880 pmcs_unlock_phy(pptr); 5881 pnext = pmcs_find_phy_by_sas_address(pwp, iport, pchild, 5882 sas_addr); 5883 if (pnext) { 5884 return (pnext); 5885 } 5886 pmcs_lock_phy(pptr); 5887 } 5888 5889 next_phy: 5890 pnext = pptr->sibling; 5891 pmcs_unlock_phy(pptr); 5892 pptr = pnext; 5893 } 5894 5895 return (NULL); 5896 } 5897 5898 void 5899 pmcs_fis_dump(pmcs_hw_t *pwp, fis_t fis) 5900 { 5901 switch (fis[0] & 0xff) { 5902 case FIS_REG_H2DEV: 5903 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5904 "FIS REGISTER HOST TO DEVICE: " 5905 "OP=0x%02x Feature=0x%04x Count=0x%04x Device=0x%02x " 5906 "LBA=%llu", BYTE2(fis[0]), BYTE3(fis[2]) << 8 | 5907 BYTE3(fis[0]), WORD0(fis[3]), BYTE3(fis[1]), 5908 (unsigned long long) 5909 (((uint64_t)fis[2] & 0x00ffffff) << 24 | 5910 ((uint64_t)fis[1] & 0x00ffffff))); 5911 break; 5912 case FIS_REG_D2H: 5913 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5914 "FIS REGISTER DEVICE TO HOST: Status=0x%02x " 5915 "Error=0x%02x Dev=0x%02x Count=0x%04x LBA=%llu", 5916 BYTE2(fis[0]), BYTE3(fis[0]), BYTE3(fis[1]), WORD0(fis[3]), 5917 (unsigned long long)(((uint64_t)fis[2] & 0x00ffffff) << 24 | 5918 ((uint64_t)fis[1] & 0x00ffffff))); 5919 break; 5920 default: 5921 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5922 "FIS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 5923 fis[0], fis[1], fis[2], fis[3], fis[4]); 5924 break; 5925 } 5926 } 5927 5928 void 5929 pmcs_print_entry(pmcs_hw_t *pwp, int level, char *msg, void *arg) 5930 { 5931 uint32_t *mb = arg; 5932 size_t i; 5933 5934 pmcs_prt(pwp, level, NULL, NULL, msg); 5935 for (i = 0; i < (PMCS_QENTRY_SIZE / sizeof (uint32_t)); i += 
4) { 5936 pmcs_prt(pwp, level, NULL, NULL, 5937 "Offset %2lu: 0x%08x 0x%08x 0x%08x 0x%08x", 5938 i * sizeof (uint32_t), LE_32(mb[i]), 5939 LE_32(mb[i+1]), LE_32(mb[i+2]), LE_32(mb[i+3])); 5940 } 5941 } 5942 5943 /* 5944 * If phyp == NULL we're being called from the worker thread, in which 5945 * case we need to check all the PHYs. In this case, the softstate lock 5946 * will be held. 5947 * If phyp is non-NULL, just issue the spinup release for the specified PHY 5948 * (which will already be locked). 5949 */ 5950 void 5951 pmcs_spinup_release(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 5952 { 5953 uint32_t *msg; 5954 struct pmcwork *pwrk; 5955 pmcs_phy_t *tphyp; 5956 5957 if (phyp != NULL) { 5958 ASSERT(mutex_owned(&phyp->phy_lock)); 5959 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 5960 "%s: Issuing spinup release only for PHY %s", __func__, 5961 phyp->path); 5962 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5963 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5964 if (msg == NULL || (pwrk = 5965 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) { 5966 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5967 SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE); 5968 return; 5969 } 5970 5971 phyp->spinup_hold = 0; 5972 bzero(msg, PMCS_QENTRY_SIZE); 5973 pwrk->htag |= PMCS_TAG_NONIO_CMD; 5974 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 5975 PMCIN_LOCAL_PHY_CONTROL)); 5976 msg[1] = LE_32(pwrk->htag); 5977 msg[2] = LE_32((0x10 << 8) | phyp->phynum); 5978 5979 pwrk->dtype = phyp->dtype; 5980 pwrk->state = PMCS_WORK_STATE_ONCHIP; 5981 mutex_exit(&pwrk->lock); 5982 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5983 return; 5984 } 5985 5986 ASSERT(mutex_owned(&pwp->lock)); 5987 5988 tphyp = pwp->root_phys; 5989 while (tphyp) { 5990 pmcs_lock_phy(tphyp); 5991 if (tphyp->spinup_hold == 0) { 5992 pmcs_unlock_phy(tphyp); 5993 tphyp = tphyp->sibling; 5994 continue; 5995 } 5996 5997 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 5998 "%s: Issuing spinup release for PHY %s", __func__, 5999 phyp->path); 6000 6001 
mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6002 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6003 if (msg == NULL || (pwrk = 6004 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) { 6005 pmcs_unlock_phy(tphyp); 6006 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6007 SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE); 6008 break; 6009 } 6010 6011 tphyp->spinup_hold = 0; 6012 bzero(msg, PMCS_QENTRY_SIZE); 6013 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 6014 PMCIN_LOCAL_PHY_CONTROL)); 6015 msg[1] = LE_32(pwrk->htag); 6016 msg[2] = LE_32((0x10 << 8) | tphyp->phynum); 6017 6018 pwrk->dtype = phyp->dtype; 6019 pwrk->state = PMCS_WORK_STATE_ONCHIP; 6020 mutex_exit(&pwrk->lock); 6021 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6022 pmcs_unlock_phy(tphyp); 6023 6024 tphyp = tphyp->sibling; 6025 } 6026 } 6027 6028 /* 6029 * Abort commands on dead PHYs and deregister them as well as removing 6030 * the associated targets. 6031 */ 6032 static int 6033 pmcs_kill_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 6034 { 6035 pmcs_phy_t *pnext, *pchild; 6036 boolean_t remove_device; 6037 int rval = 0; 6038 6039 while (phyp) { 6040 pmcs_lock_phy(phyp); 6041 pchild = phyp->children; 6042 pnext = phyp->sibling; 6043 pmcs_unlock_phy(phyp); 6044 6045 if (pchild) { 6046 rval = pmcs_kill_devices(pwp, pchild); 6047 if (rval) { 6048 return (rval); 6049 } 6050 } 6051 6052 /* 6053 * pmcs_remove_device requires the softstate lock. 
		 */
		mutex_enter(&pwp->lock);
		pmcs_lock_phy(phyp);
		/* Only dead PHYs with a registered device need removal. */
		if (phyp->dead && phyp->valid_device_id) {
			remove_device = B_TRUE;
		} else {
			remove_device = B_FALSE;
		}

		if (remove_device) {
			pmcs_remove_device(pwp, phyp);
			mutex_exit(&pwp->lock);

			rval = pmcs_kill_device(pwp, phyp);

			if (rval) {
				pmcs_unlock_phy(phyp);
				return (rval);
			}
		} else {
			mutex_exit(&pwp->lock);
		}

		pmcs_unlock_phy(phyp);
		phyp = pnext;
	}

	return (rval);
}

/*
 * Called with PHY locked.
 * Abort any outstanding commands on the PHY's device and deregister its
 * device handle with the chip.  Returns 0 on success or an errno value.
 */
int
pmcs_kill_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int r, result;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	struct pmcwork *pwrk;

	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "kill %s device @ %s",
	    pmcs_get_typename(pptr->dtype), pptr->path);

	/*
	 * There may be an outstanding ABORT_ALL running, which we wouldn't
	 * know just by checking abort_pending. We can, however, check
	 * abort_all_start. If it's non-zero, there is one, and we'll just
	 * sit here and wait for it to complete. If we don't, we'll remove
	 * the device while there are still commands pending.
	 */
	if (pptr->abort_all_start) {
		/* cv_wait drops and reacquires phy_lock while we sleep. */
		while (pptr->abort_all_start) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: Waiting for outstanding ABORT_ALL on PHY 0x%p",
			    __func__, (void *)pptr);
			cv_wait(&pptr->abort_all_cv, &pptr->phy_lock);
		}
	} else if (pptr->abort_pending) {
		r = pmcs_abort(pwp, pptr, pptr->device_id, 1, 1);

		if (r) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: ABORT_ALL returned non-zero status (%d) for "
			    "PHY 0x%p", __func__, r, (void *)pptr);
			return (r);
		}
		pptr->abort_pending = 0;
	}

	/* Nothing to deregister if no device handle was ever registered. */
	if (pptr->valid_device_id == 0) {
		return (0);
	}

	if ((pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}
	pwrk->arg = msg;
	pwrk->dtype = pptr->dtype;
	/* Build the DEREGISTER_DEVICE_HANDLE command. */
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_DEREGISTER_DEVICE_HANDLE));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		mutex_exit(&pwrk->lock);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}

	COPY_MESSAGE(ptr, msg, 3);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while waiting for the completion. */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_lock_phy(pptr);
	pmcs_pwork(pwp, pwrk);

	if (result) {
		return (ETIMEDOUT);
	}
	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		/* Log but continue: we still invalidate our handle below. */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to deregister device %s",
		    __func__, status, pptr->path);
	}

	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	PHY_CHANGED(pwp, pptr);
	RESTART_DISCOVERY(pwp);
	pptr->valid_device_id = 0;
	return (0);
}

/*
 * Acknowledge the SAS h/w events that need acknowledgement.
 * This is only needed for first level PHYs.
 */
void
pmcs_ack_events(pmcs_hw_t *pwp)
{
	uint32_t msg[PMCS_MSG_SIZE], *ptr;
	struct pmcwork *pwrk;
	pmcs_phy_t *pptr;

	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		pmcs_lock_phy(pptr);
		if (pptr->hw_event_ack == 0) {
			pmcs_unlock_phy(pptr);
			continue;
		}
		mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

		if ((ptr == NULL) || (pwrk =
		    pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
			/* Out of resources: reschedule the ack work. */
			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
			pmcs_unlock_phy(pptr);
			SCHEDULE_WORK(pwp, PMCS_WORK_SAS_HW_ACK);
			break;
		}

		msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_SAS_HW_EVENT_ACK));
		msg[1] = LE_32(pwrk->htag);
		msg[2] = LE_32(pptr->hw_event_ack);

		/*
		 * NOTE(review): pwrk->dtype is assigned after pwrk->lock
		 * is dropped here, unlike the set-then-unlock ordering used
		 * elsewhere in this file — confirm this is safe.
		 */
		mutex_exit(&pwrk->lock);
		pwrk->dtype = pptr->dtype;
		pptr->hw_event_ack = 0;
		COPY_MESSAGE(ptr, msg, 3);
		INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
		pmcs_unlock_phy(pptr);
	}
}

/*
 * Load DMA.
 * Fill in the S/G portion of an Inbound message (msg[12..15]) for the
 * command sp, allocating external S/G chunks as needed.  Returns 0 on
 * success, -1 if the DMA chunk freelist is exhausted.
 */
int
pmcs_dma_load(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint32_t *msg)
{
	ddi_dma_cookie_t *sg;
	pmcs_dmachunk_t *tc;
	pmcs_dmasgl_t *sgl, *prior;
	int seg, tsc;
	uint64_t sgl_addr;

	/*
	 * If we have no data segments, we're done.
	 */
	if (CMD2PKT(sp)->pkt_numcookies == 0) {
		return (0);
	}

	/*
	 * Get the S/G list pointer.
	 */
	sg = CMD2PKT(sp)->pkt_cookies;

	/*
	 * If we only have one dma segment, we can directly address that
	 * data within the Inbound message itself.
	 */
	if (CMD2PKT(sp)->pkt_numcookies == 1) {
		msg[12] = LE_32(DWORD0(sg->dmac_laddress));
		msg[13] = LE_32(DWORD1(sg->dmac_laddress));
		msg[14] = LE_32(sg->dmac_size);
		msg[15] = 0;
		return (0);
	}

	/*
	 * Otherwise, we'll need one or more external S/G list chunks.
	 * Get the first one and its dma address into the Inbound message.
	 */
	mutex_enter(&pwp->dma_lock);
	tc = pwp->dma_freelist;
	if (tc == NULL) {
		/* Freelist empty: ask the worker to add chunks and fail. */
		SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
		mutex_exit(&pwp->dma_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
		    "%s: out of SG lists", __func__);
		return (-1);
	}
	pwp->dma_freelist = tc->nxt;
	mutex_exit(&pwp->dma_lock);

	/* This chunk becomes the head of the command's chunk list. */
	tc->nxt = NULL;
	sp->cmd_clist = tc;
	sgl = tc->chunks;
	(void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ);
	sgl_addr = tc->addr;
	msg[12] = LE_32(DWORD0(sgl_addr));
	msg[13] = LE_32(DWORD1(sgl_addr));
	msg[14] = 0;
	msg[15] = LE_32(PMCS_DMASGL_EXTENSION);

	prior = sgl;
	tsc = 0;

	for (seg = 0; seg < CMD2PKT(sp)->pkt_numcookies; seg++) {
		/*
		 * If the current segment count for this chunk is one less than
		 * the number s/g lists per chunk and we have more than one seg
		 * to go, we need another chunk. Get it, and make sure that the
		 * tail end of the the previous chunk points the new chunk
		 * (if remembering an offset can be called 'pointing to').
		 *
		 * Note that we can store the offset into our command area that
		 * represents the new chunk in the length field of the part
		 * that points the PMC chip at the next chunk- the PMC chip
		 * ignores this field when the EXTENSION bit is set.
		 *
		 * This is required for dma unloads later.
		 */
		if (tsc == (PMCS_SGL_NCHUNKS - 1) &&
		    seg < (CMD2PKT(sp)->pkt_numcookies - 1)) {
			mutex_enter(&pwp->dma_lock);
			tc = pwp->dma_freelist;
			if (tc == NULL) {
				/* Undo partial setup before failing. */
				SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
				mutex_exit(&pwp->dma_lock);
				pmcs_dma_unload(pwp, sp);
				pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
				    "%s: out of SG lists", __func__);
				return (-1);
			}
			pwp->dma_freelist = tc->nxt;
			tc->nxt = sp->cmd_clist;
			mutex_exit(&pwp->dma_lock);

			sp->cmd_clist = tc;
			(void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ);
			sgl = tc->chunks;
			sgl_addr = tc->addr;
			/* Chain the previous chunk's last entry to this one. */
			prior[PMCS_SGL_NCHUNKS-1].sglal =
			    LE_32(DWORD0(sgl_addr));
			prior[PMCS_SGL_NCHUNKS-1].sglah =
			    LE_32(DWORD1(sgl_addr));
			prior[PMCS_SGL_NCHUNKS-1].sglen = 0;
			prior[PMCS_SGL_NCHUNKS-1].flags =
			    LE_32(PMCS_DMASGL_EXTENSION);
			prior = sgl;
			tsc = 0;
		}
		sgl[tsc].sglal = LE_32(DWORD0(sg->dmac_laddress));
		sgl[tsc].sglah = LE_32(DWORD1(sg->dmac_laddress));
		sgl[tsc].sglen = LE_32(sg->dmac_size);
		sgl[tsc++].flags = 0;
		sg++;
	}
	return (0);
}

/*
 * Unload DMA.
 * Return all S/G chunks on sp's chunk list to the driver freelist.
 */
void
pmcs_dma_unload(pmcs_hw_t *pwp, pmcs_cmd_t *sp)
{
	pmcs_dmachunk_t *cp;

	mutex_enter(&pwp->dma_lock);
	while ((cp = sp->cmd_clist) != NULL) {
		sp->cmd_clist = cp->nxt;
		cp->nxt = pwp->dma_freelist;
		pwp->dma_freelist = cp;
	}
	mutex_exit(&pwp->dma_lock);
}

/*
 * Take a chunk of consistent memory that has just been allocated and inserted
 * into the cip indices and prepare it for DMA chunk usage and add it to the
 * freelist.
 *
 * Called with dma_lock locked (except during attach when it's unnecessary)
 */
void
pmcs_idma_chunks(pmcs_hw_t *pwp, pmcs_dmachunk_t *dcp,
    pmcs_chunk_t *pchunk, unsigned long lim)
{
	unsigned long off, n;
	pmcs_dmachunk_t *np = dcp;
	pmcs_chunk_t *tmp_chunk;

	/* Append the backing chunk to the tail of the chunklist. */
	if (pwp->dma_chunklist == NULL) {
		pwp->dma_chunklist = pchunk;
	} else {
		tmp_chunk = pwp->dma_chunklist;
		while (tmp_chunk->next) {
			tmp_chunk = tmp_chunk->next;
		}
		tmp_chunk->next = pchunk;
	}

	/*
	 * Install offsets into chunk lists.
	 */
	for (n = 0, off = 0; off < lim; off += PMCS_SGL_CHUNKSZ, n++) {
		np->chunks = (void *)&pchunk->addrp[off];
		np->addr = pchunk->dma_addr + off;
		np->acc_handle = pchunk->acc_handle;
		np->dma_handle = pchunk->dma_handle;
		/* Advance except on the final descriptor. */
		if ((off + PMCS_SGL_CHUNKSZ) < lim) {
			np = np->nxt;
		}
	}
	/* Splice the whole descriptor list onto the freelist head. */
	np->nxt = pwp->dma_freelist;
	pwp->dma_freelist = dcp;
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "added %lu DMA chunks ", n);
}

/*
 * Change the value of the interrupt coalescing timer. This is done currently
 * only for I/O completions. If we're using the "auto clear" feature, it can
 * be turned back on when interrupt coalescing is turned off and must be
 * turned off when the coalescing timer is on.
 * NOTE: PMCS_MSIX_GENERAL and PMCS_OQ_IODONE are the same value. As long
 * as that's true, we don't need to distinguish between them.
 */

void
pmcs_set_intr_coal_timer(pmcs_hw_t *pwp, pmcs_coal_timer_adj_t adj)
{
	if (adj == DECREASE_TIMER) {
		/* If the timer is already off, nothing to do.
		 */
		if (pwp->io_intr_coal.timer_on == B_FALSE) {
			return;
		}

		pwp->io_intr_coal.intr_coal_timer -= PMCS_COAL_TIMER_GRAN;

		if (pwp->io_intr_coal.intr_coal_timer == 0) {
			/* Disable the timer */
			pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL, 0);

			/* Re-enable auto clear now that coalescing is off. */
			if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
				pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
				    pwp->odb_auto_clear);
			}

			/* Reset all coalescing bookkeeping. */
			pwp->io_intr_coal.timer_on = B_FALSE;
			pwp->io_intr_coal.max_io_completions = B_FALSE;
			pwp->io_intr_coal.num_intrs = 0;
			pwp->io_intr_coal.int_cleared = B_FALSE;
			pwp->io_intr_coal.num_io_completions = 0;

			DTRACE_PROBE1(pmcs__intr__coalesce__timer__off,
			    pmcs_io_intr_coal_t *, &pwp->io_intr_coal);
		} else {
			pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER,
			    pwp->io_intr_coal.intr_coal_timer);
		}
	} else {
		/*
		 * If the timer isn't on yet, do the setup for it now.
		 */
		if (pwp->io_intr_coal.timer_on == B_FALSE) {
			/* If auto clear is being used, turn it off.
			 */
			if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
				pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
				    (pwp->odb_auto_clear &
				    ~(1 << PMCS_MSIX_IODONE)));
			}

			pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL,
			    (1 << PMCS_MSIX_IODONE));
			pwp->io_intr_coal.timer_on = B_TRUE;
			pwp->io_intr_coal.intr_coal_timer =
			    PMCS_COAL_TIMER_GRAN;

			DTRACE_PROBE1(pmcs__intr__coalesce__timer__on,
			    pmcs_io_intr_coal_t *, &pwp->io_intr_coal);
		} else {
			pwp->io_intr_coal.intr_coal_timer +=
			    PMCS_COAL_TIMER_GRAN;
		}

		/* Clamp the timer at its maximum. */
		if (pwp->io_intr_coal.intr_coal_timer > PMCS_MAX_COAL_TIMER) {
			pwp->io_intr_coal.intr_coal_timer = PMCS_MAX_COAL_TIMER;
		}

		pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER,
		    pwp->io_intr_coal.intr_coal_timer);
	}

	/*
	 * Adjust the interrupt threshold based on the current timer value
	 */
	pwp->io_intr_coal.intr_threshold =
	    PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
	    (pwp->io_intr_coal.intr_latency +
	    (pwp->io_intr_coal.intr_coal_timer * 1000)));
}

/*
 * Register Access functions
 */

/*
 * Read the Inbound Queue Consumer Index for queue qnum from the
 * DMA-shared index area (synced from device first).
 */
uint32_t
pmcs_rd_iqci(pmcs_hw_t *pwp, uint32_t qnum)
{
	uint32_t iqci;

	if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: ddi_dma_sync failed?", __func__);
	}

	iqci = LE_32(
	    ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2]);

	return (iqci);
}

/*
 * Read the Outbound Queue Producer Index for queue qnum from the
 * DMA-shared index area (synced from device first).
 */
uint32_t
pmcs_rd_oqpi(pmcs_hw_t *pwp, uint32_t qnum)
{
	uint32_t oqpi;

	if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: ddi_dma_sync failed?", __func__);
	}

	oqpi = LE_32(
	    ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2]);

	return (oqpi);
}

/*
 * Read a GSM register, temporarily reprogramming the AXI translation
 * window(s) under axil_lock and restoring them afterward.
 */
uint32_t
pmcs_rd_gsm_reg(pmcs_hw_t *pwp, uint8_t hi, uint32_t off)
{
	uint32_t rv, newaxil, oldaxil, oldaxih;

	/* Split off into window base (AXIL value) and in-window offset. */
	newaxil = off & ~GSM_BASE_MASK;
	off &= GSM_BASE_MASK;
	mutex_enter(&pwp->axil_lock);
	oldaxil = ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
	ddi_put32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
	drv_usecwait(10);
	/* Read back to verify the window actually moved. */
	if (ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "AXIL register update failed");
	}
	if (hi) {
		/* Nonzero hi selects the upper translation register too. */
		oldaxih = ddi_get32(pwp->top_acc_handle,
		    &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]);
		ddi_put32(pwp->top_acc_handle,
		    &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2], hi);
		drv_usecwait(10);
		if (ddi_get32(pwp->top_acc_handle,
		    &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]) != hi) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "AXIH register update failed");
		}
	}
	rv = ddi_get32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2]);
	/* Restore both translation registers before releasing the lock. */
	if (hi) {
		ddi_put32(pwp->top_acc_handle,
		    &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2], oldaxih);
		drv_usecwait(10);
		if (ddi_get32(pwp->top_acc_handle,
		    &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]) != oldaxih) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "AXIH register restore failed");
		}
	}
	ddi_put32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
	drv_usecwait(10);
	if (ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "AXIL register restore failed");
	}
	mutex_exit(&pwp->axil_lock);
	return (rv);
}

/*
 * Write a GSM register, temporarily reprogramming the AXI translation
 * window under axil_lock and restoring it afterward.
 */
void
pmcs_wr_gsm_reg(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	uint32_t newaxil, oldaxil;

	/* Split off into window base (AXIL value) and in-window offset. */
	newaxil = off & ~GSM_BASE_MASK;
	off &= GSM_BASE_MASK;
	mutex_enter(&pwp->axil_lock);
6575 oldaxil = ddi_get32(pwp->top_acc_handle, 6576 &pwp->top_regs[PMCS_AXI_TRANS >> 2]); 6577 ddi_put32(pwp->top_acc_handle, 6578 &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil); 6579 drv_usecwait(10); 6580 if (ddi_get32(pwp->top_acc_handle, 6581 &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) { 6582 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6583 "AXIL register update failed"); 6584 } 6585 ddi_put32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2], val); 6586 ddi_put32(pwp->top_acc_handle, 6587 &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil); 6588 drv_usecwait(10); 6589 if (ddi_get32(pwp->top_acc_handle, 6590 &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) { 6591 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6592 "AXIL register restore failed"); 6593 } 6594 mutex_exit(&pwp->axil_lock); 6595 } 6596 6597 uint32_t 6598 pmcs_rd_topunit(pmcs_hw_t *pwp, uint32_t off) 6599 { 6600 switch (off) { 6601 case PMCS_SPC_RESET: 6602 case PMCS_SPC_BOOT_STRAP: 6603 case PMCS_SPC_DEVICE_ID: 6604 case PMCS_DEVICE_REVISION: 6605 off = pmcs_rd_gsm_reg(pwp, 0, off); 6606 break; 6607 default: 6608 off = ddi_get32(pwp->top_acc_handle, 6609 &pwp->top_regs[off >> 2]); 6610 break; 6611 } 6612 return (off); 6613 } 6614 6615 void 6616 pmcs_wr_topunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6617 { 6618 switch (off) { 6619 case PMCS_SPC_RESET: 6620 case PMCS_DEVICE_REVISION: 6621 pmcs_wr_gsm_reg(pwp, off, val); 6622 break; 6623 default: 6624 ddi_put32(pwp->top_acc_handle, &pwp->top_regs[off >> 2], val); 6625 break; 6626 } 6627 } 6628 6629 uint32_t 6630 pmcs_rd_msgunit(pmcs_hw_t *pwp, uint32_t off) 6631 { 6632 return (ddi_get32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2])); 6633 } 6634 6635 uint32_t 6636 pmcs_rd_mpi_tbl(pmcs_hw_t *pwp, uint32_t off) 6637 { 6638 return (ddi_get32(pwp->mpi_acc_handle, 6639 &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2])); 6640 } 6641 6642 uint32_t 6643 pmcs_rd_gst_tbl(pmcs_hw_t *pwp, uint32_t off) 6644 { 6645 return (ddi_get32(pwp->mpi_acc_handle, 6646 
&pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2])); 6647 } 6648 6649 uint32_t 6650 pmcs_rd_iqc_tbl(pmcs_hw_t *pwp, uint32_t off) 6651 { 6652 return (ddi_get32(pwp->mpi_acc_handle, 6653 &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2])); 6654 } 6655 6656 uint32_t 6657 pmcs_rd_oqc_tbl(pmcs_hw_t *pwp, uint32_t off) 6658 { 6659 return (ddi_get32(pwp->mpi_acc_handle, 6660 &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2])); 6661 } 6662 6663 uint32_t 6664 pmcs_rd_iqpi(pmcs_hw_t *pwp, uint32_t qnum) 6665 { 6666 return (ddi_get32(pwp->mpi_acc_handle, 6667 &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2])); 6668 } 6669 6670 uint32_t 6671 pmcs_rd_oqci(pmcs_hw_t *pwp, uint32_t qnum) 6672 { 6673 return (ddi_get32(pwp->mpi_acc_handle, 6674 &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2])); 6675 } 6676 6677 void 6678 pmcs_wr_msgunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6679 { 6680 ddi_put32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2], val); 6681 } 6682 6683 void 6684 pmcs_wr_mpi_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6685 { 6686 ddi_put32(pwp->mpi_acc_handle, 6687 &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2], (val)); 6688 } 6689 6690 void 6691 pmcs_wr_gst_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6692 { 6693 ddi_put32(pwp->mpi_acc_handle, 6694 &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2], val); 6695 } 6696 6697 void 6698 pmcs_wr_iqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6699 { 6700 ddi_put32(pwp->mpi_acc_handle, 6701 &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2], val); 6702 } 6703 6704 void 6705 pmcs_wr_oqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6706 { 6707 ddi_put32(pwp->mpi_acc_handle, 6708 &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2], val); 6709 } 6710 6711 void 6712 pmcs_wr_iqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val) 6713 { 6714 ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2] = val; 6715 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) != 6716 DDI_SUCCESS) { 6717 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 
6718 "%s: ddi_dma_sync failed?", __func__); 6719 } 6720 } 6721 6722 void 6723 pmcs_wr_iqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val) 6724 { 6725 ddi_put32(pwp->mpi_acc_handle, 6726 &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2], val); 6727 } 6728 6729 void 6730 pmcs_wr_oqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val) 6731 { 6732 ddi_put32(pwp->mpi_acc_handle, 6733 &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2], val); 6734 } 6735 6736 void 6737 pmcs_wr_oqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val) 6738 { 6739 ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2] = val; 6740 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) != 6741 DDI_SUCCESS) { 6742 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6743 "%s: ddi_dma_sync failed?", __func__); 6744 } 6745 } 6746 6747 /* 6748 * Check the status value of an outbound IOMB and report anything bad 6749 */ 6750 6751 void 6752 pmcs_check_iomb_status(pmcs_hw_t *pwp, uint32_t *iomb) 6753 { 6754 uint16_t opcode; 6755 int offset; 6756 6757 if (iomb == NULL) { 6758 return; 6759 } 6760 6761 opcode = LE_32(iomb[0]) & 0xfff; 6762 6763 switch (opcode) { 6764 /* 6765 * The following have no status field, so ignore them 6766 */ 6767 case PMCOUT_ECHO: 6768 case PMCOUT_SAS_HW_EVENT: 6769 case PMCOUT_GET_DEVICE_HANDLE: 6770 case PMCOUT_SATA_EVENT: 6771 case PMCOUT_SSP_EVENT: 6772 case PMCOUT_DEVICE_HANDLE_ARRIVED: 6773 case PMCOUT_SMP_REQUEST_RECEIVED: 6774 case PMCOUT_GPIO: 6775 case PMCOUT_GPIO_EVENT: 6776 case PMCOUT_GET_TIME_STAMP: 6777 case PMCOUT_SKIP_ENTRIES: 6778 case PMCOUT_GET_NVMD_DATA: /* Actually lower 16 bits of word 3 */ 6779 case PMCOUT_SET_NVMD_DATA: /* but ignore - we don't use these */ 6780 case PMCOUT_DEVICE_HANDLE_REMOVED: 6781 case PMCOUT_SSP_REQUEST_RECEIVED: 6782 return; 6783 6784 case PMCOUT_GENERAL_EVENT: 6785 offset = 1; 6786 break; 6787 6788 case PMCOUT_SSP_COMPLETION: 6789 case PMCOUT_SMP_COMPLETION: 6790 case PMCOUT_DEVICE_REGISTRATION: 6791 case PMCOUT_DEREGISTER_DEVICE_HANDLE: 6792 case 
PMCOUT_SATA_COMPLETION:
	case PMCOUT_DEVICE_INFO:
	case PMCOUT_FW_FLASH_UPDATE:
	case PMCOUT_SSP_ABORT:
	case PMCOUT_SATA_ABORT:
	case PMCOUT_SAS_DIAG_MODE_START_END:
	case PMCOUT_SAS_HW_EVENT_ACK_ACK:
	case PMCOUT_SMP_ABORT:
	case PMCOUT_SET_DEVICE_STATE:
	case PMCOUT_GET_DEVICE_STATE:
	case PMCOUT_SET_DEVICE_INFO:
		offset = 2;
		break;

	case PMCOUT_LOCAL_PHY_CONTROL:
	case PMCOUT_SAS_DIAG_EXECUTE:
	case PMCOUT_PORT_CONTROL:
		offset = 3;
		break;

	case PMCOUT_GET_INFO:
	case PMCOUT_GET_VPD:
	case PMCOUT_SAS_ASSISTED_DISCOVERY_EVENT:
	case PMCOUT_SATA_ASSISTED_DISCOVERY_EVENT:
	case PMCOUT_SET_VPD:
	case PMCOUT_TWI:
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
		    "Got response for deprecated opcode", iomb);
		return;

	default:
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
		    "Got response for unknown opcode", iomb);
		return;
	}

	/* Log any IOMB whose status word is not PMCOUT_STATUS_OK */
	if (LE_32(iomb[offset]) != PMCOUT_STATUS_OK) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
		    "bad status on TAG_TYPE_NONE command", iomb);
	}
}

/*
 * Mark a target as gone and flush all of its queues.
 * Called with statlock held.
 */
void
pmcs_clear_xp(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
	_NOTE(ARGUNUSED(pwp));

	ASSERT(mutex_owned(&xp->statlock));

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: Device 0x%p is gone.",
	    __func__, (void *)xp);

	/*
	 * Clear the dip now.  This keeps pmcs_remove_device from attempting
	 * to call us on the same device while we're still flushing queues.
	 * The only side effect is we can no longer update SM-HBA properties,
	 * but this device is going away anyway, so no matter.
	 */
	xp->dip = NULL;
	xp->smpd = NULL;
	xp->special_running = 0;
	xp->recovering = 0;
	xp->recover_wait = 0;
	xp->draining = 0;
	xp->new = 0;
	xp->assigned = 0;
	xp->dev_state = 0;
	xp->tagmap = 0;
	xp->dev_gone = 1;
	xp->event_recovery = 0;
	xp->dtype = NOTHING;
	xp->wq_recovery_tail = NULL;
	/* Don't clear xp->phy */
	/* Don't clear xp->actv_cnt */
	/* Don't clear xp->actv_pkts */

	/*
	 * Flush all target queues
	 */
	pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
}

/*
 * Log a human-readable description of an SMP response frame's function
 * result code and hand the raw result back to the caller.
 */
static int
pmcs_smp_function_result(pmcs_hw_t *pwp, smp_response_frame_t *srf)
{
	int result = srf->srf_result;

	switch (result) {
	case SMP_RES_UNKNOWN_FUNCTION:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: Unknown SMP Function(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_FUNCTION_FAILED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: SMP Function Failed(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_INVALID_REQUEST_FRAME_LENGTH:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: Invalid Request Frame Length(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_INCOMPLETE_DESCRIPTOR_LIST:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: Incomplete Descriptor List(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_PHY_DOES_NOT_EXIST:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: PHY does not exist(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_PHY_VACANT:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: PHY Vacant(0x%x)",
		    __func__, result);
		break;
	default:
		/* Unrecognized result codes are logged with the raw value */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: (0x%x)",
		    __func__, result);
		break;
	}

	return (result);
}

/*
 * Do all the repetitive stuff necessary to setup for DMA
 *
 * pwp: Used for dip
 * dma_attr: ddi_dma_attr_t to use for the mapping
 * acch: ddi_acc_handle_t to use for the mapping
 * dmah: ddi_dma_handle_t to use
 * length: Amount of memory for mapping
 * kvap: Pointer filled in with kernel virtual address on successful return
 * dma_addr: Pointer filled in with DMA address on successful return
 *
 * Returns B_TRUE on success; on failure all partially-acquired resources
 * are released and *acch/*dmah are reset to NULL.
 */
boolean_t
pmcs_dma_setup(pmcs_hw_t *pwp, ddi_dma_attr_t *dma_attr, ddi_acc_handle_t *acch,
    ddi_dma_handle_t *dmah, size_t length, caddr_t *kvap, uint64_t *dma_addr)
{
	dev_info_t *dip = pwp->dip;
	ddi_dma_cookie_t cookie;
	size_t real_length;
	uint_t ddma_flag = DDI_DMA_CONSISTENT;
	uint_t ddabh_flag = DDI_DMA_CONSISTENT | DDI_DMA_RDWR;
	uint_t cookie_cnt;
	ddi_device_acc_attr_t mattr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC,
		DDI_DEFAULT_ACC
	};

	*acch = NULL;
	*dmah = NULL;

	if (ddi_dma_alloc_handle(dip, dma_attr, DDI_DMA_SLEEP, NULL, dmah) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to allocate DMA handle");
		return (B_FALSE);
	}

	if (ddi_dma_mem_alloc(*dmah, length, &mattr, ddma_flag, DDI_DMA_SLEEP,
	    NULL, kvap, &real_length, acch) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to allocate DMA mem");
		ddi_dma_free_handle(dmah);
		*dmah = NULL;
		return (B_FALSE);
	}

	if (ddi_dma_addr_bind_handle(*dmah, NULL, *kvap, real_length,
	    ddabh_flag, DDI_DMA_SLEEP, NULL, &cookie, &cookie_cnt)
	    != DDI_DMA_MAPPED) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Failed to bind DMA");
		ddi_dma_free_handle(dmah);
		ddi_dma_mem_free(acch);
		*dmah = NULL;
		*acch = NULL;
		return (B_FALSE);
	}

	/* Callers require a single physically contiguous cookie */
	if (cookie_cnt != 1) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Multiple cookies");
		if (ddi_dma_unbind_handle(*dmah) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Condition "
			    "failed at %s():%d", __func__, __LINE__);
		}
		ddi_dma_free_handle(dmah);
		ddi_dma_mem_free(acch);
		*dmah = NULL;
		*acch = NULL;
		return (B_FALSE);
	}

	*dma_addr = cookie.dmac_laddress;

	return (B_TRUE);
}

/*
 * Flush requested queues for a particular target. Called with statlock held
 */
void
pmcs_flush_target_queues(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt, uint8_t queues)
{
	pmcs_cmd_t *sp, *sp_next;
	pmcwork_t *pwrk;

	ASSERT(pwp != NULL);
	ASSERT(tgt != NULL);

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, tgt,
	    "%s: Flushing queues (%d) for target 0x%p", __func__,
	    queues, (void *)tgt);

	/*
	 * Commands on the wait queue (or the special queue below) don't have
	 * work structures associated with them.
	 */
	if (queues & PMCS_TGT_WAIT_QUEUE) {
		mutex_enter(&tgt->wqlock);
		while ((sp = STAILQ_FIRST(&tgt->wq)) != NULL) {
			STAILQ_REMOVE(&tgt->wq, sp, pmcs_cmd, cmd_next);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, tgt,
			    "%s: Removing cmd 0x%p from wq for target 0x%p",
			    __func__, (void *)sp, (void *)tgt);
			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
			/*
			 * Drop wqlock while completing the command onto the
			 * completion queue (which has its own lock), then
			 * retake it for the next iteration.
			 */
			mutex_exit(&tgt->wqlock);
			pmcs_dma_unload(pwp, sp);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&tgt->wqlock);
		}
		mutex_exit(&tgt->wqlock);
	}

	/*
	 * Commands on the active queue will have work structures associated
	 * with them.
	 */
	if (queues & PMCS_TGT_ACTIVE_QUEUE) {
		/* statlock is dropped before taking aqlock, retaken after */
		mutex_exit(&tgt->statlock);
		mutex_enter(&tgt->aqlock);
		sp = STAILQ_FIRST(&tgt->aq);
		while (sp) {
			sp_next = STAILQ_NEXT(sp, cmd_next);
			pwrk = pmcs_tag2wp(pwp, sp->cmd_tag, B_FALSE);

			/*
			 * If we don't find a work structure, it's because
			 * the command is already complete. If so, move on
			 * to the next one.
			 */
			if (pwrk == NULL) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
				    "%s: Not removing cmd 0x%p (htag 0x%x) "
				    "from aq", __func__, (void *)sp,
				    sp->cmd_tag);
				sp = sp_next;
				continue;
			}

			STAILQ_REMOVE(&tgt->aq, sp, pmcs_cmd, cmd_next);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq for "
			    "target 0x%p", __func__, (void *)sp, sp->cmd_tag,
			    (void *)tgt);
			mutex_exit(&tgt->aqlock);
			/*
			 * Mark the work structure as dead and complete it
			 */
			pwrk->dead = 1;
			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
			pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
			pmcs_dma_unload(pwp, sp);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&tgt->aqlock);
			sp = sp_next;
		}
		mutex_exit(&tgt->aqlock);
		mutex_enter(&tgt->statlock);
	}

	/* Special queue commands also have no work structures */
	if (queues & PMCS_TGT_SPECIAL_QUEUE) {
		while ((sp = STAILQ_FIRST(&tgt->sq)) != NULL) {
			STAILQ_REMOVE(&tgt->sq, sp, pmcs_cmd, cmd_next);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
			    "%s: Removing cmd 0x%p from sq for target 0x%p",
			    __func__, (void *)sp, (void *)tgt);
			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
			pmcs_dma_unload(pwp, sp);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			mutex_exit(&pwp->cq_lock);
		}
	}

	if (queues == PMCS_TGT_ALL_QUEUES) {
		mutex_exit(&tgt->statlock);
		(void) pmcs_flush_nonio_cmds(pwp);
		mutex_enter(&tgt->statlock);
	}
}

/*
 * Clean up work structures with no associated pmcs_cmd_t struct
 */
void
pmcs_flush_nonio_cmds(pmcs_hw_t *pwp)
{
	int i;
	pmcwork_t *p;

	for (i = 0; i < pwp->max_cmd; i++) {
		p = &pwp->work[i];
		mutex_enter(&p->lock);
		if (p->htag & PMCS_TAG_NONIO_CMD) {
			if (!PMCS_COMMAND_ACTIVE(p) || PMCS_COMMAND_DONE(p)) {
				mutex_exit(&p->lock);
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, p->phy, p->xp,
			    "%s: Completing non-io cmd with HTAG 0x%x",
			    __func__, p->htag);
			/*
			 * NOTE(review): p->lock is not dropped here; the
			 * completion path is expected to release it (it does
			 * so explicitly in the TAG_TYPE_WAIT case).
			 */
			pmcs_complete_work_impl(pwp, p, NULL, 0);
		} else {
			mutex_exit(&p->lock);
		}
	}
}

/*
 * Complete a work structure according to its tag type: invoke the
 * registered callback, wake a sleeping waiter (copying the IOMB to it
 * first), or simply release the work structure.
 *
 * Called with pwrk->lock held.
 */
void
pmcs_complete_work_impl(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *iomb,
    size_t amt)
{
	switch (PMCS_TAG_TYPE(pwrk->htag)) {
	case PMCS_TAG_TYPE_CBACK:
	{
		pmcs_cb_t callback = (pmcs_cb_t)pwrk->ptr;
		(*callback)(pwp, pwrk, iomb);
		break;
	}
	case PMCS_TAG_TYPE_WAIT:
		/* Hand the IOMB to the waiter (if any) and wake it up */
		if (pwrk->arg && iomb && amt) {
			(void) memcpy(pwrk->arg, iomb, amt);
		}
		cv_signal(&pwrk->sleep_cv);
		mutex_exit(&pwrk->lock);
		break;
	case PMCS_TAG_TYPE_NONE:
#ifdef DEBUG
		pmcs_check_iomb_status(pwp, iomb);
#endif
		pmcs_pwork(pwp, pwrk);
		break;
	default:
		/*
		 * We will leak a structure here if we don't know
		 * what happened
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Unknown PMCS_TAG_TYPE (%x)",
		    __func__, PMCS_TAG_TYPE(pwrk->htag));
		break;
	}
}

/*
 * Determine if iport still has targets. During detach(9E), if SCSA is
 * successful in its guarantee of tran_tgt_free(9E) before detach(9E),
 * this should always return B_FALSE.
 */
boolean_t
pmcs_iport_has_targets(pmcs_hw_t *pwp, pmcs_iport_t *iport)
{
	pmcs_xscsi_t *xp;
	int i;

	mutex_enter(&pwp->lock);

	/* No target array or zero max_dev means no targets at all */
	if (!pwp->targets || !pwp->max_dev) {
		mutex_exit(&pwp->lock);
		return (B_FALSE);
	}

	for (i = 0; i < pwp->max_dev; i++) {
		xp = pwp->targets[i];
		if ((xp == NULL) || (xp->phy == NULL) ||
		    (xp->phy->iport != iport)) {
			continue;
		}

		/* Found a target whose PHY belongs to this iport */
		mutex_exit(&pwp->lock);
		return (B_TRUE);
	}

	mutex_exit(&pwp->lock);
	return (B_FALSE);
}

/*
 * Tear down a target's locks/CVs and release its soft state and the
 * iport reference taken here.  Called with softstate lock held.
 */
void
pmcs_destroy_target(pmcs_xscsi_t *target)
{
	pmcs_hw_t *pwp = target->pwp;
	pmcs_iport_t *iport;

	ASSERT(pwp);
	ASSERT(mutex_owned(&pwp->lock));

	if (!target->ua) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target,
		    "%s: target %p iport address is null",
		    __func__, (void *)target);
	}

	iport = pmcs_get_iport_by_ua(pwp, target->ua);
	if (iport == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target,
		    "%s: no iport associated with tgt(0x%p)",
		    __func__, (void *)target);
		return;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target,
	    "%s: free target %p", __func__, (void *)target);
	if (target->ua) {
		strfree(target->ua);
	}

	mutex_destroy(&target->wqlock);
	mutex_destroy(&target->aqlock);
	mutex_destroy(&target->statlock);
	cv_destroy(&target->reset_cv);
	cv_destroy(&target->abort_cv);
	ddi_soft_state_bystr_fini(&target->lun_sstate);
	ddi_soft_state_bystr_free(iport->tgt_sstate, target->unit_address);
	/* Drop the hold acquired via pmcs_get_iport_by_ua above */
	pmcs_rele_iport(iport);
}

/*
 * pmcs_lock_phy_impl
 *
 * This function is what does the actual work for pmcs_lock_phy. It will
 * lock all PHYs from phyp down in a top-down fashion.
 *
 * Locking notes:
 * 1. level starts from 0 for the PHY ("parent") that's passed in. It is
 * not a reflection of the actual level of the PHY in the SAS topology.
 * 2. If parent is an expander, then parent is locked along with all its
 * descendents.
 * 3. Expander subsidiary PHYs at level 0 are not locked. It is the
 * responsibility of the caller to individually lock expander subsidiary PHYs
 * at level 0 if necessary.
 * 4. Siblings at level 0 are not traversed due to the possibility that we're
 * locking a PHY on the dead list. The siblings could be pointing to invalid
 * PHYs. We don't lock siblings at level 0 anyway.
 */
static void
pmcs_lock_phy_impl(pmcs_phy_t *phyp, int level)
{
	pmcs_phy_t *tphyp;

	ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
	    (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

	/*
	 * Start walking the PHYs.
	 */
	tphyp = phyp;
	while (tphyp) {
		/*
		 * If we're at the top level, only lock ourselves. For anything
		 * at level > 0, traverse children while locking everything.
		 */
		if ((level > 0) || (tphyp == phyp)) {
			pmcs_prt(tphyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, tphyp,
			    NULL, "%s: PHY 0x%p parent 0x%p path %s lvl %d",
			    __func__, (void *)tphyp, (void *)tphyp->parent,
			    tphyp->path, level);
			mutex_enter(&tphyp->phy_lock);

			/* Recurse into children before visiting a sibling */
			if (tphyp->children) {
				pmcs_lock_phy_impl(tphyp->children, level + 1);
			}
		}

		/* See note 4 above: no sibling traversal at level 0 */
		if (level == 0) {
			return;
		}

		tphyp = tphyp->sibling;
	}
}

/*
 * pmcs_lock_phy
 *
 * This function is responsible for locking a PHY and all its descendents
 */
void
pmcs_lock_phy(pmcs_phy_t *phyp)
{
#ifdef DEBUG
	char *callername = NULL;
	ulong_t off;

	ASSERT(phyp != NULL);

	/* On DEBUG kernels, record which function requested the lock */
	callername = modgetsymname((uintptr_t)caller(), &off);

	if (callername == NULL) {
		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
		    "%s: PHY 0x%p path %s caller: unknown", __func__,
		    (void *)phyp, phyp->path);
	} else {
		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
		    "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
		    (void *)phyp, phyp->path, callername, off);
	}
#else
	pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
	    "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
#endif
	pmcs_lock_phy_impl(phyp, 0);
}

/*
 * pmcs_unlock_phy_impl
 *
 * Unlock all PHYs from phyp down in a bottom-up fashion.
 */
static void
pmcs_unlock_phy_impl(pmcs_phy_t *phyp, int level)
{
	pmcs_phy_t *phy_next;

	ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
	    (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

	/*
	 * Recurse down to the bottom PHYs
	 */
	if (level == 0) {
		if (phyp->children) {
			pmcs_unlock_phy_impl(phyp->children, level + 1);
		}
	} else {
		phy_next = phyp;
		while (phy_next) {
			if (phy_next->children) {
				pmcs_unlock_phy_impl(phy_next->children,
				    level + 1);
			}
			phy_next = phy_next->sibling;
		}
	}

	/*
	 * Iterate through PHYs unlocking all at level > 0 as well the top PHY
	 */
	phy_next = phyp;
	while (phy_next) {
		if ((level > 0) || (phy_next == phyp)) {
			pmcs_prt(phy_next->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
			    phy_next, NULL,
			    "%s: PHY 0x%p parent 0x%p path %s lvl %d",
			    __func__, (void *)phy_next,
			    (void *)phy_next->parent, phy_next->path, level);
			mutex_exit(&phy_next->phy_lock);
		}

		/* As with locking: no sibling traversal at level 0 */
		if (level == 0) {
			return;
		}

		phy_next = phy_next->sibling;
	}
}

/*
 * pmcs_unlock_phy
 *
 * Unlock a PHY and all its descendents
 */
void
pmcs_unlock_phy(pmcs_phy_t *phyp)
{
#ifdef DEBUG
	char *callername = NULL;
	ulong_t off;

	ASSERT(phyp != NULL);

	/* On DEBUG kernels, record which function released the lock */
	callername = modgetsymname((uintptr_t)caller(), &off);

	if (callername == NULL) {
		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
		    "%s: PHY 0x%p path %s caller: unknown", __func__,
		    (void *)phyp, phyp->path);
	} else {
		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
		    "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
		    (void *)phyp, phyp->path, callername, off);
	}
#else
	pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
	    "%s: PHY 0x%p path %s", __func__, (void *)phyp,
	    phyp->path);
#endif
	pmcs_unlock_phy_impl(phyp, 0);
}

/*
 * pmcs_get_root_phy
 *
 * For a given phy pointer return its root phy.
 * This function must only be called during discovery in order to ensure that
 * the chain of PHYs from phyp up to the root PHY doesn't change.
 */
pmcs_phy_t *
pmcs_get_root_phy(pmcs_phy_t *phyp)
{
	ASSERT(phyp);

	/* Walk parent links until a root PHY is reached */
	while (phyp) {
		if (IS_ROOT_PHY(phyp)) {
			break;
		}
		phyp = phyp->parent;
	}

	return (phyp);
}

/*
 * pmcs_free_dma_chunklist
 *
 * Free DMA S/G chunk list
 */
void
pmcs_free_dma_chunklist(pmcs_hw_t *pwp)
{
	pmcs_chunk_t *pchunk;

	/* Pop each chunk off the list, release its DMA resources, free it */
	while (pwp->dma_chunklist) {
		pchunk = pwp->dma_chunklist;
		pwp->dma_chunklist = pwp->dma_chunklist->next;
		if (pchunk->dma_handle) {
			if (ddi_dma_unbind_handle(pchunk->dma_handle) !=
			    DDI_SUCCESS) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
				    "Condition failed at %s():%d",
				    __func__, __LINE__);
			}
			ddi_dma_free_handle(&pchunk->dma_handle);
			ddi_dma_mem_free(&pchunk->acc_handle);
		}
		kmem_free(pchunk, sizeof (pmcs_chunk_t));
	}
}

/*
 * kmem cache constructor for PHY structures: initialize the PHY lock and
 * the abort-all CV.
 */
/*ARGSUSED2*/
int
pmcs_phy_constructor(void *buf, void *arg, int kmflags)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)arg;
	pmcs_phy_t *phyp = (pmcs_phy_t *)buf;

	mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&phyp->abort_all_cv, NULL, CV_DRIVER, NULL);
	return (0);
}

/*
 * kmem cache destructor for PHY structures.
 */
/*ARGSUSED1*/
void
pmcs_phy_destructor(void *buf, void *arg)
{
	pmcs_phy_t *phyp = (pmcs_phy_t *)buf;

	cv_destroy(&phyp->abort_all_cv);
	mutex_destroy(&phyp->phy_lock);
}

/*
 * Free all PHYs from the kmem_cache starting at phyp as well as everything
 * on the dead_phys list.
 *
 * NOTE: This function does not free root PHYs as they are not allocated
 * from the kmem_cache.
 *
 * No PHY locks are acquired as this should only be called during DDI_DETACH
 * or soft reset (while pmcs interrupts are disabled).
 */
void
pmcs_free_all_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
	pmcs_phy_t *tphyp, *nphyp, *cphyp;

	if (phyp == NULL) {
		return;
	}

	/* Depth-first: free each PHY's children before the PHY itself */
	for (tphyp = phyp; tphyp; tphyp = nphyp) {
		nphyp = tphyp->sibling;
		cphyp = tphyp->children;

		if (cphyp) {
			tphyp->children = NULL;
			pmcs_free_all_phys(pwp, cphyp);
		}

		if (!IS_ROOT_PHY(tphyp)) {
			kmem_cache_free(pwp->phy_cache, tphyp);
		}
	}

	/* Now drain the dead PHY list as well */
	mutex_enter(&pwp->dead_phylist_lock);
	for (tphyp = pwp->dead_phys; tphyp; tphyp = nphyp) {
		nphyp = tphyp->dead_next;
		kmem_cache_free(pwp->phy_cache, tphyp);
	}
	pwp->dead_phys = NULL;
	mutex_exit(&pwp->dead_phylist_lock);
}

/*
 * Free a list of PHYs linked together by the sibling pointer back to the
 * kmem cache from whence they came. This function does not recurse, so the
 * caller must ensure there are no children.
 */
void
pmcs_free_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
	pmcs_phy_t *next_phy;

	while (phyp) {
		next_phy = phyp->sibling;
		ASSERT(!mutex_owned(&phyp->phy_lock));
		kmem_cache_free(pwp->phy_cache, phyp);
		phyp = next_phy;
	}
}

/*
 * Make a copy of an existing PHY structure. This is used primarily in
 * discovery to compare the contents of an existing PHY with what gets
 * reported back by an expander.
 *
 * This function must not be called from any context where sleeping is
 * not possible.
 *
 * The new PHY is returned unlocked.
 */
static pmcs_phy_t *
pmcs_clone_phy(pmcs_phy_t *orig_phy)
{
	pmcs_phy_t *local;

	local = kmem_cache_alloc(orig_phy->pwp->phy_cache, KM_SLEEP);

	/*
	 * Go ahead and just copy everything...
	 */
	*local = *orig_phy;

	/*
	 * But the following must be set appropriately for this copy
	 */
	local->sibling = NULL;
	local->children = NULL;
	mutex_init(&local->phy_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(orig_phy->pwp->intr_pri));

	return (local);
}

/*
 * Check an access handle for FMA errors; returns the FMA error status,
 * or DDI_FAILURE for a NULL handle.
 */
int
pmcs_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL) {
		return (DDI_FAILURE);
	}
	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}

/*
 * Check a DMA handle for FMA errors; returns the FMA error status,
 * or DDI_FAILURE for a NULL handle.
 */
int
pmcs_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL) {
		return (DDI_FAILURE);
	}
	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}


/*
 * Post an FMA ereport for this device, if the driver's FM capabilities
 * include ereports.
 */
void
pmcs_fm_ereport(pmcs_hw_t *pwp, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities)) {
		ddi_fm_ereport_post(pwp->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}

/*
 * Check every access and DMA handle allocated in attach.
 * Returns 0 if all handles are healthy, 1 if any check failed.
 */
int
pmcs_check_acc_dma_handle(pmcs_hw_t *pwp)
{
	pmcs_chunk_t *pchunk;
	int i;

	/* check all acc & dma handles allocated in attach */
	if ((pmcs_check_acc_handle(pwp->pci_acc_handle) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->msg_acc_handle) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->top_acc_handle) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->mpi_acc_handle) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->gsm_acc_handle) != DDI_SUCCESS)) {
		goto check_failed;
	}

	/* Inbound queue handles */
	for (i = 0; i < PMCS_NIQ; i++) {
		if ((pmcs_check_dma_handle(
		    pwp->iqp_handles[i]) != DDI_SUCCESS) ||
		    (pmcs_check_acc_handle(
		    pwp->iqp_acchdls[i]) != DDI_SUCCESS)) {
			goto check_failed;
		}
	}

	/* Outbound queue handles */
	for (i = 0; i < PMCS_NOQ; i++) {
		if ((pmcs_check_dma_handle(
		    pwp->oqp_handles[i]) != DDI_SUCCESS) ||
		    (pmcs_check_acc_handle(
		    pwp->oqp_acchdls[i]) != DDI_SUCCESS)) {
			goto check_failed;
		}
	}

	if ((pmcs_check_dma_handle(pwp->cip_handles) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->cip_acchdls) != DDI_SUCCESS)) {
		goto check_failed;
	}

	/* Firmware log handles are only present when fwlog is enabled */
	if (pwp->fwlog &&
	    ((pmcs_check_dma_handle(pwp->fwlog_hndl) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->fwlog_acchdl) != DDI_SUCCESS))) {
		goto check_failed;
	}

	if (pwp->regdump_hndl && pwp->regdump_acchdl &&
	    ((pmcs_check_dma_handle(pwp->regdump_hndl) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->regdump_acchdl)
	    != DDI_SUCCESS))) {
		goto check_failed;
	}


	/* Finally, walk the DMA chunk list */
	pchunk = pwp->dma_chunklist;
	while (pchunk) {
		if ((pmcs_check_acc_handle(pchunk->acc_handle)
		    != DDI_SUCCESS) ||
		    (pmcs_check_dma_handle(pchunk->dma_handle)
		    != DDI_SUCCESS)) {
			goto check_failed;
		}
		pchunk = pchunk->next;
	}

	return (0);

check_failed:

	return (1);
}

/*
 * pmcs_handle_dead_phys
 *
 * If the PHY has no outstanding work associated with it, remove it from
 * the dead PHY list and free it.
 *
 * If pwp->ds_err_recovering or pwp->configuring is set, don't run.
 * This keeps routines that need to submit work to the chip from having to
 * hold PHY locks to ensure that PHYs don't disappear while they do their work.
 */
void
pmcs_handle_dead_phys(pmcs_hw_t *pwp)
{
	pmcs_phy_t *phyp, *nphyp, *pphyp;

	mutex_enter(&pwp->lock);
	mutex_enter(&pwp->config_lock);

	/* Bail out while configuration or DS error recovery is running */
	if (pwp->configuring | pwp->ds_err_recovering) {
		mutex_exit(&pwp->config_lock);
		mutex_exit(&pwp->lock);
		return;
	}

	/*
	 * Check every PHY in the dead PHY list
	 */
	mutex_enter(&pwp->dead_phylist_lock);
	phyp = pwp->dead_phys;
	pphyp = NULL; /* Set previous PHY to NULL */

	while (phyp != NULL) {
		pmcs_lock_phy(phyp);
		ASSERT(phyp->dead);

		nphyp = phyp->dead_next;

		/*
		 * Check for outstanding work
		 */
		if (phyp->ref_count > 0) {
			pmcs_unlock_phy(phyp);
			pphyp = phyp; /* This PHY becomes "previous" */
		} else if (phyp->target) {
			/* Target still attached; keep the PHY for now */
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, phyp->target,
			    "%s: Not freeing PHY 0x%p: target 0x%p is not free",
			    __func__, (void *)phyp, (void *)phyp->target);
			pphyp = phyp;
		} else {
			/*
			 * No outstanding work or target references. Remove it
			 * from the list and free it
			 */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
			    "%s: Freeing inactive dead PHY 0x%p @ %s "
			    "target = 0x%p", __func__, (void *)phyp,
			    phyp->path, (void *)phyp->target);
			/*
			 * If pphyp is NULL, then phyp was the head of the list,
			 * so just reset the head to nphyp. Otherwise, the
			 * previous PHY will now point to nphyp (the next PHY)
			 */
			if (pphyp == NULL) {
				pwp->dead_phys = nphyp;
			} else {
				pphyp->dead_next = nphyp;
			}
			/*
			 * If the target still points to this PHY, remove
			 * that linkage now.
			 */
			if (phyp->target) {
				mutex_enter(&phyp->target->statlock);
				if (phyp->target->phy == phyp) {
					phyp->target->phy = NULL;
				}
				mutex_exit(&phyp->target->statlock);
			}
			pmcs_unlock_phy(phyp);
			kmem_cache_free(pwp->phy_cache, phyp);
		}

		phyp = nphyp;
	}

	mutex_exit(&pwp->dead_phylist_lock);
	mutex_exit(&pwp->config_lock);
	mutex_exit(&pwp->lock);
}

/* Bump a PHY's reference count (a unit of outstanding work) */
void
pmcs_inc_phy_ref_count(pmcs_phy_t *phyp)
{
	atomic_inc_32(&phyp->ref_count);
}

/* Drop a PHY's reference count; must not already be zero */
void
pmcs_dec_phy_ref_count(pmcs_phy_t *phyp)
{
	ASSERT(phyp->ref_count != 0);
	atomic_dec_32(&phyp->ref_count);
}

/*
 * pmcs_reap_dead_phy
 *
 * This function is called from pmcs_new_tport when we have a PHY
 * without a target pointer. It's possible in that case that this PHY
 * may have a "brother" on the dead_phys list. That is, it may be the same as
 * this one but with a different root PHY number (e.g. pp05 vs. pp04). If
 * that's the case, update the dead PHY and this new PHY. If that's not the
 * case, we should get a tran_tgt_init on this after it's reported to SCSA.
 *
 * Called with PHY locked.
 */
static void
pmcs_reap_dead_phy(pmcs_phy_t *phyp)
{
	pmcs_hw_t *pwp = phyp->pwp;
	pmcs_phy_t *ctmp;
	pmcs_iport_t *iport_cmp;

	ASSERT(mutex_owned(&phyp->phy_lock));

	/*
	 * Check the dead PHYs list
	 */
	mutex_enter(&pwp->dead_phylist_lock);
	ctmp = pwp->dead_phys;
	while (ctmp) {
		/*
		 * If the iport is NULL, compare against last_iport.
		 */
		if (ctmp->iport) {
			iport_cmp = ctmp->iport;
		} else {
			iport_cmp = ctmp->last_iport;
		}

		/* Must be the same iport and the same 8-byte SAS address */
		if ((iport_cmp != phyp->iport) ||
		    (memcmp((void *)&ctmp->sas_address[0],
		    (void *)&phyp->sas_address[0], 8))) {
			ctmp = ctmp->dead_next;
			continue;
		}

		/*
		 * Same SAS address on same iport. Now check to see if
		 * the PHY path is the same with the possible exception
		 * of the root PHY number.
		 * The "5" is the string length of "pp00."
		 */
		if ((strnlen(phyp->path, 5) >= 5) &&
		    (strnlen(ctmp->path, 5) >= 5)) {
			if (memcmp((void *)&phyp->path[5],
			    (void *)&ctmp->path[5],
			    strnlen(phyp->path, 32) - 5) == 0) {
				break;
			}
		}

		ctmp = ctmp->dead_next;
	}
	mutex_exit(&pwp->dead_phylist_lock);

	/*
	 * Found a match. Remove the target linkage and drop the
	 * ref count on the old PHY. Then, increment the ref count
	 * on the new PHY to compensate.
	 */
	if (ctmp) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
		    "%s: Found match in dead PHY list (0x%p) for new PHY %s",
		    __func__, (void *)ctmp, phyp->path);
		/*
		 * If there is a pointer to the target in the dead PHY, move
		 * all reference counts to the new PHY.
7880 */ 7881 if (ctmp->target) { 7882 mutex_enter(&ctmp->target->statlock); 7883 phyp->target = ctmp->target; 7884 7885 while (ctmp->ref_count != 0) { 7886 pmcs_inc_phy_ref_count(phyp); 7887 pmcs_dec_phy_ref_count(ctmp); 7888 } 7889 /* 7890 * Update the target's linkage as well 7891 */ 7892 phyp->target->phy = phyp; 7893 phyp->target->dtype = phyp->dtype; 7894 ctmp->target = NULL; 7895 mutex_exit(&phyp->target->statlock); 7896 } 7897 } 7898 } 7899 7900 /* 7901 * Called with iport lock held 7902 */ 7903 void 7904 pmcs_add_phy_to_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp) 7905 { 7906 ASSERT(mutex_owned(&iport->lock)); 7907 ASSERT(phyp); 7908 ASSERT(!list_link_active(&phyp->list_node)); 7909 7910 iport->nphy++; 7911 list_insert_tail(&iport->phys, phyp); 7912 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 7913 &iport->nphy); 7914 mutex_enter(&phyp->phy_lock); 7915 pmcs_create_one_phy_stats(iport, phyp); 7916 mutex_exit(&phyp->phy_lock); 7917 mutex_enter(&iport->refcnt_lock); 7918 iport->refcnt++; 7919 mutex_exit(&iport->refcnt_lock); 7920 } 7921 7922 /* 7923 * Called with the iport lock held 7924 */ 7925 void 7926 pmcs_remove_phy_from_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp) 7927 { 7928 pmcs_phy_t *pptr, *next_pptr; 7929 7930 ASSERT(mutex_owned(&iport->lock)); 7931 7932 /* 7933 * If phyp is NULL, remove all PHYs from the iport 7934 */ 7935 if (phyp == NULL) { 7936 for (pptr = list_head(&iport->phys); pptr != NULL; 7937 pptr = next_pptr) { 7938 next_pptr = list_next(&iport->phys, pptr); 7939 mutex_enter(&pptr->phy_lock); 7940 if (pptr->phy_stats != NULL) { 7941 kstat_delete(pptr->phy_stats); 7942 pptr->phy_stats = NULL; 7943 } 7944 pptr->iport = NULL; 7945 pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp, 7946 pptr->tgt_port_pm_tmp, B_FALSE); 7947 mutex_exit(&pptr->phy_lock); 7948 pmcs_rele_iport(iport); 7949 list_remove(&iport->phys, pptr); 7950 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, 7951 PMCS_NUM_PHYS, &iport->nphy); 7952 } 7953 
iport->nphy = 0; 7954 return; 7955 } 7956 7957 ASSERT(phyp); 7958 ASSERT(iport->nphy > 0); 7959 ASSERT(list_link_active(&phyp->list_node)); 7960 iport->nphy--; 7961 list_remove(&iport->phys, phyp); 7962 pmcs_update_phy_pm_props(phyp, phyp->att_port_pm_tmp, 7963 phyp->tgt_port_pm_tmp, B_FALSE); 7964 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 7965 &iport->nphy); 7966 pmcs_rele_iport(iport); 7967 } 7968 7969 /* 7970 * This function checks to see if the target pointed to by phyp is still 7971 * correct. This is done by comparing the target's unit address with the 7972 * SAS address in phyp. 7973 * 7974 * Called with PHY locked and target statlock held 7975 */ 7976 static boolean_t 7977 pmcs_phy_target_match(pmcs_phy_t *phyp) 7978 { 7979 uint64_t wwn; 7980 char unit_address[PMCS_MAX_UA_SIZE]; 7981 boolean_t rval = B_FALSE; 7982 7983 ASSERT(phyp); 7984 ASSERT(phyp->target); 7985 ASSERT(mutex_owned(&phyp->phy_lock)); 7986 ASSERT(mutex_owned(&phyp->target->statlock)); 7987 7988 wwn = pmcs_barray2wwn(phyp->sas_address); 7989 (void) scsi_wwn_to_wwnstr(wwn, 1, unit_address); 7990 7991 if (memcmp((void *)unit_address, (void *)phyp->target->unit_address, 7992 strnlen(phyp->target->unit_address, PMCS_MAX_UA_SIZE)) == 0) { 7993 rval = B_TRUE; 7994 } 7995 7996 return (rval); 7997 } 7998 /* 7999 * Commands used to serialize SMP requests. 8000 * 8001 * The SPC only allows 2 SMP commands per SMP target: 1 cmd pending and 1 cmd 8002 * queued for the same SMP target. If a third SMP cmd is sent to the SPC for an 8003 * SMP target that already has a SMP cmd pending and one queued, then the 8004 * SPC responds with the ERROR_INTERNAL_SMP_RESOURCE response. 8005 * 8006 * Additionally, the SPC has an 8 entry deep cmd queue and the number of SMP 8007 * cmds that can be queued is controlled by the PORT_CONTROL IOMB. The 8008 * SPC default is 1 SMP command/port (iport). These 2 queued SMP cmds would 8009 * have to be for different SMP targets. 
The INTERNAL_SMP_RESOURCE error will 8010 * also be returned if a 2nd SMP cmd is sent to the controller when there is 8011 * already 1 SMP cmd queued for that port or if a 3rd SMP cmd is sent to the 8012 * queue if there are already 2 queued SMP cmds. 8013 */ 8014 void 8015 pmcs_smp_acquire(pmcs_iport_t *iport) 8016 { 8017 if (iport == NULL) { 8018 return; 8019 } 8020 8021 mutex_enter(&iport->smp_lock); 8022 while (iport->smp_active) { 8023 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 8024 "%s: SMP is active on thread 0x%p, waiting", __func__, 8025 (void *)iport->smp_active_thread); 8026 cv_wait(&iport->smp_cv, &iport->smp_lock); 8027 } 8028 iport->smp_active = B_TRUE; 8029 iport->smp_active_thread = curthread; 8030 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL, 8031 "%s: SMP acquired by thread 0x%p", __func__, 8032 (void *)iport->smp_active_thread); 8033 mutex_exit(&iport->smp_lock); 8034 } 8035 8036 void 8037 pmcs_smp_release(pmcs_iport_t *iport) 8038 { 8039 if (iport == NULL) { 8040 return; 8041 } 8042 8043 mutex_enter(&iport->smp_lock); 8044 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL, 8045 "%s: SMP released by thread 0x%p", __func__, (void *)curthread); 8046 iport->smp_active = B_FALSE; 8047 iport->smp_active_thread = NULL; 8048 cv_signal(&iport->smp_cv); 8049 mutex_exit(&iport->smp_lock); 8050 } 8051 8052 /* 8053 * Update a PHY's attached-port-pm and target-port-pm properties 8054 * 8055 * phyp: PHY whose properties are to be updated 8056 * 8057 * att_bv: Bit value of the attached-port-pm property to be updated in the 8058 * 64-bit holding area for the PHY. 8059 * 8060 * tgt_bv: Bit value of the target-port-pm property to update in the 64-bit 8061 * holding area for the PHY. 8062 * 8063 * prop_add_val: If TRUE, we're adding bits into the property value. 8064 * Otherwise, we're taking them out. Either way, the properties for this 8065 * PHY will be updated. 
8066 */ 8067 void 8068 pmcs_update_phy_pm_props(pmcs_phy_t *phyp, uint64_t att_bv, uint64_t tgt_bv, 8069 boolean_t prop_add_val) 8070 { 8071 if (prop_add_val) { 8072 /* 8073 * If the values are currently 0, then we're setting the 8074 * phymask for just this PHY as well. 8075 */ 8076 if (phyp->att_port_pm_tmp == 0) { 8077 phyp->att_port_pm = att_bv; 8078 phyp->tgt_port_pm = tgt_bv; 8079 } 8080 phyp->att_port_pm_tmp |= att_bv; 8081 phyp->tgt_port_pm_tmp |= tgt_bv; 8082 (void) snprintf(phyp->att_port_pm_str, PMCS_PM_MAX_NAMELEN, 8083 "%"PRIx64, phyp->att_port_pm_tmp); 8084 (void) snprintf(phyp->tgt_port_pm_str, PMCS_PM_MAX_NAMELEN, 8085 "%"PRIx64, phyp->tgt_port_pm_tmp); 8086 } else { 8087 phyp->att_port_pm_tmp &= ~att_bv; 8088 phyp->tgt_port_pm_tmp &= ~tgt_bv; 8089 if (phyp->att_port_pm_tmp) { 8090 (void) snprintf(phyp->att_port_pm_str, 8091 PMCS_PM_MAX_NAMELEN, "%"PRIx64, 8092 phyp->att_port_pm_tmp); 8093 } else { 8094 phyp->att_port_pm_str[0] = '\0'; 8095 phyp->att_port_pm = 0; 8096 } 8097 if (phyp->tgt_port_pm_tmp) { 8098 (void) snprintf(phyp->tgt_port_pm_str, 8099 PMCS_PM_MAX_NAMELEN, "%"PRIx64, 8100 phyp->tgt_port_pm_tmp); 8101 } else { 8102 phyp->tgt_port_pm_str[0] = '\0'; 8103 phyp->tgt_port_pm = 0; 8104 } 8105 } 8106 8107 if (phyp->target == NULL) { 8108 return; 8109 } 8110 8111 mutex_enter(&phyp->target->statlock); 8112 if (!list_is_empty(&phyp->target->lun_list)) { 8113 pmcs_lun_t *lunp; 8114 8115 lunp = list_head(&phyp->target->lun_list); 8116 while (lunp) { 8117 (void) scsi_device_prop_update_string(lunp->sd, 8118 SCSI_DEVICE_PROP_PATH, 8119 SCSI_ADDR_PROP_ATTACHED_PORT_PM, 8120 phyp->att_port_pm_str); 8121 (void) scsi_device_prop_update_string(lunp->sd, 8122 SCSI_DEVICE_PROP_PATH, 8123 SCSI_ADDR_PROP_TARGET_PORT_PM, 8124 phyp->tgt_port_pm_str); 8125 lunp = list_next(&phyp->target->lun_list, lunp); 8126 } 8127 } else if (phyp->target->smpd) { 8128 (void) smp_device_prop_update_string(phyp->target->smpd, 8129 SCSI_ADDR_PROP_ATTACHED_PORT_PM, 8130 
phyp->att_port_pm_str); 8131 (void) smp_device_prop_update_string(phyp->target->smpd, 8132 SCSI_ADDR_PROP_TARGET_PORT_PM, 8133 phyp->tgt_port_pm_str); 8134 } 8135 mutex_exit(&phyp->target->statlock); 8136 } 8137 8138 /* ARGSUSED */ 8139 void 8140 pmcs_deregister_device_work(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 8141 { 8142 pmcs_phy_t *pptr; 8143 8144 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 8145 pmcs_lock_phy(pptr); 8146 if (pptr->deregister_wait) { 8147 pmcs_deregister_device(pwp, pptr); 8148 } 8149 pmcs_unlock_phy(pptr); 8150 } 8151 } 8152 8153 /* 8154 * pmcs_iport_active 8155 * 8156 * Mark this iport as active. Called with the iport lock held. 8157 */ 8158 static void 8159 pmcs_iport_active(pmcs_iport_t *iport) 8160 { 8161 ASSERT(mutex_owned(&iport->lock)); 8162 8163 iport->ua_state = UA_ACTIVE; 8164 iport->smp_active = B_FALSE; 8165 iport->smp_active_thread = NULL; 8166 } 8167 8168 /* ARGSUSED */ 8169 static void 8170 pmcs_tgtmap_activate_cb(void *tgtmap_priv, char *tgt_addr, 8171 scsi_tgtmap_tgt_type_t tgt_type, void **tgt_privp) 8172 { 8173 pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv; 8174 pmcs_hw_t *pwp = iport->pwp; 8175 pmcs_xscsi_t *target; 8176 8177 /* 8178 * Look up the target. If there is one, and it doesn't have a PHY 8179 * pointer, re-establish that linkage here. 8180 */ 8181 mutex_enter(&pwp->lock); 8182 target = pmcs_get_target(iport, tgt_addr, B_FALSE); 8183 mutex_exit(&pwp->lock); 8184 8185 /* 8186 * If we got a target, it will now have a PHY pointer and the PHY 8187 * will point to the target. The PHY will be locked, so we'll need 8188 * to unlock it. 8189 */ 8190 if (target) { 8191 pmcs_unlock_phy(target->phy); 8192 } 8193 8194 /* 8195 * Update config_restart_time so we don't try to restart discovery 8196 * while enumeration is still in progress. 
8197 */ 8198 mutex_enter(&pwp->config_lock); 8199 pwp->config_restart_time = ddi_get_lbolt() + 8200 drv_usectohz(PMCS_REDISCOVERY_DELAY); 8201 mutex_exit(&pwp->config_lock); 8202 } 8203 8204 /* ARGSUSED */ 8205 static boolean_t 8206 pmcs_tgtmap_deactivate_cb(void *tgtmap_priv, char *tgt_addr, 8207 scsi_tgtmap_tgt_type_t tgt_type, void *tgt_priv, 8208 scsi_tgtmap_deact_rsn_t tgt_deact_rsn) 8209 { 8210 pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv; 8211 pmcs_phy_t *phyp; 8212 boolean_t rediscover = B_FALSE; 8213 8214 ASSERT(iport); 8215 8216 phyp = pmcs_find_phy_by_sas_address(iport->pwp, iport, NULL, tgt_addr); 8217 if (phyp == NULL) { 8218 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 8219 "%s: Couldn't find PHY at %s", __func__, tgt_addr); 8220 return (rediscover); 8221 } 8222 /* phyp is locked */ 8223 8224 if (!phyp->reenumerate && phyp->configured) { 8225 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, phyp->target, 8226 "%s: PHY @ %s is configured... re-enumerate", __func__, 8227 tgt_addr); 8228 phyp->reenumerate = 1; 8229 } 8230 8231 /* 8232 * Check to see if reenumerate is set, and if so, if we've reached our 8233 * maximum number of retries. 
8234 */ 8235 if (phyp->reenumerate) { 8236 if (phyp->enum_attempts == PMCS_MAX_REENUMERATE) { 8237 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, 8238 phyp->target, 8239 "%s: No more enumeration attempts for %s", __func__, 8240 tgt_addr); 8241 } else { 8242 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, 8243 phyp->target, "%s: Re-attempt enumeration for %s", 8244 __func__, tgt_addr); 8245 ++phyp->enum_attempts; 8246 rediscover = B_TRUE; 8247 } 8248 8249 phyp->reenumerate = 0; 8250 } 8251 8252 pmcs_unlock_phy(phyp); 8253 8254 mutex_enter(&iport->pwp->config_lock); 8255 iport->pwp->config_restart_time = ddi_get_lbolt() + 8256 drv_usectohz(PMCS_REDISCOVERY_DELAY); 8257 if (rediscover) { 8258 iport->pwp->config_restart = B_TRUE; 8259 } else if (iport->pwp->config_restart == B_TRUE) { 8260 /* 8261 * If we aren't asking for rediscovery because of this PHY, 8262 * check to see if we're already asking for it on behalf of 8263 * some other PHY. If so, we'll want to return TRUE, so reset 8264 * "rediscover" here. 8265 */ 8266 rediscover = B_TRUE; 8267 } 8268 8269 mutex_exit(&iport->pwp->config_lock); 8270 8271 return (rediscover); 8272 } 8273 8274 void 8275 pmcs_status_disposition(pmcs_phy_t *phyp, uint32_t status) 8276 { 8277 ASSERT(phyp); 8278 ASSERT(!mutex_owned(&phyp->phy_lock)); 8279 8280 if (phyp == NULL) { 8281 return; 8282 } 8283 8284 pmcs_lock_phy(phyp); 8285 8286 /* 8287 * XXX: Do we need to call this function from an SSP_EVENT? 
8288 */ 8289 8290 switch (status) { 8291 case PMCOUT_STATUS_NO_DEVICE: 8292 case PMCOUT_STATUS_ERROR_HW_TIMEOUT: 8293 case PMCOUT_STATUS_XFER_ERR_BREAK: 8294 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY: 8295 case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED: 8296 case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION: 8297 case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK: 8298 case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION: 8299 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 8300 case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: 8301 case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION: 8302 case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR: 8303 case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED: 8304 case PMCOUT_STATUS_XFER_ERROR_RX_FRAME: 8305 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT: 8306 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE: 8307 case PMCOUT_STATUS_IO_PORT_IN_RESET: 8308 case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: 8309 case PMCOUT_STATUS_IO_DS_IN_RECOVERY: 8310 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 8311 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target, 8312 "%s: status = 0x%x for " SAS_ADDR_FMT ", reenumerate", 8313 __func__, status, SAS_ADDR_PRT(phyp->sas_address)); 8314 phyp->reenumerate = 1; 8315 break; 8316 8317 default: 8318 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target, 8319 "%s: status = 0x%x for " SAS_ADDR_FMT ", no reenumeration", 8320 __func__, status, SAS_ADDR_PRT(phyp->sas_address)); 8321 break; 8322 } 8323 8324 pmcs_unlock_phy(phyp); 8325 } 8326 8327 /* 8328 * Add the list of PHYs pointed to by phyp to the dead_phys_list 8329 * 8330 * Called with all PHYs in the list locked 8331 */ 8332 static void 8333 pmcs_add_dead_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 8334 { 8335 mutex_enter(&pwp->dead_phylist_lock); 8336 while (phyp) { 8337 pmcs_phy_t *nxt = phyp->sibling; 8338 ASSERT(phyp->dead); 8339 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 8340 "%s: dead PHY 0x%p (%s) (ref_count %d)", __func__, 8341 
(void *)phyp, phyp->path, phyp->ref_count); 8342 /* 8343 * Put this PHY on the dead PHY list for the watchdog to 8344 * clean up after any outstanding work has completed. 8345 */ 8346 phyp->dead_next = pwp->dead_phys; 8347 pwp->dead_phys = phyp; 8348 pmcs_unlock_phy(phyp); 8349 phyp = nxt; 8350 } 8351 mutex_exit(&pwp->dead_phylist_lock); 8352 } 8353 8354 static void 8355 pmcs_get_fw_version(pmcs_hw_t *pwp) 8356 { 8357 uint32_t ila_len, ver_hi, ver_lo; 8358 uint8_t ila_ver_string[9], img_flag; 8359 char uc, *ucp = &uc; 8360 unsigned long ila_ver; 8361 uint64_t ver_hilo; 8362 8363 /* Firmware version is easy. */ 8364 pwp->fw = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FW); 8365 8366 /* 8367 * Get the image size (2nd to last dword) 8368 * NOTE: The GSM registers are mapped little-endian, but the data 8369 * on the flash is actually big-endian, so we need to swap these values 8370 * regardless of which platform we're on. 8371 */ 8372 ila_len = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8373 GSM_FLASH_BASE + GSM_SM_BLKSZ - (2 << 2))); 8374 if (ila_len > 65535) { 8375 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 8376 "%s: Invalid ILA image size (0x%x)?", __func__, ila_len); 8377 return; 8378 } 8379 8380 /* 8381 * The numeric version is at ila_len - PMCS_ILA_VER_OFFSET 8382 */ 8383 ver_hi = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8384 GSM_FLASH_BASE + ila_len - PMCS_ILA_VER_OFFSET)); 8385 ver_lo = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8386 GSM_FLASH_BASE + ila_len - PMCS_ILA_VER_OFFSET + 4)); 8387 ver_hilo = BE_64(((uint64_t)ver_hi << 32) | ver_lo); 8388 bcopy((const void *)&ver_hilo, &ila_ver_string[0], 8); 8389 ila_ver_string[8] = '\0'; 8390 8391 (void) ddi_strtoul((const char *)ila_ver_string, &ucp, 16, &ila_ver); 8392 pwp->ila_ver = (int)(ila_ver & 0xffffffff); 8393 8394 img_flag = (BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8395 GSM_FLASH_IMG_FLAGS)) & 0xff000000) >> 24; 8396 if (img_flag & PMCS_IMG_FLAG_A) { 8397 pwp->fw_active_img = 1; 
8398 } else { 8399 pwp->fw_active_img = 0; 8400 } 8401 } 8402