/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 *
 *
 * $OpenBSD: sili.c,v 1.147 2009/02/16 21:19:07 miod Exp $
 */

#include "sili.h"

void	sili_port_interrupt_enable(struct sili_port *ap);
void	sili_port_interrupt_redisable(struct sili_port *ap);
void	sili_port_interrupt_reenable(struct sili_port *ap);

int	sili_load_prb(struct sili_ccb *);
void	sili_unload_prb(struct sili_ccb *);
static void sili_load_prb_callback(void *info, bus_dma_segment_t *segs,
		    int nsegs, int error);
void	sili_start(struct sili_ccb *);
int	sili_port_softreset(struct sili_port *ap);
int	sili_port_hardreset(struct sili_port *ap);
void	sili_port_hardstop(struct sili_port *ap);
void	sili_port_listen(struct sili_port *ap);

static void sili_ata_cmd_timeout_unserialized(void *);
static int sili_core_timeout(struct sili_ccb *ccb, int really_error);
void	sili_check_active_timeouts(struct sili_port *ap);

#if 0
void	sili_beg_exclusive_access(struct sili_port *ap, struct ata_port *at);
void	sili_end_exclusive_access(struct sili_port *ap, struct ata_port *at);
#endif
void	sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb);

void	sili_port_read_ncq_error(struct sili_port *, int);

struct sili_dmamem *sili_dmamem_alloc(struct sili_softc *, bus_dma_tag_t tag);
void	sili_dmamem_free(struct sili_softc *, struct sili_dmamem *);
static void sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs, int nsegs, int error);

static void sili_dummy_done(struct ata_xfer *xa);
static void sili_empty_done(struct sili_ccb *ccb);
static void sili_ata_cmd_done(struct sili_ccb *ccb);

/*
 * Initialize the global SILI hardware.  This code does not set up any of
 * its ports.
 */
int
sili_init(struct sili_softc *sc)
{
	DPRINTF(SILI_D_VERBOSE, " GHC 0x%b",
		sili_read(sc, SILI_REG_GHC), SILI_FMT_GHC);

	/*
	 * Reset the entire chip.  This also resets all ports.
	 *
	 * The spec doesn't say anything about how long we have to
	 * wait, so wait 10ms.
	 */
	sili_write(sc, SILI_REG_GCTL, SILI_REG_GCTL_GRESET);
	sili_os_sleep(10);
	sili_write(sc, SILI_REG_GCTL, 0);
	sili_os_sleep(10);

	return (0);
}

/*
 * Allocate and initialize an SILI port.
 */
int
sili_port_alloc(struct sili_softc *sc, u_int port)
{
	struct sili_port	*ap;
	struct ata_port		*at;
	struct sili_prb		*prb;
	struct sili_ccb		*ccb;
	int	rc = ENOMEM;
	int	error;
	int	i;

	ap = kmalloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
	ap->ap_err_scratch = kmalloc(512, M_DEVBUF, M_WAITOK | M_ZERO);

	ksnprintf(ap->ap_name, sizeof(ap->ap_name), "%s%d.%d",
		  device_get_name(sc->sc_dev),
		  device_get_unit(sc->sc_dev),
		  port);
	sc->sc_ports[port] = ap;
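
	/*
	 * Illustrative note (not from the original source): the name built
	 * above is the controller name plus the port index, e.g. port 1 of
	 * sili0 becomes "sili0.1".  The per-target names created below add
	 * a further ".<target>" suffix for devices behind a port multiplier.
	 */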

	/*
	 * Allocate enough so we never have to reallocate; it makes
	 * things easier.
	 *
	 * ap_pmcount will be reduced by the scan if we encounter the
	 * port multiplier port prior to target 15.
	 */
	if (ap->ap_ata == NULL) {
		ap->ap_ata = kmalloc(sizeof(*ap->ap_ata) * SILI_MAX_PMPORTS,
				     M_DEVBUF, M_INTWAIT | M_ZERO);
		for (i = 0; i < SILI_MAX_PMPORTS; ++i) {
			at = &ap->ap_ata[i];
			at->at_sili_port = ap;
			at->at_target = i;
			at->at_probe = ATA_PROBE_NEED_INIT;
			at->at_features |= ATA_PORT_F_RESCAN;
			ksnprintf(at->at_name, sizeof(at->at_name),
				  "%s.%d", ap->ap_name, i);
		}
	}
	if (bus_space_subregion(sc->sc_piot, sc->sc_pioh,
				SILI_PORT_REGION(port), SILI_PORT_SIZE,
				&ap->ap_ioh) != 0) {
		device_printf(sc->sc_dev,
			      "unable to create register window for port %d\n",
			      port);
		goto freeport;
	}

	ap->ap_sc = sc;
	ap->ap_num = port;
	ap->ap_probe = ATA_PROBE_NEED_INIT;
	TAILQ_INIT(&ap->ap_ccb_free);
	TAILQ_INIT(&ap->ap_ccb_pending);
	lockinit(&ap->ap_ccb_lock, "silipo", 0, 0);

	/* Disable port interrupts */
	sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK);

	/*
	 * Reset the port.  This is similar to a Device Reset but far
	 * more invasive.  We use Device Reset in our hardreset function.
	 * This function also does the same OOB initialization sequence
	 * that Device Reset does.
	 *
	 * NOTE: SILI_PREG_STATUS_READY will not be asserted unless and until
	 *	 a device is connected to the port, so we can't use it to
	 *	 verify that the port exists.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
	if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) {
		device_printf(sc->sc_dev,
			      "Port %d will not go into reset\n", port);
		goto freeport;
	}
	sili_os_sleep(10);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);

	/*
	 * Allocate the SGE Table
	 */
	ap->ap_dmamem_prbs = sili_dmamem_alloc(sc, sc->sc_tag_prbs);
	if (ap->ap_dmamem_prbs == NULL) {
		kprintf("%s: NOSGET\n", PORTNAME(ap));
		goto freeport;
	}

	/*
	 * Set up the SGE table base address
	 */
	ap->ap_prbs = (struct sili_prb *)SILI_DMA_KVA(ap->ap_dmamem_prbs);

	/*
	 * Allocate a CCB for each command slot
	 */
	ap->ap_ccbs = kmalloc(sizeof(struct sili_ccb) * sc->sc_ncmds, M_DEVBUF,
			      M_WAITOK | M_ZERO);
	if (ap->ap_ccbs == NULL) {
		device_printf(sc->sc_dev,
			      "unable to allocate command list for port %d\n",
			      port);
		goto freeport;
	}

	/*
	 * Most structures are in the port BAR.  Assign convenient
	 * pointers in the CCBs
	 */
	for (i = 0; i < sc->sc_ncmds; i++) {
		ccb = &ap->ap_ccbs[i];

		error = bus_dmamap_create(sc->sc_tag_data, BUS_DMA_ALLOCNOW,
					  &ccb->ccb_dmamap);
		if (error) {
			device_printf(sc->sc_dev,
				      "unable to create dmamap for port %d "
				      "ccb %d\n", port, i);
			goto freeport;
		}

		/*
		 * WARNING!!!  Access to the rfis is only allowed under very
		 *	       carefully controlled circumstances because it
		 *	       is located in the LRAM and reading from the
		 *	       LRAM has hardware issues which can blow the
		 *	       port up.  I kid you not (from Linux, and
		 *	       verified by testing here).
		 */
		callout_init(&ccb->ccb_timeout);
		ccb->ccb_slot = i;
		ccb->ccb_port = ap;
		ccb->ccb_prb = &ap->ap_prbs[i];
		ccb->ccb_prb_paddr = SILI_DMA_DVA(ap->ap_dmamem_prbs) +
				     sizeof(*ccb->ccb_prb) * i;
		ccb->ccb_xa.fis = &ccb->ccb_prb->prb_h2d;
		prb = bus_space_kva(ap->ap_sc->sc_iot, ap->ap_ioh,
				    SILI_PREG_LRAM_SLOT(i));
		ccb->ccb_prb_lram = prb;
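		/*
		 * Illustrative summary (not from the original source): each
		 * command slot i thus has two views of its PRB.  Commands are
		 * actually built in the host-memory copy at
		 * SILI_DMA_DVA(ap->ap_dmamem_prbs) + i * sizeof(struct sili_prb),
		 * while ccb_prb_lram maps the controller's own LRAM copy at
		 * SILI_PREG_LRAM_SLOT(i).  The LRAM view is kept only for the
		 * AUTOSENSE copy-back in the interrupt path, since LRAM reads
		 * are unreliable on this hardware.
		 */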
		/*
		 * Point our rfis to host memory instead of the LRAM PRB.
		 * It will be copied back if ATA_F_AUTOSENSE is set.  The
		 * LRAM PRB is buggy.
		 */
		/*ccb->ccb_xa.rfis = &prb->prb_d2h;*/
		ccb->ccb_xa.rfis = (void *)ccb->ccb_xa.fis;

		ccb->ccb_xa.packetcmd = prb_packet(ccb->ccb_prb);
		ccb->ccb_xa.tag = i;

		ccb->ccb_xa.state = ATA_S_COMPLETE;

		/*
		 * Reserve CCB[1] as the error CCB.  It doesn't matter
		 * which one we use for the Sili controllers.
		 */
		if (i == 1)
			ap->ap_err_ccb = ccb;
		else
			sili_put_ccb(ccb);
	}

	/*
	 * Do not call sili_port_init() here; the helper thread will
	 * call it for the parallel probe.
	 */
	sili_os_start_port(ap);
	return(0);
freeport:
	sili_port_free(sc, port);
	return (rc);
}

/*
 * This is called once by the low level attach (from the helper thread)
 * to get the port state machine rolling, and again for PM attachments
 * and hot-plug insertion events.  It is typically not called again until
 * after an unplug/replug sequence.
 *
 * Returns 0 if a device is successfully detected.
 */
int
sili_port_init(struct sili_port *ap)
{
	/*
	 * Do a very hard reset of the port
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
	sili_os_sleep(10);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);

	/*
	 * Register initialization
	 */
	sili_pwrite(ap, SILI_PREG_FIFO_CTL,
		    SILI_PREG_FIFO_CTL_ENCODE(1024, 1024));
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_32BITDMA |
					   SILI_PREG_CTL_PMA);
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_NOAUTOCC);
	if (ap->ap_sc->sc_flags & SILI_F_SSNTF)
		sili_pwrite(ap, SILI_PREG_SNTF, -1);
	ap->ap_probe = ATA_PROBE_NEED_HARD_RESET;
	ap->ap_pmcount = 0;
	sili_port_interrupt_enable(ap);
	return (0);
}

/*
 * Handle an errored port.  This routine is called when the only
 * commands left on the queue are expired, meaning we can safely
 * go through a port init to clear its state.
 *
 * We complete the expired CCBs and then restart the queue.
 */
static
void
sili_port_reinit(struct sili_port *ap)
{
	struct sili_ccb	*ccb;
	struct ata_port	*at;
	int slot;
	int target;
	u_int32_t data;
	int reentrant;

	reentrant = (ap->ap_flags & AP_F_ERR_CCB_RESERVED) ? 1 : 0;

	if (bootverbose || 1) {
		kprintf("%s: reiniting port after error reent=%d "
			"expired=%08x\n",
			PORTNAME(ap), reentrant, ap->ap_expired);
	}

	/*
	 * Clear port resume, clear bits 16:13 in the port device status
	 * register.  This is from the data sheet.
	 *
	 * The data sheet does not specify a delay but it seems prudent.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME);
	sili_os_sleep(10);
	for (target = 0; target < SILI_MAX_PMPORTS; ++target) {
		data = sili_pread(ap, SILI_PREG_PM_STATUS(target));
		data &= ~(SILI_PREG_PM_STATUS_SERVICE |
			  SILI_PREG_PM_STATUS_LEGACY |
			  SILI_PREG_PM_STATUS_NATIVE |
			  SILI_PREG_PM_STATUS_VBSY);
		sili_pwrite(ap, SILI_PREG_PM_STATUS(target), data);
		sili_pwrite(ap, SILI_PREG_PM_QACTIVE(target), 0);
	}
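
	/*
	 * Illustrative note (not from the original source): the loop above
	 * is the per-target half of the recovery recipe.  Presumably the
	 * SERVICE/LEGACY/NATIVE/VBSY flags are the "bits 16:13" mentioned
	 * in the comment, and clearing SILI_PREG_PM_QACTIVE drops any stale
	 * per-target queue-active state before the Port Initialize below.
	 */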

	/*
	 * Issue a Port Initialize and wait for it to clear.  This flushes
	 * commands but does not reset the port.  Then wait for port ready.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_INIT);
	if (sili_pwait_clr_to(ap, 5000, SILI_PREG_STATUS, SILI_PREG_CTL_INIT)) {
		kprintf("%s: Unable to reinit, port failed\n",
			PORTNAME(ap));
	}
	if (sili_pwait_set(ap, SILI_PREG_STATUS, SILI_PREG_STATUS_READY)) {
		kprintf("%s: Unable to reinit, port will not come ready\n",
			PORTNAME(ap));
	}

	/*
	 * If reentrant, stop here.  Otherwise the state for the original
	 * sili_port_reinit() will get ripped out from under it.
	 */
	if (reentrant)
		return;

	/*
	 * Read the LOG ERROR page for targets that returned a specific
	 * D2H FIS with ERR set.
	 */
	for (target = 0; target < SILI_MAX_PMPORTS; ++target) {
		at = &ap->ap_ata[target];
		if (at->at_features & ATA_PORT_F_READLOG) {
			at->at_features &= ~ATA_PORT_F_READLOG;
			sili_port_read_ncq_error(ap, target);
		}
	}

	/*
	 * Finally clean out the expired commands; we've probed the error
	 * status (or hopefully probed the error status).  Well, ok,
	 * we probably didn't XXX.
	 */
	while (ap->ap_expired) {
		slot = ffs(ap->ap_expired) - 1;
		ap->ap_expired &= ~(1 << slot);
		KKASSERT(ap->ap_active & (1 << slot));
		ap->ap_active &= ~(1 << slot);
		--ap->ap_active_cnt;
		ccb = &ap->ap_ccbs[slot];
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_done(ccb);
		ccb->ccb_xa.complete(&ccb->ccb_xa);
	}

	/*
	 * Wow.  All done.  We can get the port moving again.
	 */
	if (ap->ap_probe == ATA_PROBE_FAILED) {
		kprintf("%s: reinit failed, port is dead\n", PORTNAME(ap));
		while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
			TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
			ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED;
			ccb->ccb_xa.state = ATA_S_TIMEOUT;
			ccb->ccb_done(ccb);
			ccb->ccb_xa.complete(&ccb->ccb_xa);
		}
	} else {
		sili_issue_pending_commands(ap, NULL);
	}
}

/*
 * Enable or re-enable interrupts on a port.
 *
 * This routine is called from the port initialization code or from the
 * helper thread, because the real interrupt handler may be forced to turn
 * off certain interrupt sources.
 */
void
sili_port_interrupt_enable(struct sili_port *ap)
{
	u_int32_t data;

	data = SILI_PREG_INT_CCOMPLETE | SILI_PREG_INT_CERROR |
	       SILI_PREG_INT_PHYRDYCHG | SILI_PREG_INT_DEVEXCHG |
	       SILI_PREG_INT_DECODE | SILI_PREG_INT_CRC |
	       SILI_PREG_INT_HANDSHK | SILI_PREG_INT_PMCHANGE;
	if (ap->ap_sc->sc_flags & SILI_F_SSNTF)
		data |= SILI_PREG_INT_SDB;
	sili_pwrite(ap, SILI_PREG_INT_ENABLE, data);
}

void
sili_port_interrupt_redisable(struct sili_port *ap)
{
	u_int32_t data;

	data = sili_read(ap->ap_sc, SILI_REG_GCTL);
	data &= SILI_REG_GINT_PORTMASK;
	data &= ~(1 << ap->ap_num);
	sili_write(ap->ap_sc, SILI_REG_GCTL, data);
}

void
sili_port_interrupt_reenable(struct sili_port *ap)
{
	u_int32_t data;

	data = sili_read(ap->ap_sc, SILI_REG_GCTL);
	data &= SILI_REG_GINT_PORTMASK;
	data |= (1 << ap->ap_num);
	sili_write(ap->ap_sc, SILI_REG_GCTL, data);
}

/*
 * Run the port / target state machine from a main context.
 *
 * The state machine for the port is always run.  If the port is
 * connected to a port multiplier, the state machine is also run for
 * each target behind it.
 */
void
sili_port_state_machine(struct sili_port *ap, int initial)
{
	struct ata_port *at;
	u_int32_t data;
	int target;
	int didsleep;
	int loop;

	/*
	 * State machine for port.  Note that CAM is not yet associated
	 * during the initial parallel probe and the port's probe state
	 * will not get past ATA_PROBE_NEED_IDENT.
	 */
	{
		if (initial == 0 && ap->ap_probe <= ATA_PROBE_NEED_HARD_RESET) {
			kprintf("%s: Waiting 7 seconds on insertion\n",
				PORTNAME(ap));
			sili_os_sleep(7000);
			initial = 1;
		}
		if (ap->ap_probe == ATA_PROBE_NEED_INIT)
			sili_port_init(ap);
		if (ap->ap_probe == ATA_PROBE_NEED_HARD_RESET)
			sili_port_reset(ap, NULL, 1);
		if (ap->ap_probe == ATA_PROBE_NEED_SOFT_RESET)
			sili_port_reset(ap, NULL, 0);
		if (ap->ap_probe == ATA_PROBE_NEED_IDENT)
			sili_cam_probe(ap, NULL);
	}
	if (ap->ap_type != ATA_PORT_T_PM) {
		if (ap->ap_probe == ATA_PROBE_FAILED) {
			sili_cam_changed(ap, NULL, 0);
		} else if (ap->ap_probe >= ATA_PROBE_NEED_IDENT) {
			sili_cam_changed(ap, NULL, 1);
		}
		return;
	}

	/*
	 * Port Multiplier state machine.
	 *
	 * Get a mask of changed targets and combine with any runnable
	 * states already present.
	 */
	for (loop = 0; ; ++loop) {
		if (sili_pm_read(ap, 15, SATA_PMREG_EINFO, &data)) {
			kprintf("%s: PM unable to read hot-plug bitmap\n",
				PORTNAME(ap));
			break;
		}

		/*
		 * Do at least one loop, then stop if no more state changes
		 * have occurred.  The PM might not generate a new
		 * notification until we clear the entire bitmap.
		 */
		if (loop && data == 0)
			break;

		/*
		 * New devices showing up in the bitmap require some spin-up
		 * time before we start probing them.  Reset didsleep.  The
		 * first new device we detect will sleep before probing.
		 *
		 * This only applies to devices whose change bit is set in
		 * the data, and does not apply to the initial boot-time
		 * probe.
		 */
		didsleep = 0;

		for (target = 0; target < ap->ap_pmcount; ++target) {
			at = &ap->ap_ata[target];

			/*
			 * Check the target state for targets behind the PM
			 * which have changed state.  This will adjust
			 * at_probe and set ATA_PORT_F_RESCAN.
			 *
			 * We want to wait at least 10 seconds before probing
			 * a newly inserted device.  If the check status
			 * indicates a device is present and in need of a
			 * hard reset, we make sure we have slept before
			 * continuing.
			 *
			 * We also need to wait at least 1 second for the
			 * PHY state to change after insertion, if we
			 * haven't already waited the 10 seconds.
			 *
			 * NOTE: When pm_check_good finds a good port it
			 *	 typically starts us in probe state
			 *	 NEED_HARD_RESET rather than INIT.
			 */
			if (data & (1 << target)) {
				if (initial == 0 && didsleep == 0)
					sili_os_sleep(1000);
				sili_pm_check_good(ap, target);
				if (initial == 0 && didsleep == 0 &&
				    at->at_probe <= ATA_PROBE_NEED_HARD_RESET
				) {
					didsleep = 1;
					kprintf("%s: Waiting 10 seconds on insertion\n", PORTNAME(ap));
					sili_os_sleep(10000);
				}
			}

			/*
			 * Report hot-plug events before the probe state
			 * really gets hot.  Only actual events are reported
			 * here to reduce spew.
			 */
			if (data & (1 << target)) {
				kprintf("%s: HOTPLUG (PM) - ", ATANAME(ap, at));
				switch(at->at_probe) {
				case ATA_PROBE_NEED_INIT:
				case ATA_PROBE_NEED_HARD_RESET:
					kprintf("Device inserted\n");
					break;
				case ATA_PROBE_FAILED:
					kprintf("Device removed\n");
					break;
				default:
					kprintf("Device probe in progress\n");
					break;
				}
			}

			/*
			 * Run through the state machine as necessary if
			 * the port is not marked failed.
			 *
			 * The state machine may stop at NEED_IDENT if
			 * CAM is not yet attached.
			 *
			 * Acquire exclusive access to the port while we
			 * are doing this.  This prevents command-completion
			 * from queueing commands for non-polled targets
			 * in between our probe steps.  We need to do this
			 * because the reset probes can generate severe PHY
			 * and protocol errors and soft-brick the port.
			 */
			if (at->at_probe != ATA_PROBE_FAILED &&
			    at->at_probe != ATA_PROBE_GOOD) {
				if (at->at_probe == ATA_PROBE_NEED_INIT)
					sili_pm_port_init(ap, at);
				if (at->at_probe == ATA_PROBE_NEED_HARD_RESET)
					sili_port_reset(ap, at, 1);
				if (at->at_probe == ATA_PROBE_NEED_SOFT_RESET)
					sili_port_reset(ap, at, 0);
				if (at->at_probe == ATA_PROBE_NEED_IDENT)
					sili_cam_probe(ap, at);
			}

			/*
			 * Add or remove from CAM
			 */
			if (at->at_features & ATA_PORT_F_RESCAN) {
				at->at_features &= ~ATA_PORT_F_RESCAN;
				if (at->at_probe == ATA_PROBE_FAILED) {
					sili_cam_changed(ap, at, 0);
				} else if (at->at_probe >= ATA_PROBE_NEED_IDENT) {
					sili_cam_changed(ap, at, 1);
				}
			}
			data &= ~(1 << target);
		}
		if (data) {
			kprintf("%s: WARNING (PM): extra bits set in "
				"EINFO: %08x\n", PORTNAME(ap), data);
			while (target < SILI_MAX_PMPORTS) {
				sili_pm_check_good(ap, target);
				++target;
			}
		}
	}
}

/*
 * De-initialize and detach a port.
 */
void
sili_port_free(struct sili_softc *sc, u_int port)
{
	struct sili_port *ap = sc->sc_ports[port];
	struct sili_ccb *ccb;

	/*
	 * Ensure port is disabled and its interrupts are all flushed.
	 */
	if (ap->ap_sc) {
		sili_os_stop_port(ap);
		sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK);
		sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
		sili_write(ap->ap_sc, SILI_REG_GCTL,
			   sili_read(ap->ap_sc, SILI_REG_GCTL) &
			   ~SILI_REG_GINT_PORTST(ap->ap_num));
	}

	if (ap->ap_ccbs) {
		while ((ccb = sili_get_ccb(ap)) != NULL) {
			if (ccb->ccb_dmamap) {
				bus_dmamap_destroy(sc->sc_tag_data,
						   ccb->ccb_dmamap);
				ccb->ccb_dmamap = NULL;
			}
		}
		if ((ccb = ap->ap_err_ccb) != NULL) {
			if (ccb->ccb_dmamap) {
				bus_dmamap_destroy(sc->sc_tag_data,
						   ccb->ccb_dmamap);
				ccb->ccb_dmamap = NULL;
			}
			ap->ap_err_ccb = NULL;
		}
		kfree(ap->ap_ccbs, M_DEVBUF);
		ap->ap_ccbs = NULL;
	}

	if (ap->ap_dmamem_prbs) {
		sili_dmamem_free(sc, ap->ap_dmamem_prbs);
		ap->ap_dmamem_prbs = NULL;
	}
	if (ap->ap_ata) {
		kfree(ap->ap_ata, M_DEVBUF);
		ap->ap_ata = NULL;
	}
	if (ap->ap_err_scratch) {
		kfree(ap->ap_err_scratch, M_DEVBUF);
		ap->ap_err_scratch = NULL;
	}

	/* bus_space(9) says we don't free the subregion handle */

	kfree(ap, M_DEVBUF);
	sc->sc_ports[port] = NULL;
}
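
/*
 * Illustrative note (not from the original source): sili_port_reset()
 * below is normally driven by the state machines above, e.g.
 *
 *	sili_port_reset(ap, NULL, 1);	- hard reset of a direct port
 *	sili_port_reset(ap, NULL, 0);	- soft reset of a direct port
 *	sili_port_reset(ap, at, 1);	- hard reset of a PM target
 *	sili_port_reset(ap, at, 0);	- soft reset of a PM target
 *
 * matching the ATA_PROBE_NEED_HARD_RESET / NEED_SOFT_RESET transitions
 * in sili_port_state_machine().
 */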
/*
 * Reset a port.
 *
 * If hard is 0 perform a softreset of the port.
 * If hard is 1 perform a hard reset of the port.
 * If hard is 2 perform a hard reset of the port and cycle the phy.
 *
 * If at is non-NULL an indirect port via a port-multiplier is being
 * reset, otherwise a direct port is being reset.
 *
 * NOTE: Indirect ports can only be soft-reset.
 */
int
sili_port_reset(struct sili_port *ap, struct ata_port *at, int hard)
{
	int rc;

	if (hard) {
		if (at)
			rc = sili_pm_hardreset(ap, at->at_target, hard);
		else
			rc = sili_port_hardreset(ap);
	} else {
		if (at)
			rc = sili_pm_softreset(ap, at->at_target);
		else
			rc = sili_port_softreset(ap);
	}
	return(rc);
}

/*
 * SILI soft reset, Section 10.4.1
 *
 * This soft-resets a directly-attached device; devices behind a port
 * multiplier are soft-reset via sili_pm_softreset() instead.
 *
 * This function keeps port communications intact and attempts to generate
 * a reset to the connected device using device commands.
 */
int
sili_port_softreset(struct sili_port *ap)
{
	struct sili_ccb	*ccb = NULL;
	struct sili_prb	*prb;
	int		error;
	u_int32_t	sig;

	error = EIO;

	if (bootverbose)
		kprintf("%s: START SOFTRESET\n", PORTNAME(ap));

	crit_enter();
	ap->ap_state = AP_S_NORMAL;

	/*
	 * Prep the special soft-reset SII command.
	 */
	ccb = sili_get_err_ccb(ap);
	ccb->ccb_done = sili_empty_done;
	ccb->ccb_xa.flags = ATA_F_POLL | ATA_F_AUTOSENSE | ATA_F_EXCLUSIVE;
	ccb->ccb_xa.complete = sili_dummy_done;
	ccb->ccb_xa.at = NULL;

	prb = ccb->ccb_prb;
	bzero(&prb->prb_h2d, sizeof(prb->prb_h2d));
	prb->prb_h2d.flags = 0;
	prb->prb_control = SILI_PRB_CTRL_SOFTRESET;
	prb->prb_override = 0;
	prb->prb_xfer_count = 0;

	ccb->ccb_xa.state = ATA_S_PENDING;

	/*
	 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb
	 */
	if (sili_poll(ccb, 8000, sili_quick_timeout) != ATA_S_COMPLETE) {
		kprintf("%s: First FIS failed\n", PORTNAME(ap));
		goto err;
	}

	sig = (prb->prb_d2h.lba_high << 24) |
	      (prb->prb_d2h.lba_mid << 16) |
	      (prb->prb_d2h.lba_low << 8) |
	      (prb->prb_d2h.sector_count);
	if (bootverbose)
		kprintf("%s: SOFTRESET SIGNATURE %08x\n", PORTNAME(ap), sig);

	/*
	 * If the softreset is trying to clear a BSY condition after a
	 * normal portreset we assign the port type.
	 *
	 * If the softreset is being run first as part of the ccb error
	 * processing code then report if the device signature changed
	 * unexpectedly.
	 */
	if (ap->ap_type == ATA_PORT_T_NONE) {
		ap->ap_type = sili_port_signature(ap, NULL, sig);
	} else {
		if (sili_port_signature(ap, NULL, sig) != ap->ap_type) {
			kprintf("%s: device signature unexpectedly "
				"changed\n", PORTNAME(ap));
			error = EBUSY; /* XXX */
		}
	}
	error = 0;
err:
	if (ccb != NULL) {
		sili_put_err_ccb(ccb);
	}
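
	/*
	 * Illustrative note (not from the original source): the signature
	 * assembled above is the initial D2H register FIS content.  By SATA
	 * convention 0x00000101 identifies a normal ATA disk, 0xEB140101 an
	 * ATAPI device and 0x96690101 a port multiplier, which is what
	 * sili_port_signature() keys off of (it only compares the top 16
	 * bits).
	 */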

	/*
	 * If we failed to softreset make the port quiescent, otherwise
	 * make sure the port's start/stop state matches what it was on
	 * entry.
	 *
	 * Don't kill the port if the softreset is on a port multiplier
	 * target, that would kill all the targets!
	 */
	if (bootverbose) {
		kprintf("%s: END SOFTRESET %d probe=%d state=%d\n",
			PORTNAME(ap), error, ap->ap_probe, ap->ap_state);
	}
	if (error) {
		sili_port_hardstop(ap);
		/* ap_probe set to failed */
	} else {
		ap->ap_probe = ATA_PROBE_NEED_IDENT;
		ap->ap_pmcount = 1;
	}
	crit_exit();

	sili_pwrite(ap, SILI_PREG_SERR, -1);
	if (bootverbose)
		kprintf("%s: END SOFTRESET\n", PORTNAME(ap));

	return (error);
}

/*
 * This function does a hard reset of the port.  Note that the device
 * connected to the port could still end up hung.  Phy detection is
 * used to short-cut longer operations.
 */
int
sili_port_hardreset(struct sili_port *ap)
{
	u_int32_t data;
	int	error;
	int	loop;

	if (bootverbose)
		kprintf("%s: START HARDRESET\n", PORTNAME(ap));

	ap->ap_state = AP_S_NORMAL;

	/*
	 * Set SCTL up for any speed restrictions before issuing the
	 * device reset.  This may also take us out of an INIT state
	 * (if we were previously in a continuous reset state from
	 * sili_port_listen()).
	 */
	data = SILI_PREG_SCTL_SPM_NONE |
	       SILI_PREG_SCTL_IPM_NONE |
	       SILI_PREG_SCTL_SPD_NONE |
	       SILI_PREG_SCTL_DET_NONE;
	if (SiliForceGen1 & (1 << ap->ap_num)) {
		data &= ~SILI_PREG_SCTL_SPD_NONE;
		data |= SILI_PREG_SCTL_SPD_GEN1;
	}
	sili_pwrite(ap, SILI_PREG_SCTL, data);

	/*
	 * The transition from a continuous COMRESET state from
	 * sili_port_listen() back to device detect can take a
	 * few seconds.  It's quite non-deterministic.  Most of
	 * the time it takes far less.  Use a polling loop to
	 * wait.
	 */
	loop = 4000;
	while (loop > 0) {
		data = sili_pread(ap, SILI_PREG_SSTS);
		if (data & SILI_PREG_SSTS_DET)
			break;
		loop -= sili_os_softsleep();
	}
	sili_os_sleep(100);

	/*
	 * Issue Device Reset, give the phy a little time to settle down.
	 *
	 * NOTE: Unlike Port Reset, the port ready signal will not
	 *	 go active unless a device is established to be on
	 *	 the port.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME);
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET);
	if (sili_pwait_clr(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET)) {
		kprintf("%s: hardreset failed to clear\n", PORTNAME(ap));
	}
	sili_os_sleep(20);

	/*
	 * Try to determine if there is a device on the port.
	 *
	 * Give the device 3/10 of a second to at least be detected.
	 */
	loop = 300;
	while (loop > 0) {
		data = sili_pread(ap, SILI_PREG_SSTS);
		if (data & SILI_PREG_SSTS_DET)
			break;
		loop -= sili_os_softsleep();
	}
	if (loop <= 0) {
		if (bootverbose) {
			kprintf("%s: Port appears to be unplugged\n",
				PORTNAME(ap));
		}
		error = ENODEV;
		goto done;
	}

	/*
	 * There is something on the port.  Give the device 3 seconds
	 * to detect.
	 */
	if (sili_pwait_eq(ap, 3000, SILI_PREG_SSTS,
			  SILI_PREG_SSTS_DET, SILI_PREG_SSTS_DET_DEV)) {
		if (bootverbose) {
			kprintf("%s: Device may be powered down\n",
				PORTNAME(ap));
		}
		error = ENODEV;
		goto pmdetect;
	}
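
	/*
	 * Illustrative note (not from the original source): the SSTS.DET
	 * field polled above follows the standard SATA SStatus encoding -
	 * 0 means no device and no PHY communication, 1 means a device was
	 * detected but communication is not yet established, 3 means a
	 * device is present with PHY communication up, and 4 means the PHY
	 * is offline.  SILI_PREG_SSTS_DET_DEV presumably corresponds to the
	 * "3" (device present, PHY up) state.
	 */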

	/*
	 * We got something that definitely looks like a device.  Give
	 * the device time to send us its first D2H FIS.
	 *
	 * This effectively waits for BSY to clear.
	 */
	if (sili_pwait_set_to(ap, 3000, SILI_PREG_STATUS,
			      SILI_PREG_STATUS_READY)) {
		error = EBUSY;
	} else {
		error = 0;
	}

pmdetect:
	/*
	 * Do the PM port probe regardless of how things turned out above.
	 *
	 * If the PM port probe fails it will return the original error
	 * from above.
	 */
	if (ap->ap_sc->sc_flags & SILI_F_SPM) {
		error = sili_pm_port_probe(ap, error);
	}

done:
	/*
	 * Finish up
	 */
	switch(error) {
	case 0:
		if (ap->ap_type == ATA_PORT_T_PM)
			ap->ap_probe = ATA_PROBE_GOOD;
		else
			ap->ap_probe = ATA_PROBE_NEED_SOFT_RESET;
		break;
	case ENODEV:
		/*
		 * No device detected.
		 */
		data = sili_pread(ap, SILI_PREG_SSTS);

		switch(data & SATA_PM_SSTS_DET) {
		case SILI_PREG_SSTS_DET_DEV_NE:
			kprintf("%s: Device not communicating\n",
				PORTNAME(ap));
			break;
		case SILI_PREG_SSTS_DET_OFFLINE:
			kprintf("%s: PHY offline\n",
				PORTNAME(ap));
			break;
		default:
			kprintf("%s: No device detected\n",
				PORTNAME(ap));
			break;
		}
		sili_port_hardstop(ap);
		break;
	default:
		/*
		 * (EBUSY)
		 */
		kprintf("%s: Device on port is bricked\n",
			PORTNAME(ap));
		sili_port_hardstop(ap);
		break;
	}
	sili_pwrite(ap, SILI_PREG_SERR, -1);

	if (bootverbose)
		kprintf("%s: END HARDRESET %d\n", PORTNAME(ap), error);
	return (error);
}

/*
 * Hard-stop on hot-swap device removal.  See 10.10.1
 *
 * Place the port in a mode that will allow it to detect hot-swap insertions.
 * This is a bit imprecise because just setting up SCTL to DET_INIT doesn't
 * seem to do the job.
 */
void
sili_port_hardstop(struct sili_port *ap)
{
	struct sili_ccb *ccb;
	struct ata_port *at;
	int i;
	int slot;

	ap->ap_state = AP_S_FATAL_ERROR;
	ap->ap_probe = ATA_PROBE_FAILED;
	ap->ap_type = ATA_PORT_T_NONE;

	/*
	 * Clean up AT sub-ports on SATA port.
	 */
	for (i = 0; ap->ap_ata && i < SILI_MAX_PMPORTS; ++i) {
		at = &ap->ap_ata[i];
		at->at_type = ATA_PORT_T_NONE;
		at->at_probe = ATA_PROBE_FAILED;
		at->at_features &= ~ATA_PORT_F_READLOG;
	}

	/*
	 * Kill the port.  Don't bother waiting for it to transition
	 * back up.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
	if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) {
		kprintf("%s: Port will not go into reset\n",
			PORTNAME(ap));
	}
	sili_os_sleep(10);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);

	/*
	 * Turn off port-multiplier control bit
	 */
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA);

	/*
	 * Clean up the command list.
	 */
	while (ap->ap_active) {
		slot = ffs(ap->ap_active) - 1;
		ap->ap_active &= ~(1 << slot);
		ap->ap_expired &= ~(1 << slot);
		--ap->ap_active_cnt;
		ccb = &ap->ap_ccbs[slot];
		if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_RUNNING) {
			callout_stop(&ccb->ccb_timeout);
			ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING;
		}
		ccb->ccb_xa.flags &= ~(ATA_F_TIMEOUT_DESIRED |
				       ATA_F_TIMEOUT_EXPIRED);
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_done(ccb);
		ccb->ccb_xa.complete(&ccb->ccb_xa);
	}
	while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
		TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED;
		ccb->ccb_done(ccb);
		ccb->ccb_xa.complete(&ccb->ccb_xa);
	}
	KKASSERT(ap->ap_active_cnt == 0);

	/*
	 * Put the port into a listen mode, we want to get insertion/removal
	 * events.
	 */
	sili_port_listen(ap);
}

/*
 * Place port into a listen mode for hotplug events only.  The port has
 * already been reset and the command processor may not be ready due
 * to the lack of a device.
 */
void
sili_port_listen(struct sili_port *ap)
{
	u_int32_t data;

#if 1
	data = SILI_PREG_SCTL_SPM_NONE |
	       SILI_PREG_SCTL_IPM_NONE |
	       SILI_PREG_SCTL_SPD_NONE |
	       SILI_PREG_SCTL_DET_INIT;
	if (SiliForceGen1 & (1 << ap->ap_num)) {
		data &= ~SILI_PREG_SCTL_SPD_NONE;
		data |= SILI_PREG_SCTL_SPD_GEN1;
	}
#endif
	sili_os_sleep(20);
	sili_pwrite(ap, SILI_PREG_SERR, -1);
	sili_pwrite(ap, SILI_PREG_INT_ENABLE, SILI_PREG_INT_PHYRDYCHG |
					      SILI_PREG_INT_DEVEXCHG);
}

/*
 * Figure out what type of device is connected to the port, ATAPI or
 * DISK.
 */
int
sili_port_signature(struct sili_port *ap, struct ata_port *at, u_int32_t sig)
{
	if (bootverbose)
		kprintf("%s: sig %08x\n", ATANAME(ap, at), sig);
	if ((sig & 0xffff0000) == (SATA_SIGNATURE_ATAPI & 0xffff0000)) {
		return(ATA_PORT_T_ATAPI);
	} else if ((sig & 0xffff0000) ==
		   (SATA_SIGNATURE_PORT_MULTIPLIER & 0xffff0000)) {
		return(ATA_PORT_T_PM);
	} else {
		return(ATA_PORT_T_DISK);
	}
}

/*
 * Load the DMA descriptor table for a CCB's buffer.
 *
 * NOTE: ATA_F_PIO is auto-selected by sili part.
 */
int
sili_load_prb(struct sili_ccb *ccb)
{
	struct sili_port	*ap = ccb->ccb_port;
	struct sili_softc	*sc = ap->ap_sc;
	struct ata_xfer		*xa = &ccb->ccb_xa;
	struct sili_prb		*prb = ccb->ccb_prb;
	struct sili_sge		*sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	int			error;

	/*
	 * Set up the PRB.  The PRB contains 2 SGE's (1 if it is an ATAPI
	 * command).  The SGE must be set up to link to the rest of our
	 * SGE array, in blocks of four SGEs (a SGE table) starting at
	 * prb_sge[0].
	 */
	prb->prb_xfer_count = 0;
	prb->prb_control = 0;
	prb->prb_override = 0;
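	/*
	 * Illustrative layout note (not from the original source): for a
	 * transfer needing e.g. six data segments, the PRB's built-in SGE
	 * links to prb_sge[0]; entries 0-2 then hold data, entry 3 is a
	 * LNK entry pointing at prb_sge[4], entries 4-6 hold the remaining
	 * data and the final entry is flagged TRM.  See
	 * sili_load_prb_callback() below for the code that builds this.
	 */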
	sge = (ccb->ccb_xa.flags & ATA_F_PACKET) ?
		&prb->prb_sge_packet : &prb->prb_sge_normal;
	if (xa->datalen == 0) {
		sge->sge_flags = SILI_SGE_FLAGS_TRM | SILI_SGE_FLAGS_DRD;
		sge->sge_count = 0;
		return (0);
	}

	if (ccb->ccb_xa.flags & ATA_F_READ)
		prb->prb_control |= SILI_PRB_CTRL_READ;
	if (ccb->ccb_xa.flags & ATA_F_WRITE)
		prb->prb_control |= SILI_PRB_CTRL_WRITE;
	sge->sge_flags = SILI_SGE_FLAGS_LNK;
	sge->sge_count = 0;
	sge->sge_paddr = ccb->ccb_prb_paddr +
			 offsetof(struct sili_prb, prb_sge[0]);

	/*
	 * Load our sge array.
	 */
	error = bus_dmamap_load(sc->sc_tag_data, dmap,
				xa->data, xa->datalen,
				sili_load_prb_callback,
				ccb,
				((xa->flags & ATA_F_NOWAIT) ?
				 BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
	if (error != 0) {
		kprintf("%s: error %d loading dmamap\n", PORTNAME(ap), error);
		return (1);
	}

	bus_dmamap_sync(sc->sc_tag_data, dmap,
			(xa->flags & ATA_F_READ) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	return (0);

#ifdef DIAGNOSTIC
diagerr:
	bus_dmamap_unload(sc->sc_tag_data, dmap);
	return (1);
#endif
}

/*
 * Callback from BUSDMA system to load the segment list.
 *
 * The scatter/gather table is loaded by the sili chip in blocks of
 * four SGE's.  If a continuance is required the last entry in each
 * block must point to the next block.
 */
static
void
sili_load_prb_callback(void *info, bus_dma_segment_t *segs, int nsegs,
		       int error)
{
	struct sili_ccb *ccb = info;
	struct sili_sge *sge;
	int sgi;

	KKASSERT(nsegs <= SILI_MAX_SGET);

	sgi = 0;
	sge = &ccb->ccb_prb->prb_sge[0];
	while (nsegs) {
		if ((sgi & 3) == 3) {
			sge->sge_paddr = htole64(ccb->ccb_prb_paddr +
						 offsetof(struct sili_prb,
							  prb_sge[sgi + 1]));
			sge->sge_count = 0;
			sge->sge_flags = SILI_SGE_FLAGS_LNK;
		} else {
			sge->sge_paddr = htole64(segs->ds_addr);
			sge->sge_count = htole32(segs->ds_len);
			sge->sge_flags = 0;
			--nsegs;
			++segs;
		}
		++sge;
		++sgi;
	}
	--sge;
	sge->sge_flags |= SILI_SGE_FLAGS_TRM;
}

void
sili_unload_prb(struct sili_ccb *ccb)
{
	struct sili_port	*ap = ccb->ccb_port;
	struct sili_softc	*sc = ap->ap_sc;
	struct ata_xfer		*xa = &ccb->ccb_xa;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;

	if (xa->datalen != 0) {
		bus_dmamap_sync(sc->sc_tag_data, dmap,
				(xa->flags & ATA_F_READ) ?
				    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_tag_data, dmap);

		if (ccb->ccb_xa.flags & ATA_F_NCQ)
			xa->resid = 0;
		else
			xa->resid = xa->datalen -
				    le32toh(ccb->ccb_prb->prb_xfer_count);
	}
}
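
/*
 * Illustrative usage sketch (not from the original source): sili_poll()
 * below is used by the polled paths in this file; the softreset and NCQ
 * error-log code do roughly
 *
 *	ccb->ccb_xa.state = ATA_S_PENDING;
 *	if (sili_poll(ccb, timeout_ms, sili_quick_timeout) != ATA_S_COMPLETE)
 *		... handle the failure ...
 *
 * where sili_quick_timeout is used because those paths hold the err_ccb.
 */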

/*
 * Start a command and poll for completion.
 *
 * timeout is in ms and only counts once the command gets on-chip.
 *
 * Returns the ATA_S_* state; compare against ATA_S_COMPLETE to determine
 * that no error occurred.
 *
 * NOTE: If the caller specifies a NULL timeout function the caller is
 *	 responsible for clearing hardware state on failure, but we will
 *	 deal with removing the ccb from any pending queue.
 *
 * NOTE: NCQ should never be used with this function.
 *
 * NOTE: If the port is in a failed state and stopped we do not try
 *	 to activate the ccb.
 */
int
sili_poll(struct sili_ccb *ccb, int timeout,
	  void (*timeout_fn)(struct sili_ccb *))
{
	struct sili_port *ap = ccb->ccb_port;

	if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR) {
		ccb->ccb_xa.state = ATA_S_ERROR;
		return(ccb->ccb_xa.state);
	}

	sili_start(ccb);

	do {
		sili_port_intr(ap, 1);
		switch(ccb->ccb_xa.state) {
		case ATA_S_ONCHIP:
			timeout -= sili_os_softsleep();
			break;
		case ATA_S_PENDING:
			/*
			 * The packet can get stuck on the pending queue
			 * if the port refuses to come ready.  XXX
			 */
#if 0
			if (xxx AP_F_EXCLUSIVE_ACCESS)
				timeout -= sili_os_softsleep();
			else
#endif
				sili_os_softsleep();
			sili_check_active_timeouts(ap);
			break;
		default:
			return (ccb->ccb_xa.state);
		}
	} while (timeout > 0);

	/*
	 * Don't spew if this is a probe during hard reset
	 */
	if (ap->ap_probe != ATA_PROBE_NEED_HARD_RESET) {
		kprintf("%s: Poll timeout slot %d\n",
			ATANAME(ap, ccb->ccb_xa.at),
			ccb->ccb_slot);
	}

	timeout_fn(ccb);

	return(ccb->ccb_xa.state);
}

/*
 * When polling we have to check if the currently active CCB(s)
 * have timed out, as the callout will be deadlocked while we
 * hold the port lock.
 */
void
sili_check_active_timeouts(struct sili_port *ap)
{
	struct sili_ccb *ccb;
	u_int32_t mask;
	int tag;

	mask = ap->ap_active;
	while (mask) {
		tag = ffs(mask) - 1;
		mask &= ~(1 << tag);
		ccb = &ap->ap_ccbs[tag];
		if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_EXPIRED) {
			sili_core_timeout(ccb, 0);
		}
	}
}

static
__inline
void
sili_start_timeout(struct sili_ccb *ccb)
{
	if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_DESIRED) {
		ccb->ccb_xa.flags |= ATA_F_TIMEOUT_RUNNING;
		callout_reset(&ccb->ccb_timeout,
			      (ccb->ccb_xa.timeout * hz + 999) / 1000,
			      sili_ata_cmd_timeout_unserialized, ccb);
	}
}

void
sili_start(struct sili_ccb *ccb)
{
	struct sili_port *ap = ccb->ccb_port;
#if 0
	struct sili_softc *sc = ap->ap_sc;
#endif

	KKASSERT(ccb->ccb_xa.state == ATA_S_PENDING);

	/*
	 * Sync our SGE table and PRB
	 */
	bus_dmamap_sync(ap->ap_dmamem_prbs->adm_tag,
			ap->ap_dmamem_prbs->adm_map,
			BUS_DMASYNC_PREWRITE);

	/*
	 * XXX dmamap for PRB XXX  BUS_DMASYNC_PREWRITE
	 */

	/*
	 * Controller will update shared memory!
	 * XXX bus_dmamap_sync ... BUS_DMASYNC_PREREAD ...
	 */
	/* Prepare RFIS area for write by controller */

	/*
	 * There's no point trying to optimize this; it only shaves a few
	 * nanoseconds, so just queue the command and call our generic issue.
	 */
	sili_issue_pending_commands(ap, ccb);
}
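
/*
 * Illustrative note (not from the original source): the callout_reset()
 * in sili_start_timeout() above converts the per-xfer timeout from
 * milliseconds to callout ticks, rounding up; e.g. with hz = 100 a 5000ms
 * timeout becomes (5000 * 100 + 999) / 1000 = 500 ticks.
 */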

#if 0
/*
 * While holding the port lock acquire exclusive access to the port.
 *
 * This is used when running the state machine to initialize and identify
 * targets over a port multiplier.  Setting exclusive access prevents
 * sili_port_intr() from activating any requests sitting on the pending
 * queue.
 */
void
sili_beg_exclusive_access(struct sili_port *ap, struct ata_port *at)
{
	KKASSERT((ap->ap_flags & AP_F_EXCLUSIVE_ACCESS) == 0);
	ap->ap_flags |= AP_F_EXCLUSIVE_ACCESS;
	while (ap->ap_active) {
		sili_port_intr(ap, 1);
		sili_os_softsleep();
	}
}

void
sili_end_exclusive_access(struct sili_port *ap, struct ata_port *at)
{
	KKASSERT((ap->ap_flags & AP_F_EXCLUSIVE_ACCESS) != 0);
	ap->ap_flags &= ~AP_F_EXCLUSIVE_ACCESS;
	sili_issue_pending_commands(ap, NULL);
}
#endif

/*
 * If ccb is not NULL enqueue and/or issue it.
 *
 * If ccb is NULL issue whatever we can from the queue.  However, nothing
 * new is issued if the exclusive access flag is set or expired ccb's are
 * present.
 *
 * If existing commands are still active (ap_active) we can only
 * issue matching new commands.
 */
void
sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb)
{
	/*
	 * Enqueue the ccb.
	 *
	 * If just running the queue and in exclusive access mode we
	 * just return.  Also in this case if there are any expired ccb's
	 * we want to clear the queue so the port can be safely stopped.
	 *
	 * XXX sili chip - expiration needs to be per-target if PM supports
	 *     FBSS?
	 */
	if (ccb) {
		TAILQ_INSERT_TAIL(&ap->ap_ccb_pending, ccb, ccb_entry);
	} else if (ap->ap_expired) {
		return;
	}

	/*
	 * Pull the next ccb off the queue and run it if possible.
	 * If the port is not ready to accept commands enable the
	 * ready interrupt instead of starting a new command.
	 *
	 * XXX limit ncqdepth for attached devices behind PM
	 */
	while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
		/*
		 * Port may be wedged.
		 */
		if ((sili_pread(ap, SILI_PREG_STATUS) &
		    SILI_PREG_STATUS_READY) == 0) {
			kprintf("%s: slot %d NOT READY\n",
				ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot);
			sili_pwrite(ap, SILI_PREG_INT_ENABLE,
				    SILI_PREG_INT_READY);
			break;
		}

		/*
		 * Handle exclusivity requirements.  ATA_F_EXCLUSIVE is used
		 * when we may have to access the rfis which is stored in
		 * the LRAM PRB.  Unfortunately reading the LRAM PRB is
		 * highly problematic, so requests (like PM requests) which
		 * need to access the rfis use exclusive mode and then
		 * access the copy made by the port interrupt code back in
		 * host memory.
		 */
		if (ap->ap_active & ~ap->ap_expired) {
			/*
			 * There may be multiple ccb's already running;
			 * if any are running and ap_run_flags sets
			 * one of these flags then we know only one is
			 * running.
			 *
			 * XXX Current AUTOSENSE code forces exclusivity
			 *     to simplify the code.
			 */
			if (ap->ap_run_flags &
			    (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) {
				break;
			}

			/*
			 * If the ccb we want to run is exclusive and ccb's
			 * are still active on the port, we can't queue it
			 * yet.
			 *
			 * XXX Current AUTOSENSE code forces exclusivity
			 *     to simplify the code.
			 */
			if (ccb->ccb_xa.flags &
			    (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) {
				break;
			}
		}

		TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
		ccb->ccb_xa.state = ATA_S_ONCHIP;
		ap->ap_active |= 1 << ccb->ccb_slot;
		ap->ap_active_cnt++;
		ap->ap_run_flags = ccb->ccb_xa.flags;

		/*
		 * We can't use the CMD_FIFO method because it requires us
		 * building the PRB in the LRAM, and the LRAM is buggy.  So
		 * we use host memory for the PRB.
		 */
		sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot),
			    (u_int32_t)ccb->ccb_prb_paddr);
		sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot) + 4,
			    (u_int32_t)(ccb->ccb_prb_paddr >> 32));
		/* sili_pwrite(ap, SILI_PREG_CMD_FIFO, ccb->ccb_slot); */
		sili_start_timeout(ccb);
	}
}

void
sili_intr(void *arg)
{
	struct sili_softc	*sc = arg;
	struct sili_port	*ap;
	u_int32_t		gint;
	int			port;

	/*
	 * Check if the master enable is up, and whether any interrupts are
	 * pending.
	 *
	 * Clear the ints we got.
	 */
	if ((sc->sc_flags & SILI_F_INT_GOOD) == 0)
		return;
	gint = sili_read(sc, SILI_REG_GINT);
	if (gint == 0 || gint == 0xffffffff)
		return;
	sili_write(sc, SILI_REG_GINT, gint);

	/*
	 * Process interrupts for each port in a non-blocking fashion.
	 */
	while (gint & SILI_REG_GINT_PORTMASK) {
		port = ffs(gint) - 1;
		ap = sc->sc_ports[port];
		if (ap) {
			if (sili_os_lock_port_nb(ap) == 0) {
				sili_port_intr(ap, 0);
				sili_os_unlock_port(ap);
			} else {
				sili_port_interrupt_redisable(ap);
				sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
			}
		}
		gint &= ~(1 << port);
	}
}

/*
 * Core called from helper thread.
 */
void
sili_port_thread_core(struct sili_port *ap, int mask)
{
	/*
	 * Process any expired timeouts.
	 */
	sili_os_lock_port(ap);
	if (mask & AP_SIGF_TIMEOUT) {
		sili_check_active_timeouts(ap);
	}

	/*
	 * Process port interrupts which require a higher level of
	 * intervention.
	 */
	if (mask & AP_SIGF_PORTINT) {
		sili_port_intr(ap, 1);
		sili_port_interrupt_reenable(ap);
		sili_os_unlock_port(ap);
	} else {
		sili_os_unlock_port(ap);
	}
}

/*
 * Core per-port interrupt handler.
 *
 * If blockable is 0 we cannot call sili_os_sleep() at all and we can only
 * deal with normal command completions which do not require blocking.
 */
void
sili_port_intr(struct sili_port *ap, int blockable)
{
	struct sili_softc	*sc = ap->ap_sc;
	u_int32_t		is;
	int			slot;
	struct sili_ccb		*ccb = NULL;
	struct ata_port		*ccb_at = NULL;
#ifdef DIAGNOSTIC
	u_int32_t		tmp;
#endif
	u_int32_t		active;
	u_int32_t		finished;
	const u_int32_t		blockable_mask = SILI_PREG_IST_PHYRDYCHG |
						 SILI_PREG_IST_DEVEXCHG |
						 SILI_PREG_IST_CERROR |
						 SILI_PREG_IST_DECODE |
						 SILI_PREG_IST_CRC |
						 SILI_PREG_IST_HANDSHK;
	const u_int32_t		fatal_mask = SILI_PREG_IST_PHYRDYCHG |
					     SILI_PREG_IST_DEVEXCHG |
					     SILI_PREG_IST_DECODE |
					     SILI_PREG_IST_CRC |
					     SILI_PREG_IST_HANDSHK;

	enum { NEED_NOTHING, NEED_HOTPLUG_INSERT,
	       NEED_HOTPLUG_REMOVE } need = NEED_NOTHING;
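
	/*
	 * Illustrative overview (not from the original source): the handler
	 * below walks the port interrupt status roughly in this order -
	 * command errors (CERROR), SDB/SNTF notification, hot-plug
	 * (PHYRDYCHG/DEVEXCHG), remaining fatal errors, then normal command
	 * completions, the READY interrupt, and finally either a port
	 * reinit (if only expired commands remain) or re-issue of pending
	 * commands.
	 */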

	/*
	 * NOTE: CCOMPLETE was automatically cleared when we read INT_STATUS.
	 */
	is = sili_pread(ap, SILI_PREG_INT_STATUS);
	is &= SILI_PREG_IST_MASK;
	if (is & SILI_PREG_IST_CCOMPLETE)
		sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CCOMPLETE);

	/*
	 * If we can't block then we can't handle these here.  Disable
	 * the interrupts in question so we don't live-lock; the helper
	 * thread will re-enable them.
	 *
	 * If the port is in a completely failed state we do not want
	 * to drop through to failed-command-processing if blockable is 0,
	 * just let the thread deal with it all.
	 *
	 * Otherwise we fall through and still handle DHRS and any commands
	 * which completed normally.  Even if we are errored we haven't
	 * stopped the port yet so CI/SACT are still good.
	 */
	if (blockable == 0) {
		if (ap->ap_state == AP_S_FATAL_ERROR) {
			sili_port_interrupt_redisable(ap);
			sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
			/*is &= ~blockable_mask;*/
			return;
		}
		if (is & blockable_mask) {
			sili_port_interrupt_redisable(ap);
			sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
			/*is &= ~blockable_mask;*/
			return;
		}
	}

	if (is & SILI_PREG_IST_CERROR) {
		/*
		 * Command failed (blockable).
		 *
		 * This stops command processing.  We can extract the PM
		 * target from the PMP field in SILI_PREG_CONTEXT.  The
		 * tag is not necessarily valid so don't use that.
		 *
		 * We must then expire all CCB's for that target and resume
		 * processing if any other targets have active commands.
		 * Particular error codes can be recovered by reading the LOG
		 * page.
		 *
		 * The expire handling code will do the rest, which is
		 * basically to reset the port once the only active
		 * commands remaining are all expired.
		 */
		u_int32_t error;
		int	  target;
		int	  resume = 1;

		target = (sili_pread(ap, SILI_PREG_CONTEXT) >>
			  SILI_PREG_CONTEXT_PMPORT_SHIFT) &
			 SILI_PREG_CONTEXT_PMPORT_MASK;
		sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CERROR);
		active = ap->ap_active & ~ap->ap_expired;
		error = sili_pread(ap, SILI_PREG_CERROR);
		kprintf("%s.%d target error %d active=%08x hactive=%08x "
			"SERR=%b\n",
			PORTNAME(ap), target, error,
			active, sili_pread(ap, SILI_PREG_SLOTST),
			sili_pread(ap, SILI_PREG_SERR), SILI_PFMT_SERR);

		while (active) {
			slot = ffs(active) - 1;
			ccb = &ap->ap_ccbs[slot];
			if ((ccb_at = ccb->ccb_xa.at) == NULL)
				ccb_at = &ap->ap_ata[0];
			if (target == ccb_at->at_target) {
				if (ccb->ccb_xa.flags & ATA_F_NCQ &&
				    (error == SILI_PREG_CERROR_DEVICE ||
				     error == SILI_PREG_CERROR_SDBERROR)) {
					ccb_at->at_features |= ATA_PORT_F_READLOG;
				}
				if (sili_core_timeout(ccb, 1) == 0)
					resume = 0;
			}
			active &= ~(1 << slot);
		}

		/*
		 * Resume will be 0 if the timeout reinited and restarted
		 * the port.  Otherwise we resume the port to allow other
		 * commands to complete.
		 */
		if (resume)
			sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESUME);
	}

	/*
	 * Device notification to us (non-blocking).
	 *
	 * This is interrupt status SILI_PREG_IST_SDB.
	 *
	 * NOTE! On some parts notification bits can get set without
	 *	 generating an interrupt.  It is unclear whether this is
	 *	 a bug in the PM (sending a D2H device setbits with 'N' set
	 *	 and 'I' not set), or a bug in the host controller.
	 *
	 * It only seems to occur under load.
	 */
	if (sc->sc_flags & SILI_F_SSNTF) {
		u_int32_t data;
		const char *xstr;

		data = sili_pread(ap, SILI_PREG_SNTF);
		if (is & SILI_PREG_IST_SDB) {
			sili_pwrite(ap, SILI_PREG_INT_STATUS,
				    SILI_PREG_IST_SDB);
			is &= ~SILI_PREG_IST_SDB;
			xstr = " (no SDBS!)";
		} else {
			xstr = "";
		}
		if (data) {
			kprintf("%s: NOTIFY %08x%s\n",
				PORTNAME(ap), data, xstr);
			sili_pwrite(ap, SILI_PREG_SNTF, data);
			sili_cam_changed(ap, NULL, -1);
		}
	}

	/*
	 * Port change (hot-plug) (blockable).
	 *
	 * A PCS interrupt will occur on hot-plug once communication is
	 * established.
	 *
	 * A PRCS interrupt will occur on hot-unplug (and possibly also
	 * on hot-plug).
	 *
	 * XXX We can then check the CPS (Cold Presence State) bit, if
	 * supported, to determine if a device is plugged in or not and do
	 * the right thing.
	 *
	 * WARNING: A PCS interrupt is cleared by clearing DIAG_X, and
	 *	    can also occur if an unsolicited COMINIT is received.
	 *	    If this occurs command processing is automatically
	 *	    stopped (CR goes inactive) and the port must be stopped
	 *	    and restarted.
	 */
	if (is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG)) {
		/* XXX */
		sili_pwrite(ap, SILI_PREG_SERR,
			    (SILI_PREG_SERR_DIAG_N | SILI_PREG_SERR_DIAG_X));
		sili_pwrite(ap, SILI_PREG_INT_STATUS,
			    is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG));

		is &= ~(SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG);
		kprintf("%s: Port change\n", PORTNAME(ap));

		switch (sili_pread(ap, SILI_PREG_SSTS) & SILI_PREG_SSTS_DET) {
		case SILI_PREG_SSTS_DET_DEV:
			if (ap->ap_type == ATA_PORT_T_NONE &&
			    ap->ap_probe == ATA_PROBE_FAILED) {
				need = NEED_HOTPLUG_INSERT;
				goto fatal;
			}
			break;
		default:
			kprintf("%s: Device lost\n", PORTNAME(ap));
			if (ap->ap_type != ATA_PORT_T_NONE) {
				need = NEED_HOTPLUG_REMOVE;
				goto fatal;
			}
			break;
		}
	}

	/*
	 * Check for remaining errors - they are fatal.  (blockable)
	 */
	if (is & fatal_mask) {
		u_int32_t serr;

		sili_pwrite(ap, SILI_PREG_INT_STATUS, is & fatal_mask);

		serr = sili_pread(ap, SILI_PREG_SERR);
		kprintf("%s: Unrecoverable errors (IS: %b, SERR: %b), "
			"disabling port.\n",
			PORTNAME(ap),
			is, SILI_PFMT_INT_STATUS,
			serr, SILI_PFMT_SERR
		);
		is &= ~fatal_mask;
		/* XXX try recovery first */
		goto fatal;
	}

	/*
	 * Fail all outstanding commands if we know the port won't recover.
	 *
	 * We may have a ccb_at if the failed command is known and was
	 * being sent to a device over a port multiplier (PM).  In this
	 * case if the port itself has not completely failed we fail just
	 * the commands related to that target.
	 */
	if (ap->ap_state == AP_S_FATAL_ERROR &&
	    (ap->ap_active & ~ap->ap_expired)) {
		kprintf("%s: Fatal port error, expiring %08x\n",
			PORTNAME(ap), ap->ap_active & ~ap->ap_expired);
fatal:
		ap->ap_state = AP_S_FATAL_ERROR;

		/*
		 * Error all the active slots.  If running across a PM
		 * try to error out just the slots related to the target.
		 */
		active = ap->ap_active & ~ap->ap_expired;

		while (active) {
			slot = ffs(active) - 1;
			active &= ~(1 << slot);
			ccb = &ap->ap_ccbs[slot];
			sili_core_timeout(ccb, 1);
		}
	}

	/*
	 * CCB completion (non-blocking).
	 *
	 * CCB completion is detected by noticing the slot bit in
	 * the port slot status register has cleared while the bit
	 * is still set in our ap_active variable.
	 *
	 * When completing expired events we must remember to reinit
	 * the port once everything is clear.
	 *
	 * Due to a single-level recursion when reading the log page,
	 * it is possible for the slot to already have been cleared
	 * for some expired tags; do not include expired tags in
	 * the list.
	 */
	active = ap->ap_active & ~sili_pread(ap, SILI_PREG_SLOTST);
	active &= ~ap->ap_expired;

	finished = active;
	while (active) {
		slot = ffs(active) - 1;
		ccb = &ap->ap_ccbs[slot];

		DPRINTF(SILI_D_INTR, "%s: slot %d is complete%s\n",
			PORTNAME(ap), slot, ccb->ccb_xa.state == ATA_S_ERROR ?
			" (error)" : "");

		active &= ~(1 << slot);

		/*
		 * XXX sync POSTREAD for return data?
		 */
		ap->ap_active &= ~(1 << ccb->ccb_slot);
		--ap->ap_active_cnt;

		/*
		 * Complete the ccb.  If the ccb was marked expired it
		 * may or may not have been cleared from the port;
		 * make sure we mark it as having timed out.
		 *
		 * In a normal completion if AUTOSENSE is set we copy
		 * the PRB LRAM rfis back to the rfis in host memory.
		 *
		 * XXX Currently AUTOSENSE also forces exclusivity so we
		 *     can safely work around a hardware bug when reading
		 *     the LRAM.
		 */
		if (ap->ap_expired & (1 << ccb->ccb_slot)) {
			ap->ap_expired &= ~(1 << ccb->ccb_slot);
			ccb->ccb_xa.state = ATA_S_TIMEOUT;
			ccb->ccb_done(ccb);
			ccb->ccb_xa.complete(&ccb->ccb_xa);
		} else {
			if (ccb->ccb_xa.state == ATA_S_ONCHIP) {
				ccb->ccb_xa.state = ATA_S_COMPLETE;
				if (ccb->ccb_xa.flags & ATA_F_AUTOSENSE) {
					memcpy(ccb->ccb_xa.rfis,
					       &ccb->ccb_prb_lram->prb_d2h,
					       sizeof(ccb->ccb_prb_lram->prb_d2h));
					if (ccb->ccb_xa.state == ATA_S_TIMEOUT)
						ccb->ccb_xa.state = ATA_S_ERROR;
				}
			}
			ccb->ccb_done(ccb);
		}
	}
	if (is & SILI_PREG_IST_READY) {
		is &= ~SILI_PREG_IST_READY;
		sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_READY);
		sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_READY);
	}

	/*
	 * If we had expired commands and were waiting for
	 * remaining commands to complete, and they have now
	 * completed, we can reinit the port.
	 *
	 * This will also clean out the expired commands.
	 * The timeout code also calls sili_port_reinit() if
	 * the only commands remaining after a timeout are all
	 * now expired commands.
	 *
	 * Otherwise just reissue.
	 */
	if (ap->ap_expired && ap->ap_active == ap->ap_expired) {
		if (finished)
			sili_port_reinit(ap);
	} else {
		sili_issue_pending_commands(ap, NULL);
	}

	/*
	 * Cleanup.  Will not be set if non-blocking.
	 */
	switch(need) {
	case NEED_HOTPLUG_INSERT:
		/*
		 * A hot-plug insertion event has occurred and all
		 * outstanding commands have already been revoked.
		 *
		 * Don't recurse if this occurs while we are
		 * resetting the port.
		 *
		 * Place the port in a continuous COMRESET state
		 * until the INIT code gets to it.
		 */
		kprintf("%s: HOTPLUG - Device inserted\n",
			PORTNAME(ap));
		ap->ap_probe = ATA_PROBE_NEED_INIT;
		sili_cam_changed(ap, NULL, -1);
		break;
	case NEED_HOTPLUG_REMOVE:
		/*
		 * A hot-plug removal event has occurred and all
		 * outstanding commands have already been revoked.
		 *
		 * Don't recurse if this occurs while we are
		 * resetting the port.
		 */
		kprintf("%s: HOTPLUG - Device removed\n",
			PORTNAME(ap));
		sili_port_hardstop(ap);
		/* ap_probe set to failed */
		sili_cam_changed(ap, NULL, -1);
		break;
	default:
		break;
	}
}

struct sili_ccb *
sili_get_ccb(struct sili_port *ap)
{
	struct sili_ccb *ccb;

	lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE);
	ccb = TAILQ_FIRST(&ap->ap_ccb_free);
	if (ccb != NULL) {
		KKASSERT(ccb->ccb_xa.state == ATA_S_PUT);
		TAILQ_REMOVE(&ap->ap_ccb_free, ccb, ccb_entry);
		ccb->ccb_xa.state = ATA_S_SETUP;
		ccb->ccb_xa.at = NULL;
	}
	lockmgr(&ap->ap_ccb_lock, LK_RELEASE);

	return (ccb);
}

void
sili_put_ccb(struct sili_ccb *ccb)
{
	struct sili_port *ap = ccb->ccb_port;

#ifdef DIAGNOSTIC
	if (ccb->ccb_xa.state != ATA_S_COMPLETE &&
	    ccb->ccb_xa.state != ATA_S_TIMEOUT &&
	    ccb->ccb_xa.state != ATA_S_ERROR) {
		kprintf("%s: invalid ata_xfer state %02x in sili_put_ccb, "
			"slot %d\n",
			PORTNAME(ccb->ccb_port), ccb->ccb_xa.state,
			ccb->ccb_slot);
	}
#endif

	ccb->ccb_xa.state = ATA_S_PUT;
	lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE);
	TAILQ_INSERT_TAIL(&ap->ap_ccb_free, ccb, ccb_entry);
	lockmgr(&ap->ap_ccb_lock, LK_RELEASE);
}

struct sili_ccb *
sili_get_err_ccb(struct sili_port *ap)
{
	struct sili_ccb *err_ccb;

	KKASSERT(sili_pread(ap, SILI_PREG_CI) == 0);
	KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) == 0);
	ap->ap_flags |= AP_F_ERR_CCB_RESERVED;

#ifdef DIAGNOSTIC
	KKASSERT(ap->ap_err_busy == 0);
	ap->ap_err_busy = 1;
#endif
	/*
	 * Grab a CCB to use for error recovery.  This should never fail, as
	 * we ask atascsi to reserve one for us at init time.
	 */
	err_ccb = ap->ap_err_ccb;
	KKASSERT(err_ccb != NULL);
	err_ccb->ccb_xa.flags = 0;
	err_ccb->ccb_done = sili_empty_done;

	return err_ccb;
}

void
sili_put_err_ccb(struct sili_ccb *ccb)
{
	struct sili_port *ap = ccb->ccb_port;

#ifdef DIAGNOSTIC
	KKASSERT(ap->ap_err_busy);
#endif
	KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) != 0);

	KKASSERT(ccb == ap->ap_err_ccb);

#ifdef DIAGNOSTIC
	ap->ap_err_busy = 0;
#endif
	ap->ap_flags &= ~AP_F_ERR_CCB_RESERVED;
}

/*
 * Read log page 10h to get the NCQ error.  The result, if any, is
 * recorded in the failed command's CCB; nothing is returned.
 */
void
sili_port_read_ncq_error(struct sili_port *ap, int target)
{
	struct sili_ccb *ccb;
	struct ata_fis_h2d *fis;
	int status;

	DPRINTF(SILI_D_VERBOSE, "%s: read log page\n", PORTNAME(ap));

	/* Prep error CCB for READ LOG EXT, page 10h, 1 sector. */
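	/*
	 * Per the ATA spec, log page 10h (the NCQ command error log)
	 * contains the tag of the queued command that failed along with
	 * an image of its result registers; it is parsed further below.
	 */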
	ccb = sili_get_err_ccb(ap);
	ccb->ccb_done = sili_empty_done;
	ccb->ccb_xa.flags = ATA_F_NOWAIT | ATA_F_READ | ATA_F_POLL;
	ccb->ccb_xa.data = ap->ap_err_scratch;
	ccb->ccb_xa.datalen = 512;
	ccb->ccb_xa.complete = sili_dummy_done;
	ccb->ccb_xa.at = &ap->ap_ata[target];
	fis = &ccb->ccb_prb->prb_h2d;
	bzero(fis, sizeof(*fis));

	fis->type = ATA_FIS_TYPE_H2D;
	fis->flags = ATA_H2D_FLAGS_CMD | target;
	fis->command = ATA_C_READ_LOG_EXT;
	fis->lba_low = 0x10;		/* queued error log page (10h) */
	fis->sector_count = 1;		/* number of sectors (1) */
	fis->sector_count_exp = 0;
	fis->lba_mid = 0;		/* starting offset */
	fis->lba_mid_exp = 0;
	fis->device = 0;

	/*
	 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb
	 */
	if (sili_load_prb(ccb) != 0) {
		status = ATA_S_ERROR;
	} else {
		ccb->ccb_xa.state = ATA_S_PENDING;
		status = sili_poll(ccb, 1000, sili_quick_timeout);
	}

	/*
	 * Just spew if it fails; there isn't much we can do at this point.
	 */
	if (status != ATA_S_COMPLETE) {
		kprintf("%s: log page read failed, slot %d was still active.\n",
			ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot);
	}

	/* Done with the error CCB now. */
	sili_unload_prb(ccb);
	sili_put_err_ccb(ccb);

	/* Extract failed register set and tags from the scratch space. */
	if (status == ATA_S_COMPLETE) {
		struct ata_log_page_10h *log;
		int err_slot;

		log = (struct ata_log_page_10h *)ap->ap_err_scratch;
		if (log->err_regs.type & ATA_LOG_10H_TYPE_NOTQUEUED) {
			/*
			 * Not queued bit was set - wasn't an NCQ error?
			 *
			 * XXX This bit seems to be set a lot even for NCQ
			 *     errors?
			 */
		} else {
			/*
			 * Copy back the log record as a D2H register FIS.
			 */
			err_slot = log->err_regs.type &
				   ATA_LOG_10H_TYPE_TAG_MASK;
			ccb = &ap->ap_ccbs[err_slot];
			if (ap->ap_expired & (1 << ccb->ccb_slot)) {
				kprintf("%s: read NCQ error page slot=%d\n",
					ATANAME(ap, ccb->ccb_xa.at), err_slot
				);
				memcpy(&ccb->ccb_prb->prb_d2h, &log->err_regs,
				       sizeof(struct ata_fis_d2h));
				ccb->ccb_prb->prb_d2h.type = ATA_FIS_TYPE_D2H;
				ccb->ccb_prb->prb_d2h.flags = 0;
				if (ccb->ccb_xa.state == ATA_S_TIMEOUT)
					ccb->ccb_xa.state = ATA_S_ERROR;
			} else {
				kprintf("%s: read NCQ error page slot=%d, "
					"slot does not match any cmds\n",
					ATANAME(ccb->ccb_port, ccb->ccb_xa.at),
					err_slot
				);
			}
		}
	}
}

/*
 * Allocate memory for various structures DMAd by hardware.  The maximum
 * number of segments for these tags is 1 so the DMA memory will have a
 * single physical base address.
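 *
 * (The single segment's bus address is recorded by the
 * sili_dmamem_saveseg() callback defined below.)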
 */
struct sili_dmamem *
sili_dmamem_alloc(struct sili_softc *sc, bus_dma_tag_t tag)
{
	struct sili_dmamem *adm;
	int error;

	adm = kmalloc(sizeof(*adm), M_DEVBUF, M_INTWAIT | M_ZERO);

	error = bus_dmamem_alloc(tag, (void **)&adm->adm_kva,
				 BUS_DMA_ZERO, &adm->adm_map);
	if (error == 0) {
		adm->adm_tag = tag;
		error = bus_dmamap_load(tag, adm->adm_map,
					adm->adm_kva,
					bus_dma_tag_getmaxsize(tag),
					sili_dmamem_saveseg, &adm->adm_busaddr,
					0);
	}
	if (error) {
		if (adm->adm_map) {
			bus_dmamap_destroy(tag, adm->adm_map);
			adm->adm_map = NULL;
			adm->adm_tag = NULL;
			adm->adm_kva = NULL;
		}
		kfree(adm, M_DEVBUF);
		adm = NULL;
	}
	return (adm);
}

static
void
sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs, int nsegs, int error)
{
	KKASSERT(error == 0);
	KKASSERT(nsegs == 1);
	*(bus_addr_t *)info = segs->ds_addr;
}

void
sili_dmamem_free(struct sili_softc *sc, struct sili_dmamem *adm)
{
	if (adm->adm_map) {
		bus_dmamap_unload(adm->adm_tag, adm->adm_map);
		bus_dmamap_destroy(adm->adm_tag, adm->adm_map);
		adm->adm_map = NULL;
		adm->adm_tag = NULL;
		adm->adm_kva = NULL;
	}
	kfree(adm, M_DEVBUF);
}

u_int32_t
sili_read(struct sili_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
			  BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}

void
sili_write(struct sili_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
			  BUS_SPACE_BARRIER_WRITE);
}

u_int32_t
sili_pread(struct sili_port *ap, bus_size_t r)
{
	bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4,
			  BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(ap->ap_sc->sc_iot, ap->ap_ioh, r));
}

void
sili_pwrite(struct sili_port *ap, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(ap->ap_sc->sc_iot, ap->ap_ioh, r, v);
	bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4,
			  BUS_SPACE_BARRIER_WRITE);
}

/*
 * Wait up to (timeout) milliseconds for the masked port register to
 * match the target.
 *
 * Timeout is in milliseconds.
 */
int
sili_pwait_eq(struct sili_port *ap, int timeout,
	      bus_size_t r, u_int32_t mask, u_int32_t target)
{
	int t;

	/*
	 * Loop hard up to 100uS
	 */
	for (t = 0; t < 100; ++t) {
		if ((sili_pread(ap, r) & mask) == target)
			return (0);
		sili_os_hardsleep(1);	/* us */
	}

	do {
		timeout -= sili_os_softsleep();
		if ((sili_pread(ap, r) & mask) == target)
			return (0);
	} while (timeout > 0);
	return (1);
}

int
sili_wait_ne(struct sili_softc *sc, bus_size_t r, u_int32_t mask,
	     u_int32_t target)
{
	int t;

	/*
	 * Loop hard up to 100uS
	 */
	for (t = 0; t < 100; ++t) {
		if ((sili_read(sc, r) & mask) != target)
			return (0);
		sili_os_hardsleep(1);	/* us */
	}

	/*
	 * And one millisecond the slow way
	 */
	t = 1000;
	do {
		t -= sili_os_softsleep();
		if ((sili_read(sc, r) & mask) != target)
			return (0);
	} while (t > 0);

	return (1);
}

/*
 * Acquire an ata transfer.
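 *
 * (The returned xfer is embedded in a sili_ccb taken from the port's
 * free ccb list; sili_ata_put_xfer() hands it back to that list.)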
 *
 * Pass a NULL at for direct-attached transfers, and a non-NULL at for
 * targets that go through the port multiplier.
 */
struct ata_xfer *
sili_ata_get_xfer(struct sili_port *ap, struct ata_port *at)
{
	struct sili_ccb	*ccb;

	ccb = sili_get_ccb(ap);
	if (ccb == NULL) {
		DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer: NULL ccb\n",
		    PORTNAME(ap));
		return (NULL);
	}

	DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer got slot %d\n",
	    PORTNAME(ap), ccb->ccb_slot);

	bzero(ccb->ccb_xa.fis, sizeof(*ccb->ccb_xa.fis));
	ccb->ccb_xa.at = at;
	ccb->ccb_xa.fis->type = ATA_FIS_TYPE_H2D;

	return (&ccb->ccb_xa);
}

void
sili_ata_put_xfer(struct ata_xfer *xa)
{
	struct sili_ccb	*ccb = (struct sili_ccb *)xa;

	DPRINTF(SILI_D_XFER, "sili_ata_put_xfer slot %d\n", ccb->ccb_slot);

	sili_put_ccb(ccb);
}

int
sili_ata_cmd(struct ata_xfer *xa)
{
	struct sili_ccb	*ccb = (struct sili_ccb *)xa;

	KKASSERT(xa->state == ATA_S_SETUP);

	if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR)
		goto failcmd;
#if 0
	kprintf("%s: started std command %b ccb %d ccb_at %p %d\n",
		ATANAME(ccb->ccb_port, ccb->ccb_xa.at),
		sili_pread(ccb->ccb_port, SILI_PREG_CMD), SILI_PFMT_CMD,
		ccb->ccb_slot,
		ccb->ccb_xa.at,
		ccb->ccb_xa.at ? ccb->ccb_xa.at->at_target : -1);
#endif

	ccb->ccb_done = sili_ata_cmd_done;

	if (sili_load_prb(ccb) != 0)
		goto failcmd;

	xa->state = ATA_S_PENDING;

	if (xa->flags & ATA_F_POLL)
		return (sili_poll(ccb, xa->timeout, sili_ata_cmd_timeout));

	crit_enter();
	KKASSERT((xa->flags & ATA_F_TIMEOUT_EXPIRED) == 0);
	xa->flags |= ATA_F_TIMEOUT_DESIRED;
	sili_start(ccb);
	crit_exit();
	return (xa->state);

failcmd:
	crit_enter();
	xa->state = ATA_S_ERROR;
	xa->complete(xa);
	crit_exit();
	return (ATA_S_ERROR);
}

static void
sili_ata_cmd_done(struct sili_ccb *ccb)
{
	struct ata_xfer	*xa = &ccb->ccb_xa;

	/*
	 * NOTE: The callout does not lock the port and may race us
	 *	 modifying the flags, so make sure it is stopped.
	 */
	if (xa->flags & ATA_F_TIMEOUT_RUNNING) {
		callout_stop(&ccb->ccb_timeout);
		xa->flags &= ~ATA_F_TIMEOUT_RUNNING;
	}
	xa->flags &= ~(ATA_F_TIMEOUT_DESIRED | ATA_F_TIMEOUT_EXPIRED);

	KKASSERT(xa->state != ATA_S_ONCHIP);
	sili_unload_prb(ccb);

#ifdef DIAGNOSTIC
	if (xa->state != ATA_S_COMPLETE &&
	    xa->state != ATA_S_ERROR &&
	    xa->state != ATA_S_TIMEOUT) {
		kprintf("%s: invalid ata_xfer state %02x in sili_ata_cmd_done, "
			"slot %d\n",
			PORTNAME(ccb->ccb_port), xa->state, ccb->ccb_slot);
	}
#endif
	if (xa->state != ATA_S_TIMEOUT)
		xa->complete(xa);
}

/*
 * Timeout from the callout, MPSAFE - nothing can mess with the CCB's
 * flags while the callout is running.
 *
 * We can't safely get the port lock here or delay; we could block
 * the callout thread.
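 *
 * (All we do here is flag the CCB as expired and signal the port
 * thread; the actual timeout processing happens later from the port
 * thread's serialized context.)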
 */
static void
sili_ata_cmd_timeout_unserialized(void *arg)
{
	struct sili_ccb	*ccb = arg;
	struct sili_port *ap = ccb->ccb_port;

	ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING;
	ccb->ccb_xa.flags |= ATA_F_TIMEOUT_EXPIRED;
	sili_os_signal_port_thread(ap, AP_SIGF_TIMEOUT);
}

void
sili_ata_cmd_timeout(struct sili_ccb *ccb)
{
	sili_core_timeout(ccb, 0);
}

/*
 * Timeout code, typically called when the port command processor is running.
 *
 * Returns 0 if all timeout processing completed, non-zero if it is still
 * in progress.
 */
static
int
sili_core_timeout(struct sili_ccb *ccb, int really_error)
{
	struct ata_xfer	*xa = &ccb->ccb_xa;
	struct sili_port *ap = ccb->ccb_port;
	struct ata_port	*at;

	at = ccb->ccb_xa.at;

	kprintf("%s: CMD %s state=%d slot=%d\n"
		"\t active=%08x\n"
		"\texpired=%08x\n"
		"\thactive=%08x\n",
		ATANAME(ap, at),
		(really_error ? "ERROR" : "TIMEOUT"),
		ccb->ccb_xa.state, ccb->ccb_slot,
		ap->ap_active,
		ap->ap_expired,
		sili_pread(ap, SILI_PREG_SLOTST)
	);

	/*
	 * NOTE: The timeout will not be running if the command was polled.
	 *	 If we got here, at least one of these flags should be set.
	 *
	 *	 However, it might be running if we are called from the
	 *	 interrupt error handling code.
	 */
	KKASSERT(xa->flags & (ATA_F_POLL | ATA_F_TIMEOUT_DESIRED |
			      ATA_F_TIMEOUT_RUNNING));
	if (xa->flags & ATA_F_TIMEOUT_RUNNING) {
		callout_stop(&ccb->ccb_timeout);
		xa->flags &= ~ATA_F_TIMEOUT_RUNNING;
	}
	xa->flags &= ~ATA_F_TIMEOUT_EXPIRED;

	if (ccb->ccb_xa.state == ATA_S_PENDING) {
		TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_done(ccb);
		xa->complete(xa);
		sili_issue_pending_commands(ap, NULL);
		return(1);
	}
	if (ccb->ccb_xa.state != ATA_S_ONCHIP) {
		kprintf("%s: Unexpected state during timeout: %d\n",
			ATANAME(ap, at), ccb->ccb_xa.state);
		return(1);
	}

	/*
	 * We can't process timeouts while other commands are running.
	 */
	ap->ap_expired |= 1 << ccb->ccb_slot;

	if (ap->ap_active != ap->ap_expired) {
		kprintf("%s: Deferred timeout until it's safe, slot %d\n",
			ATANAME(ap, at), ccb->ccb_slot);
		return(1);
	}

	/*
	 * We have to issue a port reinit.  We don't read an error log
	 * page for timeouts.  Reiniting the port will clear all pending
	 * commands.
	 */
	sili_port_reinit(ap);
	return(0);
}

/*
 * Used only by softreset, pm_port_probe, and read_ncq_error, in very
 * specialized, controlled circumstances.
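 *
 * (These callers poll their commands - the NCQ error log read does so
 * while holding the reserved error CCB - so no log page is read here;
 * a command still on-chip is simply pulled off the active list and the
 * port is reinitialized.)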
2565 */ 2566 void 2567 sili_quick_timeout(struct sili_ccb *ccb) 2568 { 2569 struct sili_port *ap = ccb->ccb_port; 2570 2571 switch (ccb->ccb_xa.state) { 2572 case ATA_S_PENDING: 2573 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry); 2574 ccb->ccb_xa.state = ATA_S_TIMEOUT; 2575 break; 2576 case ATA_S_ONCHIP: 2577 KKASSERT((ap->ap_active & ~ap->ap_expired) == 2578 (1 << ccb->ccb_slot)); 2579 ccb->ccb_xa.state = ATA_S_TIMEOUT; 2580 ap->ap_active &= ~(1 << ccb->ccb_slot); 2581 KKASSERT(ap->ap_active_cnt > 0); 2582 --ap->ap_active_cnt; 2583 sili_port_reinit(ap); 2584 break; 2585 default: 2586 panic("%s: sili_quick_timeout: ccb in bad state %d", 2587 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_xa.state); 2588 } 2589 } 2590 2591 static void 2592 sili_dummy_done(struct ata_xfer *xa) 2593 { 2594 } 2595 2596 static void 2597 sili_empty_done(struct sili_ccb *ccb) 2598 { 2599 } 2600