1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 2009 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * 37 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org> 38 * 39 * Permission to use, copy, modify, and distribute this software for any 40 * purpose with or without fee is hereby granted, provided that the above 41 * copyright notice and this permission notice appear in all copies. 42 * 43 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 44 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 45 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 46 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 47 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 48 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 49 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 50 * 51 * 52 * 53 * $OpenBSD: sili.c,v 1.147 2009/02/16 21:19:07 miod Exp $ 54 */ 55 56 #include "sili.h" 57 58 void sili_port_interrupt_enable(struct sili_port *ap); 59 void sili_port_interrupt_redisable(struct sili_port *ap); 60 void sili_port_interrupt_reenable(struct sili_port *ap); 61 62 int sili_load_prb(struct sili_ccb *); 63 void sili_unload_prb(struct sili_ccb *); 64 static void sili_load_prb_callback(void *info, bus_dma_segment_t *segs, 65 int nsegs, int error); 66 void sili_start(struct sili_ccb *); 67 static void sili_port_reinit(struct sili_port *ap); 68 int sili_port_softreset(struct sili_port *ap); 69 int sili_port_hardreset(struct sili_port *ap); 70 void sili_port_hardstop(struct sili_port *ap); 71 void sili_port_listen(struct sili_port *ap); 72 73 static void sili_ata_cmd_timeout_unserialized(void *); 74 static int sili_core_timeout(struct sili_ccb *ccb, int really_error); 75 void sili_check_active_timeouts(struct sili_port *ap); 76 77 void sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb); 78 79 void sili_port_read_ncq_error(struct sili_port *, int); 80 81 struct sili_dmamem *sili_dmamem_alloc(struct sili_softc *, bus_dma_tag_t tag); 82 void sili_dmamem_free(struct sili_softc *, struct sili_dmamem *); 83 static void sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs, int nsegs, int error); 84 85 static void sili_dummy_done(struct ata_xfer *xa); 86 static void sili_empty_done(struct sili_ccb *ccb); 87 static void sili_ata_cmd_done(struct sili_ccb *ccb); 88 89 /* 90 * Initialize the global SILI hardware. This code does not set up any of 91 * its ports. 92 */ 93 int 94 sili_init(struct sili_softc *sc) 95 { 96 DPRINTF(SILI_D_VERBOSE, " GHC 0x%b", 97 sili_read(sc, SILI_REG_GHC), SILI_FMT_GHC); 98 99 /* 100 * Reset the entire chip. This also resets all ports. 101 * 102 * The spec doesn't say anything about how long we have to 103 * wait, so wait 10ms. 104 */ 105 sili_write(sc, SILI_REG_GCTL, SILI_REG_GCTL_GRESET); 106 sili_os_sleep(10); 107 sili_write(sc, SILI_REG_GCTL, 0); 108 sili_os_sleep(10); 109 110 return (0); 111 } 112 113 /* 114 * Allocate and initialize an SILI port. 115 */ 116 int 117 sili_port_alloc(struct sili_softc *sc, u_int port) 118 { 119 struct sili_port *ap; 120 struct ata_port *at; 121 struct sili_prb *prb; 122 struct sili_ccb *ccb; 123 int rc = ENOMEM; 124 int error; 125 int i; 126 127 ap = kmalloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO); 128 ap->ap_err_scratch = kmalloc(512, M_DEVBUF, M_WAITOK | M_ZERO); 129 130 ksnprintf(ap->ap_name, sizeof(ap->ap_name), "%s%d.%d", 131 device_get_name(sc->sc_dev), 132 device_get_unit(sc->sc_dev), 133 port); 134 sc->sc_ports[port] = ap; 135 136 /* 137 * Allocate enough so we never have to reallocate, it makes 138 * it easier. 139 * 140 * ap_pmcount will be reduced by the scan if we encounter the 141 * port multiplier port prior to target 15. 142 */ 143 if (ap->ap_ata == NULL) { 144 ap->ap_ata = kmalloc(sizeof(*ap->ap_ata) * SILI_MAX_PMPORTS, 145 M_DEVBUF, M_INTWAIT | M_ZERO); 146 for (i = 0; i < SILI_MAX_PMPORTS; ++i) { 147 at = &ap->ap_ata[i]; 148 at->at_sili_port = ap; 149 at->at_target = i; 150 at->at_probe = ATA_PROBE_NEED_INIT; 151 at->at_features |= ATA_PORT_F_RESCAN; 152 ksnprintf(at->at_name, sizeof(at->at_name), 153 "%s.%d", ap->ap_name, i); 154 } 155 } 156 if (bus_space_subregion(sc->sc_piot, sc->sc_pioh, 157 SILI_PORT_REGION(port), SILI_PORT_SIZE, 158 &ap->ap_ioh) != 0) { 159 device_printf(sc->sc_dev, 160 "unable to create register window for port %d\n", 161 port); 162 goto freeport; 163 } 164 165 ap->ap_sc = sc; 166 ap->ap_num = port; 167 ap->ap_probe = ATA_PROBE_NEED_INIT; 168 TAILQ_INIT(&ap->ap_ccb_free); 169 TAILQ_INIT(&ap->ap_ccb_pending); 170 lockinit(&ap->ap_ccb_lock, "silipo", 0, 0); 171 172 /* Disable port interrupts */ 173 sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK); 174 175 /* 176 * Reset the port. This is similar to a Device Reset but far 177 * more invasive. We use Device Reset in our hardreset function. 178 * This function also does the same OOB initialization sequence 179 * that Device Reset does. 180 * 181 * NOTE: SILI_PREG_STATUS_READY will not be asserted unless and until 182 * a device is connected to the port, so we can't use it to 183 * verify that the port exists. 184 */ 185 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET); 186 if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) { 187 device_printf(sc->sc_dev, 188 "Port %d will not go into reset\n", port); 189 goto freeport; 190 } 191 sili_os_sleep(10); 192 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET); 193 194 /* 195 * Allocate the SGE Table 196 */ 197 ap->ap_dmamem_prbs = sili_dmamem_alloc(sc, sc->sc_tag_prbs); 198 if (ap->ap_dmamem_prbs == NULL) { 199 kprintf("%s: NOSGET\n", PORTNAME(ap)); 200 goto freeport; 201 } 202 203 /* 204 * Set up the SGE table base address 205 */ 206 ap->ap_prbs = (struct sili_prb *)SILI_DMA_KVA(ap->ap_dmamem_prbs); 207 208 /* 209 * Allocate a CCB for each command slot 210 */ 211 ap->ap_ccbs = kmalloc(sizeof(struct sili_ccb) * sc->sc_ncmds, M_DEVBUF, 212 M_WAITOK | M_ZERO); 213 if (ap->ap_ccbs == NULL) { 214 device_printf(sc->sc_dev, 215 "unable to allocate command list for port %d\n", 216 port); 217 goto freeport; 218 } 219 220 /* 221 * Most structures are in the port BAR. Assign convenient 222 * pointers in the CCBs 223 */ 224 for (i = 0; i < sc->sc_ncmds; i++) { 225 ccb = &ap->ap_ccbs[i]; 226 227 error = bus_dmamap_create(sc->sc_tag_data, BUS_DMA_ALLOCNOW, 228 &ccb->ccb_dmamap); 229 if (error) { 230 device_printf(sc->sc_dev, 231 "unable to create dmamap for port %d " 232 "ccb %d\n", port, i); 233 goto freeport; 234 } 235 236 /* 237 * WARNING!!! Access to the rfis is only allowed under very 238 * carefully controlled circumstances because it 239 * is located in the LRAM and reading from the 240 * LRAM has hardware issues which can blow the 241 * port up. I kid you not (from Linux, and 242 * verified by testing here). 243 */ 244 callout_init(&ccb->ccb_timeout); 245 ccb->ccb_slot = i; 246 ccb->ccb_port = ap; 247 ccb->ccb_prb = &ap->ap_prbs[i]; 248 ccb->ccb_prb_paddr = SILI_DMA_DVA(ap->ap_dmamem_prbs) + 249 sizeof(*ccb->ccb_prb) * i; 250 ccb->ccb_xa.fis = &ccb->ccb_prb->prb_h2d; 251 prb = bus_space_kva(ap->ap_sc->sc_iot, ap->ap_ioh, 252 SILI_PREG_LRAM_SLOT(i)); 253 ccb->ccb_prb_lram = prb; 254 /* 255 * Point our rfis to host-memory instead of the LRAM PRB. 256 * It will be copied back if ATA_F_AUTOSENSE is set. The 257 * LRAM PRB is buggy. 258 */ 259 /*ccb->ccb_xa.rfis = &prb->prb_d2h;*/ 260 ccb->ccb_xa.rfis = (void *)ccb->ccb_xa.fis; 261 262 ccb->ccb_xa.packetcmd = prb_packet(ccb->ccb_prb); 263 ccb->ccb_xa.tag = i; 264 265 ccb->ccb_xa.state = ATA_S_COMPLETE; 266 267 /* 268 * Reserve CCB[1] as the error CCB. It doesn't matter 269 * which one we use for the Sili controllers. 270 */ 271 if (i == 1) 272 ap->ap_err_ccb = ccb; 273 else 274 sili_put_ccb(ccb); 275 } 276 /* 277 * Do not call sili_port_init() here, the helper thread will 278 * call it for the parallel probe 279 */ 280 sili_os_start_port(ap); 281 return(0); 282 freeport: 283 sili_port_free(sc, port); 284 return (rc); 285 } 286 287 /* 288 * This is called once by the low level attach (from the helper thread) 289 * to get the port state machine rolling, and typically only called again 290 * on a hot-plug insertion event. 291 * 292 * This is called for PM attachments and hot-plug insertion events, and 293 * typically not called again until after an unplug/replug sequence. 294 * 295 * Returns 0 if a device is successfully detected. 296 */ 297 int 298 sili_port_init(struct sili_port *ap) 299 { 300 /* 301 * Do a very hard reset of the port 302 */ 303 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET); 304 sili_os_sleep(10); 305 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET); 306 307 /* 308 * Register initialization 309 */ 310 sili_pwrite(ap, SILI_PREG_FIFO_CTL, 311 SILI_PREG_FIFO_CTL_ENCODE(1024, 1024)); 312 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_32BITDMA | 313 SILI_PREG_CTL_PMA); 314 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_NOAUTOCC); 315 if (ap->ap_sc->sc_flags & SILI_F_SSNTF) 316 sili_pwrite(ap, SILI_PREG_SNTF, -1); 317 ap->ap_probe = ATA_PROBE_NEED_HARD_RESET; 318 ap->ap_pmcount = 0; 319 sili_port_interrupt_enable(ap); 320 return (0); 321 } 322 323 /* 324 * Handle an errored port. This routine is called when the only 325 * commands left on the queue are expired, meaning we can safely 326 * go through a port init to clear its state. 327 * 328 * We complete the expired CCBs and then restart the queue. 329 */ 330 static 331 void 332 sili_port_reinit(struct sili_port *ap) 333 { 334 struct sili_ccb *ccb; 335 struct ata_port *at; 336 int slot; 337 int target; 338 u_int32_t data; 339 340 if (bootverbose || 1) { 341 kprintf("%s: reiniting port after error reent=%d " 342 "expired=%08x\n", 343 PORTNAME(ap), 344 (ap->ap_flags & AP_F_REINIT_ACTIVE), 345 ap->ap_expired); 346 } 347 348 /* 349 * Clear port resume, clear bits 16:13 in the port device status 350 * register. This is from the data sheet. 351 * 352 * Data sheet does not specify a delay but it seems prudent. 353 */ 354 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME); 355 sili_os_sleep(10); 356 for (target = 0; target < SILI_MAX_PMPORTS; ++target) { 357 data = sili_pread(ap, SILI_PREG_PM_STATUS(target)); 358 data &= ~(SILI_PREG_PM_STATUS_SERVICE | 359 SILI_PREG_PM_STATUS_LEGACY | 360 SILI_PREG_PM_STATUS_NATIVE | 361 SILI_PREG_PM_STATUS_VBSY); 362 sili_pwrite(ap, SILI_PREG_PM_STATUS(target), data); 363 sili_pwrite(ap, SILI_PREG_PM_QACTIVE(target), 0); 364 } 365 366 /* 367 * Issue a Port Initialize and wait for it to clear. This flushes 368 * commands but does not reset the port. Then wait for port ready. 369 */ 370 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_INIT); 371 if (sili_pwait_clr_to(ap, 5000, SILI_PREG_STATUS, SILI_PREG_CTL_INIT)) { 372 kprintf("%s: Unable to reinit, port failed\n", 373 PORTNAME(ap)); 374 } 375 if (sili_pwait_set(ap, SILI_PREG_STATUS, SILI_PREG_STATUS_READY)) { 376 kprintf("%s: Unable to reinit, port will not come ready\n", 377 PORTNAME(ap)); 378 } 379 380 /* 381 * If reentrant, stop here. Otherwise the state for the original 382 * ahci_port_reinit() will get ripped out from under it. 383 */ 384 if (ap->ap_flags & AP_F_REINIT_ACTIVE) 385 return; 386 ap->ap_flags |= AP_F_REINIT_ACTIVE; 387 388 /* 389 * Read the LOG ERROR page for targets that returned a specific 390 * D2H FIS with ERR set. 391 * 392 * Don't bother if we are already using the error CCB. 393 */ 394 if ((ap->ap_flags & AP_F_ERR_CCB_RESERVED) == 0) { 395 for (target = 0; target < SILI_MAX_PMPORTS; ++target) { 396 at = &ap->ap_ata[target]; 397 if (at->at_features & ATA_PORT_F_READLOG) { 398 at->at_features &= ~ATA_PORT_F_READLOG; 399 sili_port_read_ncq_error(ap, target); 400 } 401 } 402 } 403 404 /* 405 * Finally clean out the expired commands, we've probed the error 406 * status (or hopefully probed the error status). Well, ok, 407 * we probably didn't XXX. 408 */ 409 while (ap->ap_expired) { 410 slot = ffs(ap->ap_expired) - 1; 411 ap->ap_expired &= ~(1 << slot); 412 KKASSERT(ap->ap_active & (1 << slot)); 413 ap->ap_active &= ~(1 << slot); 414 --ap->ap_active_cnt; 415 ccb = &ap->ap_ccbs[slot]; 416 ccb->ccb_xa.state = ATA_S_TIMEOUT; 417 ccb->ccb_done(ccb); 418 ccb->ccb_xa.complete(&ccb->ccb_xa); 419 } 420 ap->ap_flags &= ~AP_F_REINIT_ACTIVE; 421 422 /* 423 * Wow. All done. We can get the port moving again. 424 */ 425 if (ap->ap_probe == ATA_PROBE_FAILED) { 426 kprintf("%s: reinit failed, port is dead\n", PORTNAME(ap)); 427 while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) { 428 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry); 429 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED; 430 ccb->ccb_xa.state = ATA_S_TIMEOUT; 431 ccb->ccb_done(ccb); 432 ccb->ccb_xa.complete(&ccb->ccb_xa); 433 } 434 } else { 435 sili_issue_pending_commands(ap, NULL); 436 } 437 } 438 439 /* 440 * Enable or re-enable interrupts on a port. 441 * 442 * This routine is called from the port initialization code or from the 443 * helper thread as the real interrupt may be forced to turn off certain 444 * interrupt sources. 445 */ 446 void 447 sili_port_interrupt_enable(struct sili_port *ap) 448 { 449 u_int32_t data; 450 451 data = SILI_PREG_INT_CCOMPLETE | SILI_PREG_INT_CERROR | 452 SILI_PREG_INT_PHYRDYCHG | SILI_PREG_INT_DEVEXCHG | 453 SILI_PREG_INT_DECODE | SILI_PREG_INT_CRC | 454 SILI_PREG_INT_HANDSHK | SILI_PREG_INT_PMCHANGE; 455 if (ap->ap_sc->sc_flags & SILI_F_SSNTF) 456 data |= SILI_PREG_INT_SDB; 457 sili_pwrite(ap, SILI_PREG_INT_ENABLE, data); 458 } 459 460 void 461 sili_port_interrupt_redisable(struct sili_port *ap) 462 { 463 u_int32_t data; 464 465 data = sili_read(ap->ap_sc, SILI_REG_GCTL); 466 data &= SILI_REG_GINT_PORTMASK; 467 data &= ~(1 << ap->ap_num); 468 sili_write(ap->ap_sc, SILI_REG_GCTL, data); 469 } 470 471 void 472 sili_port_interrupt_reenable(struct sili_port *ap) 473 { 474 u_int32_t data; 475 476 data = sili_read(ap->ap_sc, SILI_REG_GCTL); 477 data &= SILI_REG_GINT_PORTMASK; 478 data |= (1 << ap->ap_num); 479 sili_write(ap->ap_sc, SILI_REG_GCTL, data); 480 } 481 482 /* 483 * Run the port / target state machine from a main context. 484 * 485 * The state machine for the port is always run. 486 * 487 * If atx is non-NULL run the state machine for a particular target. 488 * If atx is NULL run the state machine for all targets. 489 */ 490 void 491 sili_port_state_machine(struct sili_port *ap, int initial) 492 { 493 struct ata_port *at; 494 u_int32_t data; 495 int target; 496 int didsleep; 497 int loop; 498 499 /* 500 * State machine for port. Note that CAM is not yet associated 501 * during the initial parallel probe and the port's probe state 502 * will not get past ATA_PROBE_NEED_IDENT. 503 */ 504 { 505 if (initial == 0 && ap->ap_probe <= ATA_PROBE_NEED_HARD_RESET) { 506 kprintf("%s: Waiting 7 seconds on insertion\n", 507 PORTNAME(ap)); 508 sili_os_sleep(7000); 509 initial = 1; 510 } 511 if (ap->ap_probe == ATA_PROBE_NEED_INIT) 512 sili_port_init(ap); 513 if (ap->ap_probe == ATA_PROBE_NEED_HARD_RESET) 514 sili_port_reset(ap, NULL, 1); 515 if (ap->ap_probe == ATA_PROBE_NEED_SOFT_RESET) 516 sili_port_reset(ap, NULL, 0); 517 if (ap->ap_probe == ATA_PROBE_NEED_IDENT) 518 sili_cam_probe(ap, NULL); 519 } 520 if (ap->ap_type != ATA_PORT_T_PM) { 521 if (ap->ap_probe == ATA_PROBE_FAILED) { 522 sili_cam_changed(ap, NULL, 0); 523 } else if (ap->ap_probe >= ATA_PROBE_NEED_IDENT) { 524 sili_cam_changed(ap, NULL, 1); 525 } 526 return; 527 } 528 529 /* 530 * Port Multiplier state machine. 531 * 532 * Get a mask of changed targets and combine with any runnable 533 * states already present. 534 */ 535 for (loop = 0; ;++loop) { 536 if (sili_pm_read(ap, 15, SATA_PMREG_EINFO, &data)) { 537 kprintf("%s: PM unable to read hot-plug bitmap\n", 538 PORTNAME(ap)); 539 break; 540 } 541 542 /* 543 * Do at least one loop, then stop if no more state changes 544 * have occured. The PM might not generate a new 545 * notification until we clear the entire bitmap. 546 */ 547 if (loop && data == 0) 548 break; 549 550 /* 551 * New devices showing up in the bitmap require some spin-up 552 * time before we start probing them. Reset didsleep. The 553 * first new device we detect will sleep before probing. 554 * 555 * This only applies to devices whos change bit is set in 556 * the data, and does not apply to the initial boot-time 557 * probe. 558 */ 559 didsleep = 0; 560 561 for (target = 0; target < ap->ap_pmcount; ++target) { 562 at = &ap->ap_ata[target]; 563 564 /* 565 * Check the target state for targets behind the PM 566 * which have changed state. This will adjust 567 * at_probe and set ATA_PORT_F_RESCAN 568 * 569 * We want to wait at least 10 seconds before probing 570 * a newly inserted device. If the check status 571 * indicates a device is present and in need of a 572 * hard reset, we make sure we have slept before 573 * continuing. 574 * 575 * We also need to wait at least 1 second for the 576 * PHY state to change after insertion, if we 577 * haven't already waited the 10 seconds. 578 * 579 * NOTE: When pm_check_good finds a good port it 580 * typically starts us in probe state 581 * NEED_HARD_RESET rather than INIT. 582 */ 583 if (data & (1 << target)) { 584 if (initial == 0 && didsleep == 0) 585 sili_os_sleep(1000); 586 sili_pm_check_good(ap, target); 587 if (initial == 0 && didsleep == 0 && 588 at->at_probe <= ATA_PROBE_NEED_HARD_RESET 589 ) { 590 didsleep = 1; 591 kprintf("%s: Waiting 10 seconds on insertion\n", PORTNAME(ap)); 592 sili_os_sleep(10000); 593 } 594 } 595 596 /* 597 * Report hot-plug events before the probe state 598 * really gets hot. Only actual events are reported 599 * here to reduce spew. 600 */ 601 if (data & (1 << target)) { 602 kprintf("%s: HOTPLUG (PM) - ", ATANAME(ap, at)); 603 switch(at->at_probe) { 604 case ATA_PROBE_NEED_INIT: 605 case ATA_PROBE_NEED_HARD_RESET: 606 kprintf("Device inserted\n"); 607 break; 608 case ATA_PROBE_FAILED: 609 kprintf("Device removed\n"); 610 break; 611 default: 612 kprintf("Device probe in progress\n"); 613 break; 614 } 615 } 616 617 /* 618 * Run through the state machine as necessary if 619 * the port is not marked failed. 620 * 621 * The state machine may stop at NEED_IDENT if 622 * CAM is not yet attached. 623 * 624 * Acquire exclusive access to the port while we 625 * are doing this. This prevents command-completion 626 * from queueing commands for non-polled targets 627 * inbetween our probe steps. We need to do this 628 * because the reset probes can generate severe PHY 629 * and protocol errors and soft-brick the port. 630 */ 631 if (at->at_probe != ATA_PROBE_FAILED && 632 at->at_probe != ATA_PROBE_GOOD) { 633 if (at->at_probe == ATA_PROBE_NEED_INIT) 634 sili_pm_port_init(ap, at); 635 if (at->at_probe == ATA_PROBE_NEED_HARD_RESET) 636 sili_port_reset(ap, at, 1); 637 if (at->at_probe == ATA_PROBE_NEED_SOFT_RESET) 638 sili_port_reset(ap, at, 0); 639 if (at->at_probe == ATA_PROBE_NEED_IDENT) 640 sili_cam_probe(ap, at); 641 } 642 643 /* 644 * Add or remove from CAM 645 */ 646 if (at->at_features & ATA_PORT_F_RESCAN) { 647 at->at_features &= ~ATA_PORT_F_RESCAN; 648 if (at->at_probe == ATA_PROBE_FAILED) { 649 sili_cam_changed(ap, at, 0); 650 } else if (at->at_probe >= ATA_PROBE_NEED_IDENT) { 651 sili_cam_changed(ap, at, 1); 652 } 653 } 654 data &= ~(1 << target); 655 } 656 if (data) { 657 kprintf("%s: WARNING (PM): extra bits set in " 658 "EINFO: %08x\n", PORTNAME(ap), data); 659 while (target < SILI_MAX_PMPORTS) { 660 sili_pm_check_good(ap, target); 661 ++target; 662 } 663 } 664 } 665 } 666 667 /* 668 * De-initialize and detach a port. 669 */ 670 void 671 sili_port_free(struct sili_softc *sc, u_int port) 672 { 673 struct sili_port *ap = sc->sc_ports[port]; 674 struct sili_ccb *ccb; 675 676 /* 677 * Ensure port is disabled and its interrupts are all flushed. 678 */ 679 if (ap->ap_sc) { 680 sili_os_stop_port(ap); 681 sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK); 682 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET); 683 sili_write(ap->ap_sc, SILI_REG_GCTL, 684 sili_read(ap->ap_sc, SILI_REG_GCTL) & 685 ~SILI_REG_GINT_PORTST(ap->ap_num)); 686 } 687 688 if (ap->ap_ccbs) { 689 while ((ccb = sili_get_ccb(ap)) != NULL) { 690 if (ccb->ccb_dmamap) { 691 bus_dmamap_destroy(sc->sc_tag_data, 692 ccb->ccb_dmamap); 693 ccb->ccb_dmamap = NULL; 694 } 695 } 696 if ((ccb = ap->ap_err_ccb) != NULL) { 697 if (ccb->ccb_dmamap) { 698 bus_dmamap_destroy(sc->sc_tag_data, 699 ccb->ccb_dmamap); 700 ccb->ccb_dmamap = NULL; 701 } 702 ap->ap_err_ccb = NULL; 703 } 704 kfree(ap->ap_ccbs, M_DEVBUF); 705 ap->ap_ccbs = NULL; 706 } 707 708 if (ap->ap_dmamem_prbs) { 709 sili_dmamem_free(sc, ap->ap_dmamem_prbs); 710 ap->ap_dmamem_prbs = NULL; 711 } 712 if (ap->ap_ata) { 713 kfree(ap->ap_ata, M_DEVBUF); 714 ap->ap_ata = NULL; 715 } 716 if (ap->ap_err_scratch) { 717 kfree(ap->ap_err_scratch, M_DEVBUF); 718 ap->ap_err_scratch = NULL; 719 } 720 721 /* bus_space(9) says we dont free the subregions handle */ 722 723 kfree(ap, M_DEVBUF); 724 sc->sc_ports[port] = NULL; 725 } 726 727 /* 728 * Reset a port. 729 * 730 * If hard is 0 perform a softreset of the port. 731 * If hard is 1 perform a hard reset of the port. 732 * If hard is 2 perform a hard reset of the port and cycle the phy. 733 * 734 * If at is non-NULL an indirect port via a port-multiplier is being 735 * reset, otherwise a direct port is being reset. 736 * 737 * NOTE: Indirect ports can only be soft-reset. 738 */ 739 int 740 sili_port_reset(struct sili_port *ap, struct ata_port *at, int hard) 741 { 742 int rc; 743 744 if (hard) { 745 if (at) 746 rc = sili_pm_hardreset(ap, at->at_target, hard); 747 else 748 rc = sili_port_hardreset(ap); 749 } else { 750 if (at) 751 rc = sili_pm_softreset(ap, at->at_target); 752 else 753 rc = sili_port_softreset(ap); 754 } 755 return(rc); 756 } 757 758 /* 759 * SILI soft reset, Section 10.4.1 760 * 761 * (at) will be NULL when soft-resetting a directly-attached device, and 762 * non-NULL when soft-resetting a device through a port multiplier. 763 * 764 * This function keeps port communications intact and attempts to generate 765 * a reset to the connected device using device commands. 766 */ 767 int 768 sili_port_softreset(struct sili_port *ap) 769 { 770 struct sili_ccb *ccb = NULL; 771 struct sili_prb *prb; 772 int error; 773 u_int32_t sig; 774 775 error = EIO; 776 777 if (bootverbose) 778 kprintf("%s: START SOFTRESET\n", PORTNAME(ap)); 779 780 crit_enter(); 781 ap->ap_state = AP_S_NORMAL; 782 783 /* 784 * Prep the special soft-reset SII command. 785 */ 786 ccb = sili_get_err_ccb(ap); 787 ccb->ccb_done = sili_empty_done; 788 ccb->ccb_xa.flags = ATA_F_POLL | ATA_F_AUTOSENSE | ATA_F_EXCLUSIVE; 789 ccb->ccb_xa.complete = sili_dummy_done; 790 ccb->ccb_xa.at = NULL; 791 792 prb = ccb->ccb_prb; 793 bzero(&prb->prb_h2d, sizeof(prb->prb_h2d)); 794 prb->prb_h2d.flags = 0; 795 prb->prb_control = SILI_PRB_CTRL_SOFTRESET; 796 prb->prb_override = 0; 797 prb->prb_xfer_count = 0; 798 799 ccb->ccb_xa.state = ATA_S_PENDING; 800 801 /* 802 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb 803 */ 804 if (sili_poll(ccb, 8000, sili_quick_timeout) != ATA_S_COMPLETE) { 805 kprintf("%s: First FIS failed\n", PORTNAME(ap)); 806 goto err; 807 } 808 809 sig = (prb->prb_d2h.lba_high << 24) | 810 (prb->prb_d2h.lba_mid << 16) | 811 (prb->prb_d2h.lba_low << 8) | 812 (prb->prb_d2h.sector_count); 813 if (bootverbose) 814 kprintf("%s: SOFTRESET SIGNATURE %08x\n", PORTNAME(ap), sig); 815 816 /* 817 * If the softreset is trying to clear a BSY condition after a 818 * normal portreset we assign the port type. 819 * 820 * If the softreset is being run first as part of the ccb error 821 * processing code then report if the device signature changed 822 * unexpectedly. 823 */ 824 if (ap->ap_type == ATA_PORT_T_NONE) { 825 ap->ap_type = sili_port_signature(ap, NULL, sig); 826 } else { 827 if (sili_port_signature(ap, NULL, sig) != ap->ap_type) { 828 kprintf("%s: device signature unexpectedly " 829 "changed\n", PORTNAME(ap)); 830 error = EBUSY; /* XXX */ 831 } 832 } 833 error = 0; 834 err: 835 if (ccb != NULL) { 836 sili_put_err_ccb(ccb); 837 } 838 839 /* 840 * If we failed to softreset make the port quiescent, otherwise 841 * make sure the port's start/stop state matches what it was on 842 * entry. 843 * 844 * Don't kill the port if the softreset is on a port multiplier 845 * target, that would kill all the targets! 846 */ 847 if (bootverbose) { 848 kprintf("%s: END SOFTRESET %d prob=%d state=%d\n", 849 PORTNAME(ap), error, ap->ap_probe, ap->ap_state); 850 } 851 if (error) { 852 sili_port_hardstop(ap); 853 /* ap_probe set to failed */ 854 } else { 855 ap->ap_probe = ATA_PROBE_NEED_IDENT; 856 ap->ap_pmcount = 1; 857 } 858 crit_exit(); 859 860 sili_pwrite(ap, SILI_PREG_SERR, -1); 861 if (bootverbose) 862 kprintf("%s: END SOFTRESET\n", PORTNAME(ap)); 863 864 return (error); 865 } 866 867 /* 868 * This function does a hard reset of the port. Note that the device 869 * connected to the port could still end-up hung. Phy detection is 870 * used to short-cut longer operations. 871 */ 872 int 873 sili_port_hardreset(struct sili_port *ap) 874 { 875 u_int32_t data; 876 int error; 877 int loop; 878 879 if (bootverbose) 880 kprintf("%s: START HARDRESET\n", PORTNAME(ap)); 881 882 ap->ap_state = AP_S_NORMAL; 883 884 /* 885 * Set SCTL up for any speed restrictions before issuing the 886 * device reset. This may also take us out of an INIT state 887 * (if we were previously in a continuous reset state from 888 * sili_port_listen()). 889 */ 890 data = SILI_PREG_SCTL_SPM_NONE | 891 SILI_PREG_SCTL_IPM_NONE | 892 SILI_PREG_SCTL_SPD_NONE | 893 SILI_PREG_SCTL_DET_NONE; 894 if (SiliForceGen1 & (1 << ap->ap_num)) { 895 data &= ~SILI_PREG_SCTL_SPD_NONE; 896 data |= SILI_PREG_SCTL_SPD_GEN1; 897 } 898 sili_pwrite(ap, SILI_PREG_SCTL, data); 899 900 /* 901 * The transition from a continuous COMRESET state from 902 * sili_port_listen() back to device detect can take a 903 * few seconds. It's quite non-deterministic. Most of 904 * the time it takes far less. Use a polling loop to 905 * wait. 906 */ 907 loop = 4000; 908 while (loop > 0) { 909 data = sili_pread(ap, SILI_PREG_SSTS); 910 if (data & SILI_PREG_SSTS_DET) 911 break; 912 loop -= sili_os_softsleep(); 913 } 914 sili_os_sleep(100); 915 916 /* 917 * Issue Device Reset, give the phy a little time to settle down. 918 * 919 * NOTE: Unlike Port Reset, the port ready signal will not 920 * go active unless a device is established to be on 921 * the port. 922 */ 923 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA); 924 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME); 925 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET); 926 if (sili_pwait_clr(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET)) { 927 kprintf("%s: hardreset failed to clear\n", PORTNAME(ap)); 928 } 929 sili_os_sleep(20); 930 931 /* 932 * Try to determine if there is a device on the port. 933 * 934 * Give the device 3/10 second to at least be detected. 935 */ 936 loop = 300; 937 while (loop > 0) { 938 data = sili_pread(ap, SILI_PREG_SSTS); 939 if (data & SILI_PREG_SSTS_DET) 940 break; 941 loop -= sili_os_softsleep(); 942 } 943 if (loop <= 0) { 944 if (bootverbose) { 945 kprintf("%s: Port appears to be unplugged\n", 946 PORTNAME(ap)); 947 } 948 error = ENODEV; 949 goto done; 950 } 951 952 /* 953 * There is something on the port. Give the device 3 seconds 954 * to detect. 955 */ 956 if (sili_pwait_eq(ap, 3000, SILI_PREG_SSTS, 957 SILI_PREG_SSTS_DET, SILI_PREG_SSTS_DET_DEV)) { 958 if (bootverbose) { 959 kprintf("%s: Device may be powered down\n", 960 PORTNAME(ap)); 961 } 962 error = ENODEV; 963 goto pmdetect; 964 } 965 966 /* 967 * We got something that definitely looks like a device. Give 968 * the device time to send us its first D2H FIS. 969 * 970 * This effectively waits for BSY to clear. 971 */ 972 if (sili_pwait_set_to(ap, 3000, SILI_PREG_STATUS, 973 SILI_PREG_STATUS_READY)) { 974 error = EBUSY; 975 } else { 976 error = 0; 977 } 978 979 pmdetect: 980 /* 981 * Do the PM port probe regardless of how things turned out above. 982 * 983 * If the PM port probe fails it will return the original error 984 * from above. 985 */ 986 if (ap->ap_sc->sc_flags & SILI_F_SPM) { 987 error = sili_pm_port_probe(ap, error); 988 } 989 990 done: 991 /* 992 * Finish up 993 */ 994 switch(error) { 995 case 0: 996 if (ap->ap_type == ATA_PORT_T_PM) 997 ap->ap_probe = ATA_PROBE_GOOD; 998 else 999 ap->ap_probe = ATA_PROBE_NEED_SOFT_RESET; 1000 break; 1001 case ENODEV: 1002 /* 1003 * No device detected. 1004 */ 1005 data = sili_pread(ap, SILI_PREG_SSTS); 1006 1007 switch(data & SATA_PM_SSTS_DET) { 1008 case SILI_PREG_SSTS_DET_DEV_NE: 1009 kprintf("%s: Device not communicating\n", 1010 PORTNAME(ap)); 1011 break; 1012 case SILI_PREG_SSTS_DET_OFFLINE: 1013 kprintf("%s: PHY offline\n", 1014 PORTNAME(ap)); 1015 break; 1016 default: 1017 kprintf("%s: No device detected\n", 1018 PORTNAME(ap)); 1019 break; 1020 } 1021 sili_port_hardstop(ap); 1022 break; 1023 default: 1024 /* 1025 * (EBUSY) 1026 */ 1027 kprintf("%s: Device on port is bricked\n", 1028 PORTNAME(ap)); 1029 sili_port_hardstop(ap); 1030 break; 1031 } 1032 sili_pwrite(ap, SILI_PREG_SERR, -1); 1033 1034 if (bootverbose) 1035 kprintf("%s: END HARDRESET %d\n", PORTNAME(ap), error); 1036 return (error); 1037 } 1038 1039 /* 1040 * Hard-stop on hot-swap device removal. See 10.10.1 1041 * 1042 * Place the port in a mode that will allow it to detect hot-swap insertions. 1043 * This is a bit imprecise because just setting-up SCTL to DET_INIT doesn't 1044 * seem to do the job. 1045 */ 1046 void 1047 sili_port_hardstop(struct sili_port *ap) 1048 { 1049 struct sili_ccb *ccb; 1050 struct ata_port *at; 1051 int i; 1052 int slot; 1053 int serial; 1054 1055 ap->ap_state = AP_S_FATAL_ERROR; 1056 ap->ap_probe = ATA_PROBE_FAILED; 1057 ap->ap_type = ATA_PORT_T_NONE; 1058 1059 /* 1060 * Clean up AT sub-ports on SATA port. 1061 */ 1062 for (i = 0; ap->ap_ata && i < SILI_MAX_PMPORTS; ++i) { 1063 at = &ap->ap_ata[i]; 1064 at->at_type = ATA_PORT_T_NONE; 1065 at->at_probe = ATA_PROBE_FAILED; 1066 at->at_features &= ~ATA_PORT_F_READLOG; 1067 } 1068 1069 /* 1070 * Kill the port. Don't bother waiting for it to transition 1071 * back up. 1072 */ 1073 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET); 1074 if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) { 1075 kprintf("%s: Port will not go into reset\n", 1076 PORTNAME(ap)); 1077 } 1078 sili_os_sleep(10); 1079 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET); 1080 1081 /* 1082 * Turn off port-multiplier control bit 1083 */ 1084 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA); 1085 1086 /* 1087 * Clean up the command list. 1088 */ 1089 restart: 1090 while (ap->ap_active) { 1091 slot = ffs(ap->ap_active) - 1; 1092 ap->ap_active &= ~(1 << slot); 1093 ap->ap_expired &= ~(1 << slot); 1094 --ap->ap_active_cnt; 1095 ccb = &ap->ap_ccbs[slot]; 1096 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_RUNNING) { 1097 serial = ccb->ccb_xa.serial; 1098 callout_stop_sync(&ccb->ccb_timeout); 1099 if (serial != ccb->ccb_xa.serial) { 1100 kprintf("%s: Warning: timeout race ccb %p\n", 1101 PORTNAME(ap), ccb); 1102 goto restart; 1103 } 1104 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING; 1105 } 1106 ccb->ccb_xa.flags &= ~(ATA_F_TIMEOUT_DESIRED | 1107 ATA_F_TIMEOUT_EXPIRED); 1108 ccb->ccb_xa.state = ATA_S_TIMEOUT; 1109 ccb->ccb_done(ccb); 1110 ccb->ccb_xa.complete(&ccb->ccb_xa); 1111 } 1112 while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) { 1113 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry); 1114 ccb->ccb_xa.state = ATA_S_TIMEOUT; 1115 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED; 1116 ccb->ccb_done(ccb); 1117 ccb->ccb_xa.complete(&ccb->ccb_xa); 1118 } 1119 KKASSERT(ap->ap_active_cnt == 0); 1120 1121 /* 1122 * Put the port into a listen mode, we want to get insertion/removal 1123 * events. 1124 */ 1125 sili_port_listen(ap); 1126 } 1127 1128 /* 1129 * Place port into a listen mode for hotplug events only. The port has 1130 * already been reset and the command processor may not be ready due 1131 * to the lack of a device. 1132 */ 1133 void 1134 sili_port_listen(struct sili_port *ap) 1135 { 1136 u_int32_t data; 1137 1138 #if 1 1139 data = SILI_PREG_SCTL_SPM_NONE | 1140 SILI_PREG_SCTL_IPM_NONE | 1141 SILI_PREG_SCTL_SPD_NONE | 1142 SILI_PREG_SCTL_DET_INIT; 1143 if (SiliForceGen1 & (1 << ap->ap_num)) { 1144 data &= ~SILI_PREG_SCTL_SPD_NONE; 1145 data |= SILI_PREG_SCTL_SPD_GEN1; 1146 } 1147 #endif 1148 sili_os_sleep(20); 1149 sili_pwrite(ap, SILI_PREG_SERR, -1); 1150 sili_pwrite(ap, SILI_PREG_INT_ENABLE, SILI_PREG_INT_PHYRDYCHG | 1151 SILI_PREG_INT_DEVEXCHG); 1152 } 1153 1154 /* 1155 * Figure out what type of device is connected to the port, ATAPI or 1156 * DISK. 1157 */ 1158 int 1159 sili_port_signature(struct sili_port *ap, struct ata_port *at, u_int32_t sig) 1160 { 1161 if (bootverbose) 1162 kprintf("%s: sig %08x\n", ATANAME(ap, at), sig); 1163 if ((sig & 0xffff0000) == (SATA_SIGNATURE_ATAPI & 0xffff0000)) { 1164 return(ATA_PORT_T_ATAPI); 1165 } else if ((sig & 0xffff0000) == 1166 (SATA_SIGNATURE_PORT_MULTIPLIER & 0xffff0000)) { 1167 return(ATA_PORT_T_PM); 1168 } else { 1169 return(ATA_PORT_T_DISK); 1170 } 1171 } 1172 1173 /* 1174 * Load the DMA descriptor table for a CCB's buffer. 1175 * 1176 * NOTE: ATA_F_PIO is auto-selected by sili part. 1177 */ 1178 int 1179 sili_load_prb(struct sili_ccb *ccb) 1180 { 1181 struct sili_port *ap = ccb->ccb_port; 1182 struct sili_softc *sc = ap->ap_sc; 1183 struct ata_xfer *xa = &ccb->ccb_xa; 1184 struct sili_prb *prb = ccb->ccb_prb; 1185 struct sili_sge *sge; 1186 bus_dmamap_t dmap = ccb->ccb_dmamap; 1187 int error; 1188 1189 /* 1190 * Set up the PRB. The PRB contains 2 SGE's (1 if it is an ATAPI 1191 * command). The SGE must be set up to link to the rest of our 1192 * SGE array, in blocks of four SGEs (a SGE table) starting at 1193 */ 1194 prb->prb_xfer_count = 0; 1195 prb->prb_control = 0; 1196 prb->prb_override = 0; 1197 sge = (ccb->ccb_xa.flags & ATA_F_PACKET) ? 1198 &prb->prb_sge_packet : &prb->prb_sge_normal; 1199 if (xa->datalen == 0) { 1200 sge->sge_flags = SILI_SGE_FLAGS_TRM | SILI_SGE_FLAGS_DRD; 1201 sge->sge_count = 0; 1202 return (0); 1203 } 1204 1205 if (ccb->ccb_xa.flags & ATA_F_READ) 1206 prb->prb_control |= SILI_PRB_CTRL_READ; 1207 if (ccb->ccb_xa.flags & ATA_F_WRITE) 1208 prb->prb_control |= SILI_PRB_CTRL_WRITE; 1209 sge->sge_flags = SILI_SGE_FLAGS_LNK; 1210 sge->sge_count = 0; 1211 sge->sge_paddr = ccb->ccb_prb_paddr + 1212 offsetof(struct sili_prb, prb_sge[0]); 1213 1214 /* 1215 * Load our sge array. 1216 */ 1217 error = bus_dmamap_load(sc->sc_tag_data, dmap, 1218 xa->data, xa->datalen, 1219 sili_load_prb_callback, 1220 ccb, 1221 ((xa->flags & ATA_F_NOWAIT) ? 1222 BUS_DMA_NOWAIT : BUS_DMA_WAITOK)); 1223 if (error != 0) { 1224 kprintf("%s: error %d loading dmamap\n", PORTNAME(ap), error); 1225 return (1); 1226 } 1227 1228 bus_dmamap_sync(sc->sc_tag_data, dmap, 1229 (xa->flags & ATA_F_READ) ? 1230 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1231 1232 return (0); 1233 } 1234 1235 /* 1236 * Callback from BUSDMA system to load the segment list. 1237 * 1238 * The scatter/gather table is loaded by the sili chip in blocks of 1239 * four SGE's. If a continuance is required the last entry in each 1240 * block must point to the next block. 1241 */ 1242 static 1243 void 1244 sili_load_prb_callback(void *info, bus_dma_segment_t *segs, int nsegs, 1245 int error) 1246 { 1247 struct sili_ccb *ccb = info; 1248 struct sili_sge *sge; 1249 int sgi; 1250 1251 KKASSERT(nsegs <= SILI_MAX_SGET); 1252 1253 sgi = 0; 1254 sge = &ccb->ccb_prb->prb_sge[0]; 1255 while (nsegs) { 1256 if ((sgi & 3) == 3) { 1257 sge->sge_paddr = htole64(ccb->ccb_prb_paddr + 1258 offsetof(struct sili_prb, 1259 prb_sge[sgi + 1])); 1260 sge->sge_count = 0; 1261 sge->sge_flags = SILI_SGE_FLAGS_LNK; 1262 } else { 1263 sge->sge_paddr = htole64(segs->ds_addr); 1264 sge->sge_count = htole32(segs->ds_len); 1265 sge->sge_flags = 0; 1266 --nsegs; 1267 ++segs; 1268 } 1269 ++sge; 1270 ++sgi; 1271 } 1272 --sge; 1273 sge->sge_flags |= SILI_SGE_FLAGS_TRM; 1274 } 1275 1276 void 1277 sili_unload_prb(struct sili_ccb *ccb) 1278 { 1279 struct sili_port *ap = ccb->ccb_port; 1280 struct sili_softc *sc = ap->ap_sc; 1281 struct ata_xfer *xa = &ccb->ccb_xa; 1282 bus_dmamap_t dmap = ccb->ccb_dmamap; 1283 1284 if (xa->datalen != 0) { 1285 bus_dmamap_sync(sc->sc_tag_data, dmap, 1286 (xa->flags & ATA_F_READ) ? 1287 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1288 1289 bus_dmamap_unload(sc->sc_tag_data, dmap); 1290 1291 if (ccb->ccb_xa.flags & ATA_F_NCQ) 1292 xa->resid = 0; 1293 else 1294 xa->resid = xa->datalen - 1295 le32toh(ccb->ccb_prb->prb_xfer_count); 1296 } 1297 } 1298 1299 /* 1300 * Start a command and poll for completion. 1301 * 1302 * timeout is in ms and only counts once the command gets on-chip. 1303 * 1304 * Returns ATA_S_* state, compare against ATA_S_COMPLETE to determine 1305 * that no error occured. 1306 * 1307 * NOTE: If the caller specifies a NULL timeout function the caller is 1308 * responsible for clearing hardware state on failure, but we will 1309 * deal with removing the ccb from any pending queue. 1310 * 1311 * NOTE: NCQ should never be used with this function. 1312 * 1313 * NOTE: If the port is in a failed state and stopped we do not try 1314 * to activate the ccb. 1315 */ 1316 int 1317 sili_poll(struct sili_ccb *ccb, int timeout, 1318 void (*timeout_fn)(struct sili_ccb *)) 1319 { 1320 struct sili_port *ap = ccb->ccb_port; 1321 1322 if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR) { 1323 ccb->ccb_xa.state = ATA_S_ERROR; 1324 return(ccb->ccb_xa.state); 1325 } 1326 1327 KKASSERT((ap->ap_expired & (1 << ccb->ccb_slot)) == 0); 1328 sili_start(ccb); 1329 1330 do { 1331 sili_port_intr(ap, 1); 1332 switch(ccb->ccb_xa.state) { 1333 case ATA_S_ONCHIP: 1334 timeout -= sili_os_softsleep(); 1335 break; 1336 case ATA_S_PENDING: 1337 /* 1338 * The packet can get stuck on the pending queue 1339 * if the port refuses to come ready. XXX 1340 */ 1341 #if 0 1342 if (xxx AP_F_EXCLUSIVE_ACCESS) 1343 timeout -= sili_os_softsleep(); 1344 else 1345 #endif 1346 sili_os_softsleep(); 1347 sili_check_active_timeouts(ap); 1348 break; 1349 default: 1350 return (ccb->ccb_xa.state); 1351 } 1352 } while (timeout > 0); 1353 1354 /* 1355 * Don't spew if this is a probe during hard reset 1356 */ 1357 if (ap->ap_probe != ATA_PROBE_NEED_HARD_RESET) { 1358 kprintf("%s: Poll timeout slot %d\n", 1359 ATANAME(ap, ccb->ccb_xa.at), 1360 ccb->ccb_slot); 1361 } 1362 1363 timeout_fn(ccb); 1364 1365 return(ccb->ccb_xa.state); 1366 } 1367 1368 /* 1369 * When polling we have to check if the currently active CCB(s) 1370 * have timed out as the callout will be deadlocked while we 1371 * hold the port lock. 1372 */ 1373 void 1374 sili_check_active_timeouts(struct sili_port *ap) 1375 { 1376 struct sili_ccb *ccb; 1377 u_int32_t mask; 1378 int tag; 1379 1380 mask = ap->ap_active; 1381 while (mask) { 1382 tag = ffs(mask) - 1; 1383 mask &= ~(1 << tag); 1384 ccb = &ap->ap_ccbs[tag]; 1385 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_EXPIRED) { 1386 sili_core_timeout(ccb, 0); 1387 } 1388 } 1389 } 1390 1391 static 1392 __inline 1393 void 1394 sili_start_timeout(struct sili_ccb *ccb) 1395 { 1396 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_DESIRED) { 1397 ccb->ccb_xa.flags |= ATA_F_TIMEOUT_RUNNING; 1398 callout_reset(&ccb->ccb_timeout, 1399 (ccb->ccb_xa.timeout * hz + 999) / 1000, 1400 sili_ata_cmd_timeout_unserialized, ccb); 1401 } 1402 } 1403 1404 void 1405 sili_start(struct sili_ccb *ccb) 1406 { 1407 struct sili_port *ap = ccb->ccb_port; 1408 #if 0 1409 struct sili_softc *sc = ap->ap_sc; 1410 #endif 1411 1412 KKASSERT(ccb->ccb_xa.state == ATA_S_PENDING); 1413 1414 /* 1415 * Sync our SGE table and PRB 1416 */ 1417 bus_dmamap_sync(ap->ap_dmamem_prbs->adm_tag, 1418 ap->ap_dmamem_prbs->adm_map, 1419 BUS_DMASYNC_PREWRITE); 1420 1421 /* 1422 * XXX dmamap for PRB XXX BUS_DMASYNC_PREWRITE 1423 */ 1424 1425 /* 1426 * Controller will update shared memory! 1427 * XXX bus_dmamap_sync ... BUS_DMASYNC_PREREAD ... 1428 */ 1429 /* Prepare RFIS area for write by controller */ 1430 1431 /* 1432 * There's no point trying to optimize this, it only shaves a few 1433 * nanoseconds so just queue the command and call our generic issue. 1434 */ 1435 sili_issue_pending_commands(ap, ccb); 1436 } 1437 1438 /* 1439 * Wait for all commands to complete processing. We hold the lock so no 1440 * new commands will be queued. 1441 */ 1442 void 1443 sili_exclusive_access(struct sili_port *ap) 1444 { 1445 while (ap->ap_active) { 1446 sili_port_intr(ap, 1); 1447 sili_os_softsleep(); 1448 } 1449 } 1450 1451 /* 1452 * If ccb is not NULL enqueue and/or issue it. 1453 * 1454 * If ccb is NULL issue whatever we can from the queue. However, nothing 1455 * new is issued if the exclusive access flag is set or expired ccb's are 1456 * present. 1457 * 1458 * If existing commands are still active (ap_active) we can only 1459 * issue matching new commands. 1460 */ 1461 void 1462 sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb) 1463 { 1464 /* 1465 * Enqueue the ccb. 1466 * 1467 * If just running the queue and in exclusive access mode we 1468 * just return. Also in this case if there are any expired ccb's 1469 * we want to clear the queue so the port can be safely stopped. 1470 * 1471 * XXX sili chip - expiration needs to be per-target if PM supports 1472 * FBSS? 1473 */ 1474 if (ccb) { 1475 TAILQ_INSERT_TAIL(&ap->ap_ccb_pending, ccb, ccb_entry); 1476 } else if (ap->ap_expired) { 1477 return; 1478 } 1479 1480 /* 1481 * Pull the next ccb off the queue and run it if possible. 1482 * If the port is not ready to accept commands enable the 1483 * ready interrupt instead of starting a new command. 1484 * 1485 * XXX limit ncqdepth for attached devices behind PM 1486 */ 1487 while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) { 1488 /* 1489 * Port may be wedged. 1490 */ 1491 if ((sili_pread(ap, SILI_PREG_STATUS) & 1492 SILI_PREG_STATUS_READY) == 0) { 1493 kprintf("%s: slot %d NOT READY\n", 1494 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot); 1495 sili_pwrite(ap, SILI_PREG_INT_ENABLE, 1496 SILI_PREG_INT_READY); 1497 break; 1498 } 1499 1500 /* 1501 * Handle exclusivity requirements. ATA_F_EXCLUSIVE is used 1502 * when we may have to access the rfis which is stored in 1503 * the LRAM PRB. Unfortunately reading the LRAM PRB is 1504 * highly problematic, so requests (like PM requests) which 1505 * need to access the rfis use exclusive mode and then 1506 * access the copy made by the port interrupt code back in 1507 * host memory. 1508 */ 1509 if (ap->ap_active & ~ap->ap_expired) { 1510 /* 1511 * There may be multiple ccb's already running, 1512 * if any are running and ap_run_flags sets 1513 * one of these flags then we know only one is 1514 * running. 1515 * 1516 * XXX Current AUTOSENSE code forces exclusivity 1517 * to simplify the code. 1518 */ 1519 if (ap->ap_run_flags & 1520 (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) { 1521 break; 1522 } 1523 1524 /* 1525 * If the ccb we want to run is exclusive and ccb's 1526 * are still active on the port, we can't queue it 1527 * yet. 1528 * 1529 * XXX Current AUTOSENSE code forces exclusivity 1530 * to simplify the code. 1531 */ 1532 if (ccb->ccb_xa.flags & 1533 (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) { 1534 break; 1535 } 1536 } 1537 1538 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry); 1539 ccb->ccb_xa.state = ATA_S_ONCHIP; 1540 ap->ap_active |= 1 << ccb->ccb_slot; 1541 ap->ap_active_cnt++; 1542 ap->ap_run_flags = ccb->ccb_xa.flags; 1543 1544 /* 1545 * We can't use the CMD_FIFO method because it requires us 1546 * building the PRB in the LRAM, and the LRAM is buggy. So 1547 * we use host memory for the PRB. 1548 */ 1549 sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot), 1550 (u_int32_t)ccb->ccb_prb_paddr); 1551 sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot) + 4, 1552 (u_int32_t)(ccb->ccb_prb_paddr >> 32)); 1553 /* sili_pwrite(ap, SILI_PREG_CMD_FIFO, ccb->ccb_slot); */ 1554 sili_start_timeout(ccb); 1555 } 1556 } 1557 1558 void 1559 sili_intr(void *arg) 1560 { 1561 struct sili_softc *sc = arg; 1562 struct sili_port *ap; 1563 u_int32_t gint; 1564 int port; 1565 1566 /* 1567 * Check if the master enable is up, and whether any interrupts are 1568 * pending. 1569 * 1570 * Clear the ints we got. 1571 */ 1572 if ((sc->sc_flags & SILI_F_INT_GOOD) == 0) 1573 return; 1574 gint = sili_read(sc, SILI_REG_GINT); 1575 if (gint == 0 || gint == 0xffffffff) 1576 return; 1577 sili_write(sc, SILI_REG_GINT, gint); 1578 1579 /* 1580 * Process interrupts for each port in a non-blocking fashion. 1581 */ 1582 while (gint & SILI_REG_GINT_PORTMASK) { 1583 port = ffs(gint) - 1; 1584 ap = sc->sc_ports[port]; 1585 if (ap) { 1586 if (sili_os_lock_port_nb(ap) == 0) { 1587 sili_port_intr(ap, 0); 1588 sili_os_unlock_port(ap); 1589 } else { 1590 sili_port_interrupt_redisable(ap); 1591 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT); 1592 } 1593 } 1594 gint &= ~(1 << port); 1595 } 1596 } 1597 1598 /* 1599 * Core called from helper thread. 1600 */ 1601 void 1602 sili_port_thread_core(struct sili_port *ap, int mask) 1603 { 1604 /* 1605 * Process any expired timedouts. 1606 */ 1607 sili_os_lock_port(ap); 1608 if (mask & AP_SIGF_TIMEOUT) { 1609 sili_check_active_timeouts(ap); 1610 } 1611 1612 /* 1613 * Process port interrupts which require a higher level of 1614 * intervention. 1615 */ 1616 if (mask & AP_SIGF_PORTINT) { 1617 sili_port_intr(ap, 1); 1618 sili_port_interrupt_reenable(ap); 1619 } 1620 sili_os_unlock_port(ap); 1621 } 1622 1623 /* 1624 * Core per-port interrupt handler. 1625 * 1626 * If blockable is 0 we cannot call sili_os_sleep() at all and we can only 1627 * deal with normal command completions which do not require blocking. 1628 */ 1629 void 1630 sili_port_intr(struct sili_port *ap, int blockable) 1631 { 1632 struct sili_softc *sc = ap->ap_sc; 1633 u_int32_t is; 1634 int slot; 1635 struct sili_ccb *ccb = NULL; 1636 struct ata_port *ccb_at = NULL; 1637 u_int32_t active; 1638 u_int32_t finished; 1639 const u_int32_t blockable_mask = SILI_PREG_IST_PHYRDYCHG | 1640 SILI_PREG_IST_DEVEXCHG | 1641 SILI_PREG_IST_CERROR | 1642 SILI_PREG_IST_DECODE | 1643 SILI_PREG_IST_CRC | 1644 SILI_PREG_IST_HANDSHK; 1645 const u_int32_t fatal_mask = SILI_PREG_IST_PHYRDYCHG | 1646 SILI_PREG_IST_DEVEXCHG | 1647 SILI_PREG_IST_DECODE | 1648 SILI_PREG_IST_CRC | 1649 SILI_PREG_IST_HANDSHK; 1650 1651 enum { NEED_NOTHING, NEED_HOTPLUG_INSERT, 1652 NEED_HOTPLUG_REMOVE } need = NEED_NOTHING; 1653 1654 /* 1655 * NOTE: CCOMPLETE was automatically cleared when we read INT_STATUS. 1656 */ 1657 is = sili_pread(ap, SILI_PREG_INT_STATUS); 1658 is &= SILI_PREG_IST_MASK; 1659 if (is & SILI_PREG_IST_CCOMPLETE) 1660 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CCOMPLETE); 1661 1662 /* 1663 * If we can't block then we can't handle these here. Disable 1664 * the interrupts in question so we don't live-lock, the helper 1665 * thread will re-enable them. 1666 * 1667 * If the port is in a completely failed state we do not want 1668 * to drop through to failed-command-processing if blockable is 0, 1669 * just let the thread deal with it all. 1670 * 1671 * Otherwise we fall through and still handle DHRS and any commands 1672 * which completed normally. Even if we are errored we haven't 1673 * stopped the port yet so CI/SACT are still good. 1674 */ 1675 if (blockable == 0) { 1676 if (ap->ap_state == AP_S_FATAL_ERROR) { 1677 sili_port_interrupt_redisable(ap); 1678 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT); 1679 /*is &= ~blockable_mask;*/ 1680 return; 1681 } 1682 if (is & blockable_mask) { 1683 sili_port_interrupt_redisable(ap); 1684 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT); 1685 /*is &= ~blockable_mask;*/ 1686 return; 1687 } 1688 } 1689 1690 if (is & SILI_PREG_IST_CERROR) { 1691 /* 1692 * Command failed (blockable). 1693 * 1694 * This stops command processing. We can extract the PM 1695 * target from the PMP field in SILI_PREG_CONTEXT. The 1696 * tag is not necessarily valid so don't use that. 1697 * 1698 * We must then expire all CCB's for that target and resume 1699 * processing if any other targets have active commands. 1700 * Particular error codes can be recovered by reading the LOG 1701 * page. 1702 * 1703 * The expire handling code will do the rest, which is 1704 * basically to reset the port once the only active 1705 * commands remaining are all expired. 1706 */ 1707 u_int32_t error; 1708 int target; 1709 int resume = 1; 1710 1711 target = (sili_pread(ap, SILI_PREG_CONTEXT) >> 1712 SILI_PREG_CONTEXT_PMPORT_SHIFT) & 1713 SILI_PREG_CONTEXT_PMPORT_MASK; 1714 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CERROR); 1715 active = ap->ap_active & ~ap->ap_expired; 1716 error = sili_pread(ap, SILI_PREG_CERROR); 1717 kprintf("%s.%d target error %d active=%08x hactive=%08x " 1718 "SERR=%b\n", 1719 PORTNAME(ap), target, error, 1720 active, sili_pread(ap, SILI_PREG_SLOTST), 1721 sili_pread(ap, SILI_PREG_SERR), SILI_PFMT_SERR); 1722 1723 while (active) { 1724 slot = ffs(active) - 1; 1725 ccb = &ap->ap_ccbs[slot]; 1726 if ((ccb_at = ccb->ccb_xa.at) == NULL) 1727 ccb_at = &ap->ap_ata[0]; 1728 if (target == ccb_at->at_target) { 1729 if ((ccb->ccb_xa.flags & ATA_F_NCQ) && 1730 (error == SILI_PREG_CERROR_DEVICE || 1731 error == SILI_PREG_CERROR_SDBERROR)) { 1732 ccb_at->at_features |= ATA_PORT_F_READLOG; 1733 } 1734 if (sili_core_timeout(ccb, 1) == 0) 1735 resume = 0; 1736 } 1737 active &= ~(1 << slot); 1738 } 1739 1740 /* 1741 * Resume will be 0 if the timeout reinited and restarted 1742 * the port. Otherwise we resume the port to allow other 1743 * commands to complete. 1744 */ 1745 if (resume) 1746 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESUME); 1747 } 1748 1749 /* 1750 * Device notification to us (non-blocking) 1751 * 1752 * This is interrupt status SILIPREG_IST_SDB 1753 * 1754 * NOTE! On some parts notification bits can get set without 1755 * generating an interrupt. It is unclear whether this is 1756 * a bug in the PM (sending a DTOH device setbits with 'N' set 1757 * and 'I' not set), or a bug in the host controller. 1758 * 1759 * It only seems to occur under load. 1760 */ 1761 if (sc->sc_flags & SILI_F_SSNTF) { 1762 u_int32_t data; 1763 const char *xstr; 1764 1765 data = sili_pread(ap, SILI_PREG_SNTF); 1766 if (is & SILI_PREG_IST_SDB) { 1767 sili_pwrite(ap, SILI_PREG_INT_STATUS, 1768 SILI_PREG_IST_SDB); 1769 is &= ~SILI_PREG_IST_SDB; 1770 xstr = " (no SDBS!)"; 1771 } else { 1772 xstr = ""; 1773 } 1774 if (data) { 1775 kprintf("%s: NOTIFY %08x%s\n", 1776 PORTNAME(ap), data, xstr); 1777 sili_pwrite(ap, SILI_PREG_SNTF, data); 1778 sili_cam_changed(ap, NULL, -1); 1779 } 1780 } 1781 1782 /* 1783 * Port change (hot-plug) (blockable). 1784 * 1785 * A PCS interrupt will occur on hot-plug once communication is 1786 * established. 1787 * 1788 * A PRCS interrupt will occur on hot-unplug (and possibly also 1789 * on hot-plug). 1790 * 1791 * XXX We can then check the CPS (Cold Presence State) bit, if 1792 * supported, to determine if a device is plugged in or not and do 1793 * the right thing. 1794 * 1795 * WARNING: A PCS interrupt is cleared by clearing DIAG_X, and 1796 * can also occur if an unsolicited COMINIT is received. 1797 * If this occurs command processing is automatically 1798 * stopped (CR goes inactive) and the port must be stopped 1799 * and restarted. 1800 */ 1801 if (is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG)) { 1802 /* XXX */ 1803 sili_pwrite(ap, SILI_PREG_SERR, 1804 (SILI_PREG_SERR_DIAG_N | SILI_PREG_SERR_DIAG_X)); 1805 sili_pwrite(ap, SILI_PREG_INT_STATUS, 1806 is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG)); 1807 1808 is &= ~(SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG); 1809 kprintf("%s: Port change\n", PORTNAME(ap)); 1810 1811 switch (sili_pread(ap, SILI_PREG_SSTS) & SILI_PREG_SSTS_DET) { 1812 case SILI_PREG_SSTS_DET_DEV: 1813 if (ap->ap_type == ATA_PORT_T_NONE && 1814 ap->ap_probe == ATA_PROBE_FAILED) { 1815 need = NEED_HOTPLUG_INSERT; 1816 goto fatal; 1817 } 1818 break; 1819 default: 1820 kprintf("%s: Device lost\n", PORTNAME(ap)); 1821 if (ap->ap_type != ATA_PORT_T_NONE) { 1822 need = NEED_HOTPLUG_REMOVE; 1823 goto fatal; 1824 } 1825 break; 1826 } 1827 } 1828 1829 /* 1830 * Check for remaining errors - they are fatal. (blockable) 1831 */ 1832 if (is & fatal_mask) { 1833 u_int32_t serr; 1834 1835 sili_pwrite(ap, SILI_PREG_INT_STATUS, is & fatal_mask); 1836 1837 serr = sili_pread(ap, SILI_PREG_SERR); 1838 kprintf("%s: Unrecoverable errors (IS: %b, SERR: %b), " 1839 "disabling port.\n", 1840 PORTNAME(ap), 1841 is, SILI_PFMT_INT_STATUS, 1842 serr, SILI_PFMT_SERR 1843 ); 1844 is &= ~fatal_mask; 1845 /* XXX try recovery first */ 1846 goto fatal; 1847 } 1848 1849 /* 1850 * Fail all outstanding commands if we know the port won't recover. 1851 * 1852 * We may have a ccb_at if the failed command is known and was 1853 * being sent to a device over a port multiplier (PM). In this 1854 * case if the port itself has not completely failed we fail just 1855 * the commands related to that target. 1856 */ 1857 if (ap->ap_state == AP_S_FATAL_ERROR && 1858 (ap->ap_active & ~ap->ap_expired)) { 1859 kprintf("%s: Fatal port error, expiring %08x\n", 1860 PORTNAME(ap), ap->ap_active & ~ap->ap_expired); 1861 fatal: 1862 ap->ap_state = AP_S_FATAL_ERROR; 1863 1864 /* 1865 * Error all the active slots. If running across a PM 1866 * try to error out just the slots related to the target. 1867 */ 1868 active = ap->ap_active & ~ap->ap_expired; 1869 1870 while (active) { 1871 slot = ffs(active) - 1; 1872 active &= ~(1 << slot); 1873 ccb = &ap->ap_ccbs[slot]; 1874 sili_core_timeout(ccb, 1); 1875 } 1876 } 1877 1878 /* 1879 * CCB completion (non blocking). 1880 * 1881 * CCB completion is detected by noticing the slot bit in 1882 * the port slot status register has cleared while the bit 1883 * is still set in our ap_active variable. 1884 * 1885 * When completing expired events we must remember to reinit 1886 * the port once everything is clear. 1887 * 1888 * Due to a single-level recursion when reading the log page, 1889 * it is possible for the slot to already have been cleared 1890 * for some expired tags, do not include expired tags in 1891 * the list. 1892 */ 1893 active = ap->ap_active & ~sili_pread(ap, SILI_PREG_SLOTST); 1894 active &= ~ap->ap_expired; 1895 1896 finished = active; 1897 while (active) { 1898 slot = ffs(active) - 1; 1899 ccb = &ap->ap_ccbs[slot]; 1900 1901 DPRINTF(SILI_D_INTR, "%s: slot %d is complete%s\n", 1902 PORTNAME(ap), slot, ccb->ccb_xa.state == ATA_S_ERROR ? 1903 " (error)" : ""); 1904 1905 active &= ~(1 << slot); 1906 1907 /* 1908 * XXX sync POSTREAD for return data? 1909 */ 1910 ap->ap_active &= ~(1 << ccb->ccb_slot); 1911 --ap->ap_active_cnt; 1912 1913 /* 1914 * Complete the ccb. If the ccb was marked expired it 1915 * may or may not have been cleared from the port, 1916 * make sure we mark it as having timed out. 1917 * 1918 * In a normal completion if AUTOSENSE is set we copy 1919 * the PRB LRAM rfis back to the rfis in host-memory. 1920 * 1921 * XXX Currently AUTOSENSE also forces exclusivity so we 1922 * can safely work around a hardware bug when reading 1923 * the LRAM. 1924 */ 1925 if (ap->ap_expired & (1 << ccb->ccb_slot)) { 1926 ap->ap_expired &= ~(1 << ccb->ccb_slot); 1927 ccb->ccb_xa.state = ATA_S_TIMEOUT; 1928 ccb->ccb_done(ccb); 1929 ccb->ccb_xa.complete(&ccb->ccb_xa); 1930 } else { 1931 if (ccb->ccb_xa.state == ATA_S_ONCHIP) { 1932 ccb->ccb_xa.state = ATA_S_COMPLETE; 1933 if (ccb->ccb_xa.flags & ATA_F_AUTOSENSE) { 1934 memcpy(ccb->ccb_xa.rfis, 1935 &ccb->ccb_prb_lram->prb_d2h, 1936 sizeof(ccb->ccb_prb_lram->prb_d2h)); 1937 if (ccb->ccb_xa.state == ATA_S_TIMEOUT) 1938 ccb->ccb_xa.state = ATA_S_ERROR; 1939 } 1940 } 1941 ccb->ccb_done(ccb); 1942 } 1943 } 1944 if (is & SILI_PREG_IST_READY) { 1945 is &= ~SILI_PREG_IST_READY; 1946 sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_READY); 1947 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_READY); 1948 } 1949 1950 /* 1951 * If we had expired commands and were waiting for 1952 * remaining commands to complete, and they have now 1953 * completed, we can reinit the port. 1954 * 1955 * This will also clean out the expired commands. 1956 * The timeout code also calls sili_port_reinit() if 1957 * the only commands remaining after a timeout are all 1958 * now expired commands. 1959 * 1960 * Otherwise just reissue. 1961 */ 1962 if (ap->ap_expired && ap->ap_active == ap->ap_expired) { 1963 if (finished) 1964 sili_port_reinit(ap); 1965 } else { 1966 sili_issue_pending_commands(ap, NULL); 1967 } 1968 1969 /* 1970 * Cleanup. Will not be set if non-blocking. 1971 */ 1972 switch(need) { 1973 case NEED_HOTPLUG_INSERT: 1974 /* 1975 * A hot-plug insertion event has occured and all 1976 * outstanding commands have already been revoked. 1977 * 1978 * Don't recurse if this occurs while we are 1979 * resetting the port. 1980 * 1981 * Place the port in a continuous COMRESET state 1982 * until the INIT code gets to it. 1983 */ 1984 kprintf("%s: HOTPLUG - Device inserted\n", 1985 PORTNAME(ap)); 1986 ap->ap_probe = ATA_PROBE_NEED_INIT; 1987 sili_cam_changed(ap, NULL, -1); 1988 break; 1989 case NEED_HOTPLUG_REMOVE: 1990 /* 1991 * A hot-plug removal event has occured and all 1992 * outstanding commands have already been revoked. 1993 * 1994 * Don't recurse if this occurs while we are 1995 * resetting the port. 1996 */ 1997 kprintf("%s: HOTPLUG - Device removed\n", 1998 PORTNAME(ap)); 1999 sili_port_hardstop(ap); 2000 /* ap_probe set to failed */ 2001 sili_cam_changed(ap, NULL, -1); 2002 break; 2003 default: 2004 break; 2005 } 2006 } 2007 2008 struct sili_ccb * 2009 sili_get_ccb(struct sili_port *ap) 2010 { 2011 struct sili_ccb *ccb; 2012 2013 lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE); 2014 ccb = TAILQ_FIRST(&ap->ap_ccb_free); 2015 if (ccb != NULL) { 2016 KKASSERT(ccb->ccb_xa.state == ATA_S_PUT); 2017 TAILQ_REMOVE(&ap->ap_ccb_free, ccb, ccb_entry); 2018 ccb->ccb_xa.state = ATA_S_SETUP; 2019 ccb->ccb_xa.at = NULL; 2020 } 2021 lockmgr(&ap->ap_ccb_lock, LK_RELEASE); 2022 2023 return (ccb); 2024 } 2025 2026 void 2027 sili_put_ccb(struct sili_ccb *ccb) 2028 { 2029 struct sili_port *ap = ccb->ccb_port; 2030 2031 lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE); 2032 ccb->ccb_xa.state = ATA_S_PUT; 2033 ++ccb->ccb_xa.serial; 2034 TAILQ_INSERT_TAIL(&ap->ap_ccb_free, ccb, ccb_entry); 2035 lockmgr(&ap->ap_ccb_lock, LK_RELEASE); 2036 } 2037 2038 struct sili_ccb * 2039 sili_get_err_ccb(struct sili_port *ap) 2040 { 2041 struct sili_ccb *err_ccb; 2042 2043 KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) == 0); 2044 ap->ap_flags |= AP_F_ERR_CCB_RESERVED; 2045 2046 /* 2047 * Grab a CCB to use for error recovery. This should never fail, as 2048 * we ask atascsi to reserve one for us at init time. 2049 */ 2050 err_ccb = ap->ap_err_ccb; 2051 KKASSERT(err_ccb != NULL); 2052 err_ccb->ccb_xa.flags = 0; 2053 err_ccb->ccb_done = sili_empty_done; 2054 2055 return err_ccb; 2056 } 2057 2058 void 2059 sili_put_err_ccb(struct sili_ccb *ccb) 2060 { 2061 struct sili_port *ap = ccb->ccb_port; 2062 2063 KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) != 0); 2064 2065 KKASSERT(ccb == ap->ap_err_ccb); 2066 2067 ap->ap_flags &= ~AP_F_ERR_CCB_RESERVED; 2068 } 2069 2070 /* 2071 * Read log page to get NCQ error. 2072 * 2073 * Return 0 on success 2074 */ 2075 void 2076 sili_port_read_ncq_error(struct sili_port *ap, int target) 2077 { 2078 struct sili_ccb *ccb; 2079 struct ata_fis_h2d *fis; 2080 int status; 2081 2082 DPRINTF(SILI_D_VERBOSE, "%s: read log page\n", PORTNAME(ap)); 2083 2084 /* Prep error CCB for READ LOG EXT, page 10h, 1 sector. */ 2085 ccb = sili_get_err_ccb(ap); 2086 ccb->ccb_done = sili_empty_done; 2087 ccb->ccb_xa.flags = ATA_F_NOWAIT | ATA_F_READ | ATA_F_POLL; 2088 ccb->ccb_xa.data = ap->ap_err_scratch; 2089 ccb->ccb_xa.datalen = 512; 2090 ccb->ccb_xa.complete = sili_dummy_done; 2091 ccb->ccb_xa.at = &ap->ap_ata[target]; 2092 fis = &ccb->ccb_prb->prb_h2d; 2093 bzero(fis, sizeof(*fis)); 2094 2095 fis->type = ATA_FIS_TYPE_H2D; 2096 fis->flags = ATA_H2D_FLAGS_CMD | target; 2097 fis->command = ATA_C_READ_LOG_EXT; 2098 fis->lba_low = 0x10; /* queued error log page (10h) */ 2099 fis->sector_count = 1; /* number of sectors (1) */ 2100 fis->sector_count_exp = 0; 2101 fis->lba_mid = 0; /* starting offset */ 2102 fis->lba_mid_exp = 0; 2103 fis->device = 0; 2104 2105 /* 2106 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb 2107 */ 2108 if (sili_load_prb(ccb) != 0) { 2109 status = ATA_S_ERROR; 2110 } else { 2111 ccb->ccb_xa.state = ATA_S_PENDING; 2112 status = sili_poll(ccb, 1000, sili_quick_timeout); 2113 } 2114 2115 /* 2116 * Just spew if it fails, there isn't much we can do at this point. 2117 */ 2118 if (status != ATA_S_COMPLETE) { 2119 kprintf("%s: log page read failed, slot %d was still active.\n", 2120 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot); 2121 } 2122 2123 /* Done with the error CCB now. */ 2124 sili_unload_prb(ccb); 2125 sili_put_err_ccb(ccb); 2126 2127 /* Extract failed register set and tags from the scratch space. */ 2128 if (status == ATA_S_COMPLETE) { 2129 struct ata_log_page_10h *log; 2130 int err_slot; 2131 2132 log = (struct ata_log_page_10h *)ap->ap_err_scratch; 2133 if (log->err_regs.type & ATA_LOG_10H_TYPE_NOTQUEUED) { 2134 /* 2135 * Not queued bit was set - wasn't an NCQ error? 2136 * 2137 * XXX This bit seems to be set a lot even for NCQ 2138 * errors? 2139 */ 2140 } else { 2141 /* 2142 * Copy back the log record as a D2H register FIS. 2143 */ 2144 err_slot = log->err_regs.type & 2145 ATA_LOG_10H_TYPE_TAG_MASK; 2146 ccb = &ap->ap_ccbs[err_slot]; 2147 if (ap->ap_expired & (1 << ccb->ccb_slot)) { 2148 kprintf("%s: read NCQ error page slot=%d\n", 2149 ATANAME(ap, ccb->ccb_xa.at), err_slot 2150 ); 2151 memcpy(&ccb->ccb_prb->prb_d2h, &log->err_regs, 2152 sizeof(struct ata_fis_d2h)); 2153 ccb->ccb_prb->prb_d2h.type = ATA_FIS_TYPE_D2H; 2154 ccb->ccb_prb->prb_d2h.flags = 0; 2155 if (ccb->ccb_xa.state == ATA_S_TIMEOUT) 2156 ccb->ccb_xa.state = ATA_S_ERROR; 2157 } else { 2158 kprintf("%s: read NCQ error page slot=%d, " 2159 "slot does not match any cmds\n", 2160 ATANAME(ccb->ccb_port, ccb->ccb_xa.at), 2161 err_slot 2162 ); 2163 } 2164 } 2165 } 2166 } 2167 2168 /* 2169 * Allocate memory for various structures DMAd by hardware. The maximum 2170 * number of segments for these tags is 1 so the DMA memory will have a 2171 * single physical base address. 2172 */ 2173 struct sili_dmamem * 2174 sili_dmamem_alloc(struct sili_softc *sc, bus_dma_tag_t tag) 2175 { 2176 struct sili_dmamem *adm; 2177 int error; 2178 2179 adm = kmalloc(sizeof(*adm), M_DEVBUF, M_INTWAIT | M_ZERO); 2180 2181 error = bus_dmamem_alloc(tag, (void **)&adm->adm_kva, 2182 BUS_DMA_ZERO, &adm->adm_map); 2183 if (error == 0) { 2184 adm->adm_tag = tag; 2185 error = bus_dmamap_load(tag, adm->adm_map, 2186 adm->adm_kva, 2187 bus_dma_tag_getmaxsize(tag), 2188 sili_dmamem_saveseg, &adm->adm_busaddr, 2189 0); 2190 } 2191 if (error) { 2192 if (adm->adm_map) { 2193 bus_dmamap_destroy(tag, adm->adm_map); 2194 adm->adm_map = NULL; 2195 adm->adm_tag = NULL; 2196 adm->adm_kva = NULL; 2197 } 2198 kfree(adm, M_DEVBUF); 2199 adm = NULL; 2200 } 2201 return (adm); 2202 } 2203 2204 static 2205 void 2206 sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs, int nsegs, int error) 2207 { 2208 KKASSERT(error == 0); 2209 KKASSERT(nsegs == 1); 2210 *(bus_addr_t *)info = segs->ds_addr; 2211 } 2212 2213 2214 void 2215 sili_dmamem_free(struct sili_softc *sc, struct sili_dmamem *adm) 2216 { 2217 if (adm->adm_map) { 2218 bus_dmamap_unload(adm->adm_tag, adm->adm_map); 2219 bus_dmamap_destroy(adm->adm_tag, adm->adm_map); 2220 adm->adm_map = NULL; 2221 adm->adm_tag = NULL; 2222 adm->adm_kva = NULL; 2223 } 2224 kfree(adm, M_DEVBUF); 2225 } 2226 2227 u_int32_t 2228 sili_read(struct sili_softc *sc, bus_size_t r) 2229 { 2230 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 2231 BUS_SPACE_BARRIER_READ); 2232 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r)); 2233 } 2234 2235 void 2236 sili_write(struct sili_softc *sc, bus_size_t r, u_int32_t v) 2237 { 2238 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v); 2239 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 2240 BUS_SPACE_BARRIER_WRITE); 2241 } 2242 2243 u_int32_t 2244 sili_pread(struct sili_port *ap, bus_size_t r) 2245 { 2246 bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4, 2247 BUS_SPACE_BARRIER_READ); 2248 return (bus_space_read_4(ap->ap_sc->sc_iot, ap->ap_ioh, r)); 2249 } 2250 2251 void 2252 sili_pwrite(struct sili_port *ap, bus_size_t r, u_int32_t v) 2253 { 2254 bus_space_write_4(ap->ap_sc->sc_iot, ap->ap_ioh, r, v); 2255 bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4, 2256 BUS_SPACE_BARRIER_WRITE); 2257 } 2258 2259 /* 2260 * Wait up to (timeout) milliseconds for the masked port register to 2261 * match the target. 2262 * 2263 * Timeout is in milliseconds. 2264 */ 2265 int 2266 sili_pwait_eq(struct sili_port *ap, int timeout, 2267 bus_size_t r, u_int32_t mask, u_int32_t target) 2268 { 2269 int t; 2270 2271 /* 2272 * Loop hard up to 100uS 2273 */ 2274 for (t = 0; t < 100; ++t) { 2275 if ((sili_pread(ap, r) & mask) == target) 2276 return (0); 2277 sili_os_hardsleep(1); /* us */ 2278 } 2279 2280 do { 2281 timeout -= sili_os_softsleep(); 2282 if ((sili_pread(ap, r) & mask) == target) 2283 return (0); 2284 } while (timeout > 0); 2285 return (1); 2286 } 2287 2288 int 2289 sili_wait_ne(struct sili_softc *sc, bus_size_t r, u_int32_t mask, 2290 u_int32_t target) 2291 { 2292 int t; 2293 2294 /* 2295 * Loop hard up to 100uS 2296 */ 2297 for (t = 0; t < 100; ++t) { 2298 if ((sili_read(sc, r) & mask) != target) 2299 return (0); 2300 sili_os_hardsleep(1); /* us */ 2301 } 2302 2303 /* 2304 * And one millisecond the slow way 2305 */ 2306 t = 1000; 2307 do { 2308 t -= sili_os_softsleep(); 2309 if ((sili_read(sc, r) & mask) != target) 2310 return (0); 2311 } while (t > 0); 2312 2313 return (1); 2314 } 2315 2316 2317 /* 2318 * Acquire an ata transfer. 2319 * 2320 * Pass a NULL at for direct-attached transfers, and a non-NULL at for 2321 * targets that go through the port multiplier. 2322 */ 2323 struct ata_xfer * 2324 sili_ata_get_xfer(struct sili_port *ap, struct ata_port *at) 2325 { 2326 struct sili_ccb *ccb; 2327 2328 ccb = sili_get_ccb(ap); 2329 if (ccb == NULL) { 2330 DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer: NULL ccb\n", 2331 PORTNAME(ap)); 2332 return (NULL); 2333 } 2334 2335 DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer got slot %d\n", 2336 PORTNAME(ap), ccb->ccb_slot); 2337 2338 bzero(ccb->ccb_xa.fis, sizeof(*ccb->ccb_xa.fis)); 2339 ccb->ccb_xa.at = at; 2340 ccb->ccb_xa.fis->type = ATA_FIS_TYPE_H2D; 2341 2342 return (&ccb->ccb_xa); 2343 } 2344 2345 void 2346 sili_ata_put_xfer(struct ata_xfer *xa) 2347 { 2348 struct sili_ccb *ccb = (struct sili_ccb *)xa; 2349 2350 DPRINTF(SILI_D_XFER, "sili_ata_put_xfer slot %d\n", ccb->ccb_slot); 2351 2352 sili_put_ccb(ccb); 2353 } 2354 2355 int 2356 sili_ata_cmd(struct ata_xfer *xa) 2357 { 2358 struct sili_ccb *ccb = (struct sili_ccb *)xa; 2359 2360 KKASSERT(xa->state == ATA_S_SETUP); 2361 2362 if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR) 2363 goto failcmd; 2364 #if 0 2365 kprintf("%s: started std command %b ccb %d ccb_at %p %d\n", 2366 ATANAME(ccb->ccb_port, ccb->ccb_xa.at), 2367 sili_pread(ccb->ccb_port, SILI_PREG_CMD), SILI_PFMT_CMD, 2368 ccb->ccb_slot, 2369 ccb->ccb_xa.at, 2370 ccb->ccb_xa.at ? ccb->ccb_xa.at->at_target : -1); 2371 #endif 2372 2373 ccb->ccb_done = sili_ata_cmd_done; 2374 2375 if (sili_load_prb(ccb) != 0) 2376 goto failcmd; 2377 2378 xa->state = ATA_S_PENDING; 2379 2380 if (xa->flags & ATA_F_POLL) 2381 return (sili_poll(ccb, xa->timeout, sili_ata_cmd_timeout)); 2382 2383 crit_enter(); 2384 KKASSERT((xa->flags & ATA_F_TIMEOUT_EXPIRED) == 0); 2385 xa->flags |= ATA_F_TIMEOUT_DESIRED; 2386 sili_start(ccb); 2387 crit_exit(); 2388 return (xa->state); 2389 2390 failcmd: 2391 crit_enter(); 2392 xa->state = ATA_S_ERROR; 2393 xa->complete(xa); 2394 crit_exit(); 2395 return (ATA_S_ERROR); 2396 } 2397 2398 static void 2399 sili_ata_cmd_done(struct sili_ccb *ccb) 2400 { 2401 struct ata_xfer *xa = &ccb->ccb_xa; 2402 int serial; 2403 2404 /* 2405 * NOTE: callout does not lock port and may race us modifying 2406 * the flags, so make sure its stopped. 2407 */ 2408 if (xa->flags & ATA_F_TIMEOUT_RUNNING) { 2409 serial = ccb->ccb_xa.serial; 2410 callout_stop_sync(&ccb->ccb_timeout); 2411 if (serial != ccb->ccb_xa.serial) { 2412 kprintf("%s: Warning: timeout race ccb %p\n", 2413 PORTNAME(ccb->ccb_port), ccb); 2414 return; 2415 } 2416 xa->flags &= ~ATA_F_TIMEOUT_RUNNING; 2417 } 2418 xa->flags &= ~(ATA_F_TIMEOUT_DESIRED | ATA_F_TIMEOUT_EXPIRED); 2419 2420 KKASSERT(xa->state != ATA_S_ONCHIP); 2421 sili_unload_prb(ccb); 2422 2423 if (xa->state != ATA_S_TIMEOUT) 2424 xa->complete(xa); 2425 } 2426 2427 /* 2428 * Timeout from callout, MPSAFE - nothing can mess with the CCB's flags 2429 * while the callout is runing. 2430 * 2431 * We can't safely get the port lock here or delay, we could block 2432 * the callout thread. 2433 */ 2434 static void 2435 sili_ata_cmd_timeout_unserialized(void *arg) 2436 { 2437 struct sili_ccb *ccb = arg; 2438 struct sili_port *ap = ccb->ccb_port; 2439 2440 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING; 2441 ccb->ccb_xa.flags |= ATA_F_TIMEOUT_EXPIRED; 2442 sili_os_signal_port_thread(ap, AP_SIGF_TIMEOUT); 2443 } 2444 2445 void 2446 sili_ata_cmd_timeout(struct sili_ccb *ccb) 2447 { 2448 sili_core_timeout(ccb, 0); 2449 } 2450 2451 /* 2452 * Timeout code, typically called when the port command processor is running. 2453 * 2454 * Returns 0 if all timeout processing completed, non-zero if it is still 2455 * in progress. 2456 */ 2457 static 2458 int 2459 sili_core_timeout(struct sili_ccb *ccb, int really_error) 2460 { 2461 struct ata_xfer *xa = &ccb->ccb_xa; 2462 struct sili_port *ap = ccb->ccb_port; 2463 struct ata_port *at; 2464 2465 at = ccb->ccb_xa.at; 2466 2467 kprintf("%s: CMD %s state=%d slot=%d\n" 2468 "\t active=%08x\n" 2469 "\texpired=%08x\n" 2470 "\thactive=%08x\n", 2471 ATANAME(ap, at), 2472 (really_error ? "ERROR" : "TIMEOUT"), 2473 ccb->ccb_xa.state, ccb->ccb_slot, 2474 ap->ap_active, 2475 ap->ap_expired, 2476 sili_pread(ap, SILI_PREG_SLOTST) 2477 ); 2478 2479 /* 2480 * NOTE: Timeout will not be running if the command was polled. 2481 * If we got here at least one of these flags should be set. 2482 * 2483 * However, it might be running if we are called from the 2484 * interrupt error handling code. 2485 */ 2486 KKASSERT(xa->flags & (ATA_F_POLL | ATA_F_TIMEOUT_DESIRED | 2487 ATA_F_TIMEOUT_RUNNING)); 2488 if (xa->flags & ATA_F_TIMEOUT_RUNNING) { 2489 callout_stop(&ccb->ccb_timeout); 2490 xa->flags &= ~ATA_F_TIMEOUT_RUNNING; 2491 } 2492 xa->flags &= ~ATA_F_TIMEOUT_EXPIRED; 2493 2494 if (ccb->ccb_xa.state == ATA_S_PENDING) { 2495 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry); 2496 ccb->ccb_xa.state = ATA_S_TIMEOUT; 2497 ccb->ccb_done(ccb); 2498 xa->complete(xa); 2499 sili_issue_pending_commands(ap, NULL); 2500 return(1); 2501 } 2502 if (ccb->ccb_xa.state != ATA_S_ONCHIP) { 2503 kprintf("%s: Unexpected state during timeout: %d\n", 2504 ATANAME(ap, at), ccb->ccb_xa.state); 2505 return(1); 2506 } 2507 2508 /* 2509 * We can't process timeouts while other commands are running. 2510 */ 2511 ap->ap_expired |= 1 << ccb->ccb_slot; 2512 2513 if (ap->ap_active != ap->ap_expired) { 2514 kprintf("%s: Deferred timeout until its safe, slot %d\n", 2515 ATANAME(ap, at), ccb->ccb_slot); 2516 return(1); 2517 } 2518 2519 /* 2520 * We have to issue a Port reinit. We don't read an error log 2521 * page for timeouts. Reiniting the port will clear all pending 2522 * commands. 2523 */ 2524 sili_port_reinit(ap); 2525 return(0); 2526 } 2527 2528 /* 2529 * Used by the softreset, pm_port_probe, and read_ncq_error only, in very 2530 * specialized, controlled circumstances. 2531 */ 2532 void 2533 sili_quick_timeout(struct sili_ccb *ccb) 2534 { 2535 struct sili_port *ap = ccb->ccb_port; 2536 2537 switch (ccb->ccb_xa.state) { 2538 case ATA_S_PENDING: 2539 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry); 2540 ccb->ccb_xa.state = ATA_S_TIMEOUT; 2541 break; 2542 case ATA_S_ONCHIP: 2543 KKASSERT((ap->ap_active & ~ap->ap_expired) == 2544 (1 << ccb->ccb_slot)); 2545 ccb->ccb_xa.state = ATA_S_TIMEOUT; 2546 ap->ap_active &= ~(1 << ccb->ccb_slot); 2547 KKASSERT(ap->ap_active_cnt > 0); 2548 --ap->ap_active_cnt; 2549 sili_port_reinit(ap); 2550 break; 2551 default: 2552 panic("%s: sili_quick_timeout: ccb in bad state %d", 2553 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_xa.state); 2554 } 2555 } 2556 2557 static void 2558 sili_dummy_done(struct ata_xfer *xa) 2559 { 2560 } 2561 2562 static void 2563 sili_empty_done(struct sili_ccb *ccb) 2564 { 2565 } 2566