1 /* $NetBSD: ahcisata_core.c,v 1.107 2022/08/01 07:37:18 mlelstv Exp $ */ 2 3 /* 4 * Copyright (c) 2006 Manuel Bouyer. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 */ 27 28 #include <sys/cdefs.h> 29 __KERNEL_RCSID(0, "$NetBSD: ahcisata_core.c,v 1.107 2022/08/01 07:37:18 mlelstv Exp $"); 30 31 #include <sys/types.h> 32 #include <sys/malloc.h> 33 #include <sys/param.h> 34 #include <sys/kernel.h> 35 #include <sys/systm.h> 36 #include <sys/disklabel.h> 37 #include <sys/proc.h> 38 #include <sys/buf.h> 39 40 #include <dev/ata/atareg.h> 41 #include <dev/ata/satavar.h> 42 #include <dev/ata/satareg.h> 43 #include <dev/ata/satafisvar.h> 44 #include <dev/ata/satafisreg.h> 45 #include <dev/ata/satapmpreg.h> 46 #include <dev/ic/ahcisatavar.h> 47 #include <dev/ic/wdcreg.h> 48 49 #include <dev/scsipi/scsi_all.h> /* for SCSI status */ 50 51 #include "atapibus.h" 52 53 #include "opt_ahcisata.h" 54 55 #ifdef AHCI_DEBUG 56 int ahcidebug_mask = 0; 57 #endif 58 59 static void ahci_probe_drive(struct ata_channel *); 60 static void ahci_setup_channel(struct ata_channel *); 61 62 static void ahci_ata_bio(struct ata_drive_datas *, struct ata_xfer *); 63 static int ahci_do_reset_drive(struct ata_channel *, int, int, uint32_t *, 64 uint8_t); 65 static void ahci_reset_drive(struct ata_drive_datas *, int, uint32_t *); 66 static void ahci_reset_channel(struct ata_channel *, int); 67 static void ahci_exec_command(struct ata_drive_datas *, struct ata_xfer *); 68 static int ahci_ata_addref(struct ata_drive_datas *); 69 static void ahci_ata_delref(struct ata_drive_datas *); 70 static void ahci_killpending(struct ata_drive_datas *); 71 72 static int ahci_cmd_start(struct ata_channel *, struct ata_xfer *); 73 static int ahci_cmd_complete(struct ata_channel *, struct ata_xfer *, int); 74 static int ahci_cmd_poll(struct ata_channel *, struct ata_xfer *); 75 static void ahci_cmd_abort(struct ata_channel *, struct ata_xfer *); 76 static void ahci_cmd_done(struct ata_channel *, struct ata_xfer *); 77 static void ahci_cmd_done_end(struct ata_channel *, struct ata_xfer *); 78 static void ahci_cmd_kill_xfer(struct ata_channel *, struct ata_xfer *, int); 79 static int ahci_bio_start(struct ata_channel *, struct ata_xfer *); 80 static int ahci_bio_poll(struct ata_channel *, struct ata_xfer *); 81 static void 
ahci_bio_abort(struct ata_channel *, struct ata_xfer *); 82 static int ahci_bio_complete(struct ata_channel *, struct ata_xfer *, int); 83 static void ahci_bio_kill_xfer(struct ata_channel *, struct ata_xfer *, int) ; 84 static void ahci_channel_stop(struct ahci_softc *, struct ata_channel *, int); 85 static void ahci_channel_start(struct ahci_softc *, struct ata_channel *, 86 int, int); 87 static void ahci_channel_recover(struct ata_channel *, int, uint32_t); 88 static int ahci_dma_setup(struct ata_channel *, int, void *, size_t, int); 89 static int ahci_intr_port_common(struct ata_channel *); 90 91 #if NATAPIBUS > 0 92 static void ahci_atapibus_attach(struct atabus_softc *); 93 static void ahci_atapi_kill_pending(struct scsipi_periph *); 94 static void ahci_atapi_minphys(struct buf *); 95 static void ahci_atapi_scsipi_request(struct scsipi_channel *, 96 scsipi_adapter_req_t, void *); 97 static int ahci_atapi_start(struct ata_channel *, struct ata_xfer *); 98 static int ahci_atapi_poll(struct ata_channel *, struct ata_xfer *); 99 static void ahci_atapi_abort(struct ata_channel *, struct ata_xfer *); 100 static int ahci_atapi_complete(struct ata_channel *, struct ata_xfer *, int); 101 static void ahci_atapi_kill_xfer(struct ata_channel *, struct ata_xfer *, int); 102 static void ahci_atapi_probe_device(struct atapibus_softc *, int); 103 104 static const struct scsipi_bustype ahci_atapi_bustype = { 105 .bustype_type = SCSIPI_BUSTYPE_ATAPI, 106 .bustype_cmd = atapi_scsipi_cmd, 107 .bustype_interpret_sense = atapi_interpret_sense, 108 .bustype_printaddr = atapi_print_addr, 109 .bustype_kill_pending = ahci_atapi_kill_pending, 110 .bustype_async_event_xfer_mode = NULL, 111 }; 112 #endif /* NATAPIBUS */ 113 114 #define ATA_DELAY 10000 /* 10s for a drive I/O */ 115 #define ATA_RESET_DELAY 31000 /* 31s for a drive reset */ 116 #define AHCI_RST_WAIT (ATA_RESET_DELAY / 10) 117 118 #ifndef AHCISATA_EXTRA_DELAY_MS 119 #define AHCISATA_EXTRA_DELAY_MS 500 /* XXX need to adjust */ 120 #endif 121 122 #ifdef AHCISATA_EXTRA_DELAY 123 #define AHCISATA_DO_EXTRA_DELAY(sc, chp, msg, flags) \ 124 ata_delay(chp, AHCISATA_EXTRA_DELAY_MS, msg, flags) 125 #else 126 #define AHCISATA_DO_EXTRA_DELAY(sc, chp, msg, flags) \ 127 do { \ 128 if ((sc)->sc_ahci_quirks & AHCI_QUIRK_EXTRA_DELAY) \ 129 ata_delay(chp, AHCISATA_EXTRA_DELAY_MS, msg, flags); \ 130 } while (0) 131 #endif 132 133 const struct ata_bustype ahci_ata_bustype = { 134 .bustype_type = SCSIPI_BUSTYPE_ATA, 135 .ata_bio = ahci_ata_bio, 136 .ata_reset_drive = ahci_reset_drive, 137 .ata_reset_channel = ahci_reset_channel, 138 .ata_exec_command = ahci_exec_command, 139 .ata_get_params = ata_get_params, 140 .ata_addref = ahci_ata_addref, 141 .ata_delref = ahci_ata_delref, 142 .ata_killpending = ahci_killpending, 143 .ata_recovery = ahci_channel_recover, 144 }; 145 146 static void ahci_setup_port(struct ahci_softc *sc, int i); 147 148 static void 149 ahci_enable(struct ahci_softc *sc) 150 { 151 uint32_t ghc; 152 153 ghc = AHCI_READ(sc, AHCI_GHC); 154 if (!(ghc & AHCI_GHC_AE)) { 155 ghc |= AHCI_GHC_AE; 156 AHCI_WRITE(sc, AHCI_GHC, ghc); 157 } 158 } 159 160 static int 161 ahci_reset(struct ahci_softc *sc) 162 { 163 int i; 164 165 /* reset controller */ 166 AHCI_WRITE(sc, AHCI_GHC, AHCI_GHC_HR); 167 /* wait up to 1s for reset to complete */ 168 for (i = 0; i < 1000; i++) { 169 delay(1000); 170 if ((AHCI_READ(sc, AHCI_GHC) & AHCI_GHC_HR) == 0) 171 break; 172 } 173 if ((AHCI_READ(sc, AHCI_GHC) & AHCI_GHC_HR)) { 174 aprint_error("%s: reset failed\n", AHCINAME(sc)); 175 
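		/*
		 * Note: GHC.HR is set by software to request an HBA reset
		 * and, per the AHCI spec, is cleared by the hardware once
		 * the internal reset completes; if it is still set after
		 * the ~1s of polling above, the controller is treated as
		 * wedged and the attach is abandoned.
		 */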
return -1; 176 } 177 /* enable ahci mode */ 178 ahci_enable(sc); 179 180 if (sc->sc_save_init_data) { 181 AHCI_WRITE(sc, AHCI_CAP, sc->sc_init_data.cap); 182 if (sc->sc_init_data.cap2) 183 AHCI_WRITE(sc, AHCI_CAP2, sc->sc_init_data.cap2); 184 AHCI_WRITE(sc, AHCI_PI, sc->sc_init_data.ports); 185 } 186 187 /* Check if hardware reverted to single message MSI */ 188 sc->sc_ghc_mrsm = ISSET(AHCI_READ(sc, AHCI_GHC), AHCI_GHC_MRSM); 189 190 return 0; 191 } 192 193 static void 194 ahci_setup_ports(struct ahci_softc *sc) 195 { 196 int i, port; 197 198 for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) { 199 if ((sc->sc_ahci_ports & (1U << i)) == 0) 200 continue; 201 if (port >= sc->sc_atac.atac_nchannels) { 202 aprint_error("%s: more ports than announced\n", 203 AHCINAME(sc)); 204 break; 205 } 206 ahci_setup_port(sc, i); 207 port++; 208 } 209 } 210 211 static void 212 ahci_reprobe_drives(struct ahci_softc *sc) 213 { 214 int i, port; 215 struct ahci_channel *achp; 216 struct ata_channel *chp; 217 218 for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) { 219 if ((sc->sc_ahci_ports & (1U << i)) == 0) 220 continue; 221 if (port >= sc->sc_atac.atac_nchannels) { 222 aprint_error("%s: more ports than announced\n", 223 AHCINAME(sc)); 224 break; 225 } 226 achp = &sc->sc_channels[i]; 227 chp = &achp->ata_channel; 228 229 ahci_probe_drive(chp); 230 port++; 231 } 232 } 233 234 static void 235 ahci_setup_port(struct ahci_softc *sc, int i) 236 { 237 struct ahci_channel *achp; 238 239 achp = &sc->sc_channels[i]; 240 241 AHCI_WRITE(sc, AHCI_P_CLB(i), BUS_ADDR_LO32(achp->ahcic_bus_cmdh)); 242 AHCI_WRITE(sc, AHCI_P_CLBU(i), BUS_ADDR_HI32(achp->ahcic_bus_cmdh)); 243 AHCI_WRITE(sc, AHCI_P_FB(i), BUS_ADDR_LO32(achp->ahcic_bus_rfis)); 244 AHCI_WRITE(sc, AHCI_P_FBU(i), BUS_ADDR_HI32(achp->ahcic_bus_rfis)); 245 } 246 247 static void 248 ahci_enable_intrs(struct ahci_softc *sc) 249 { 250 251 /* clear interrupts */ 252 AHCI_WRITE(sc, AHCI_IS, AHCI_READ(sc, AHCI_IS)); 253 /* enable interrupts */ 254 AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE); 255 } 256 257 void 258 ahci_attach(struct ahci_softc *sc) 259 { 260 uint32_t ahci_rev; 261 int i, j, port; 262 struct ahci_channel *achp; 263 struct ata_channel *chp; 264 int error; 265 int dmasize; 266 char buf[128]; 267 void *cmdhp; 268 void *cmdtblp; 269 270 if (sc->sc_save_init_data) { 271 ahci_enable(sc); 272 273 sc->sc_init_data.cap = AHCI_READ(sc, AHCI_CAP); 274 sc->sc_init_data.ports = AHCI_READ(sc, AHCI_PI); 275 276 ahci_rev = AHCI_READ(sc, AHCI_VS); 277 if (AHCI_VS_MJR(ahci_rev) > 1 || 278 (AHCI_VS_MJR(ahci_rev) == 1 && AHCI_VS_MNR(ahci_rev) >= 20)) { 279 sc->sc_init_data.cap2 = AHCI_READ(sc, AHCI_CAP2); 280 } else { 281 sc->sc_init_data.cap2 = 0; 282 } 283 if (sc->sc_init_data.ports == 0) { 284 sc->sc_init_data.ports = sc->sc_ahci_ports; 285 } 286 } 287 288 if (ahci_reset(sc) != 0) 289 return; 290 291 sc->sc_ahci_cap = AHCI_READ(sc, AHCI_CAP); 292 if (sc->sc_ahci_quirks & AHCI_QUIRK_BADPMP) { 293 aprint_verbose_dev(sc->sc_atac.atac_dev, 294 "ignoring broken port multiplier support\n"); 295 sc->sc_ahci_cap &= ~AHCI_CAP_SPM; 296 } 297 if (sc->sc_ahci_quirks & AHCI_QUIRK_BADNCQ) { 298 aprint_verbose_dev(sc->sc_atac.atac_dev, 299 "ignoring broken NCQ support\n"); 300 sc->sc_ahci_cap &= ~AHCI_CAP_NCQ; 301 } 302 sc->sc_atac.atac_nchannels = (sc->sc_ahci_cap & AHCI_CAP_NPMASK) + 1; 303 sc->sc_ncmds = ((sc->sc_ahci_cap & AHCI_CAP_NCS) >> 8) + 1; 304 ahci_rev = AHCI_READ(sc, AHCI_VS); 305 snprintb(buf, sizeof(buf), "\177\020" 306 /* "f\000\005NP\0" */ 307 "b\005SXS\0" 308 
"b\006EMS\0" 309 "b\007CCCS\0" 310 /* "f\010\005NCS\0" */ 311 "b\015PSC\0" 312 "b\016SSC\0" 313 "b\017PMD\0" 314 "b\020FBSS\0" 315 "b\021SPM\0" 316 "b\022SAM\0" 317 "b\023SNZO\0" 318 "f\024\003ISS\0" 319 "=\001Gen1\0" 320 "=\002Gen2\0" 321 "=\003Gen3\0" 322 "b\030SCLO\0" 323 "b\031SAL\0" 324 "b\032SALP\0" 325 "b\033SSS\0" 326 "b\034SMPS\0" 327 "b\035SSNTF\0" 328 "b\036SNCQ\0" 329 "b\037S64A\0" 330 "\0", sc->sc_ahci_cap); 331 aprint_normal_dev(sc->sc_atac.atac_dev, "AHCI revision %u.%u" 332 ", %d port%s, %d slot%s, CAP %s\n", 333 AHCI_VS_MJR(ahci_rev), AHCI_VS_MNR(ahci_rev), 334 sc->sc_atac.atac_nchannels, 335 (sc->sc_atac.atac_nchannels == 1 ? "" : "s"), 336 sc->sc_ncmds, (sc->sc_ncmds == 1 ? "" : "s"), buf); 337 338 sc->sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DMA | ATAC_CAP_UDMA 339 | ((sc->sc_ahci_cap & AHCI_CAP_NCQ) ? ATAC_CAP_NCQ : 0); 340 sc->sc_atac.atac_cap |= sc->sc_atac_capflags; 341 sc->sc_atac.atac_pio_cap = 4; 342 sc->sc_atac.atac_dma_cap = 2; 343 sc->sc_atac.atac_udma_cap = 6; 344 sc->sc_atac.atac_channels = sc->sc_chanarray; 345 sc->sc_atac.atac_probe = ahci_probe_drive; 346 sc->sc_atac.atac_bustype_ata = &ahci_ata_bustype; 347 sc->sc_atac.atac_set_modes = ahci_setup_channel; 348 #if NATAPIBUS > 0 349 sc->sc_atac.atac_atapibus_attach = ahci_atapibus_attach; 350 #endif 351 352 dmasize = 353 (AHCI_RFIS_SIZE + AHCI_CMDH_SIZE) * sc->sc_atac.atac_nchannels; 354 error = bus_dmamem_alloc(sc->sc_dmat, dmasize, PAGE_SIZE, 0, 355 &sc->sc_cmd_hdr_seg, 1, &sc->sc_cmd_hdr_nseg, BUS_DMA_NOWAIT); 356 if (error) { 357 aprint_error("%s: unable to allocate command header memory" 358 ", error=%d\n", AHCINAME(sc), error); 359 return; 360 } 361 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cmd_hdr_seg, 362 sc->sc_cmd_hdr_nseg, dmasize, 363 &cmdhp, BUS_DMA_NOWAIT|BUS_DMA_COHERENT); 364 if (error) { 365 aprint_error("%s: unable to map command header memory" 366 ", error=%d\n", AHCINAME(sc), error); 367 return; 368 } 369 error = bus_dmamap_create(sc->sc_dmat, dmasize, 1, dmasize, 0, 370 BUS_DMA_NOWAIT, &sc->sc_cmd_hdrd); 371 if (error) { 372 aprint_error("%s: unable to create command header map" 373 ", error=%d\n", AHCINAME(sc), error); 374 return; 375 } 376 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmd_hdrd, 377 cmdhp, dmasize, NULL, BUS_DMA_NOWAIT); 378 if (error) { 379 aprint_error("%s: unable to load command header map" 380 ", error=%d\n", AHCINAME(sc), error); 381 return; 382 } 383 sc->sc_cmd_hdr = cmdhp; 384 memset(cmdhp, 0, dmasize); 385 bus_dmamap_sync(sc->sc_dmat, sc->sc_cmd_hdrd, 0, dmasize, 386 BUS_DMASYNC_PREWRITE); 387 388 ahci_enable_intrs(sc); 389 390 if (sc->sc_ahci_ports == 0) { 391 sc->sc_ahci_ports = AHCI_READ(sc, AHCI_PI); 392 AHCIDEBUG_PRINT(("active ports %#x\n", sc->sc_ahci_ports), 393 DEBUG_PROBE); 394 } 395 for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) { 396 if ((sc->sc_ahci_ports & (1U << i)) == 0) 397 continue; 398 if (port >= sc->sc_atac.atac_nchannels) { 399 aprint_error("%s: more ports than announced\n", 400 AHCINAME(sc)); 401 break; 402 } 403 404 /* Optional intr establish per active port */ 405 if (sc->sc_intr_establish && sc->sc_intr_establish(sc, i) != 0){ 406 aprint_error("%s: intr establish hook failed\n", 407 AHCINAME(sc)); 408 break; 409 } 410 411 achp = &sc->sc_channels[i]; 412 chp = &achp->ata_channel; 413 sc->sc_chanarray[i] = chp; 414 chp->ch_channel = i; 415 chp->ch_atac = &sc->sc_atac; 416 chp->ch_queue = ata_queue_alloc(sc->sc_ncmds); 417 if (chp->ch_queue == NULL) { 418 aprint_error("%s port %d: can't allocate memory for " 419 "command queue", 
AHCINAME(sc), i); 420 break; 421 } 422 dmasize = AHCI_CMDTBL_SIZE * sc->sc_ncmds; 423 error = bus_dmamem_alloc(sc->sc_dmat, dmasize, PAGE_SIZE, 0, 424 &achp->ahcic_cmd_tbl_seg, 1, &achp->ahcic_cmd_tbl_nseg, 425 BUS_DMA_NOWAIT); 426 if (error) { 427 aprint_error("%s: unable to allocate command table " 428 "memory, error=%d\n", AHCINAME(sc), error); 429 break; 430 } 431 error = bus_dmamem_map(sc->sc_dmat, &achp->ahcic_cmd_tbl_seg, 432 achp->ahcic_cmd_tbl_nseg, dmasize, 433 &cmdtblp, BUS_DMA_NOWAIT|BUS_DMA_COHERENT); 434 if (error) { 435 aprint_error("%s: unable to map command table memory" 436 ", error=%d\n", AHCINAME(sc), error); 437 break; 438 } 439 error = bus_dmamap_create(sc->sc_dmat, dmasize, 1, dmasize, 0, 440 BUS_DMA_NOWAIT, &achp->ahcic_cmd_tbld); 441 if (error) { 442 aprint_error("%s: unable to create command table map" 443 ", error=%d\n", AHCINAME(sc), error); 444 break; 445 } 446 error = bus_dmamap_load(sc->sc_dmat, achp->ahcic_cmd_tbld, 447 cmdtblp, dmasize, NULL, BUS_DMA_NOWAIT); 448 if (error) { 449 aprint_error("%s: unable to load command table map" 450 ", error=%d\n", AHCINAME(sc), error); 451 break; 452 } 453 memset(cmdtblp, 0, dmasize); 454 bus_dmamap_sync(sc->sc_dmat, achp->ahcic_cmd_tbld, 0, 455 dmasize, BUS_DMASYNC_PREWRITE); 456 achp->ahcic_cmdh = (struct ahci_cmd_header *) 457 ((char *)cmdhp + AHCI_CMDH_SIZE * port); 458 achp->ahcic_bus_cmdh = sc->sc_cmd_hdrd->dm_segs[0].ds_addr + 459 AHCI_CMDH_SIZE * port; 460 achp->ahcic_rfis = (struct ahci_r_fis *) 461 ((char *)cmdhp + 462 AHCI_CMDH_SIZE * sc->sc_atac.atac_nchannels + 463 AHCI_RFIS_SIZE * port); 464 achp->ahcic_bus_rfis = sc->sc_cmd_hdrd->dm_segs[0].ds_addr + 465 AHCI_CMDH_SIZE * sc->sc_atac.atac_nchannels + 466 AHCI_RFIS_SIZE * port; 467 AHCIDEBUG_PRINT(("port %d cmdh %p (0x%" PRIx64 ") " 468 "rfis %p (0x%" PRIx64 ")\n", i, 469 achp->ahcic_cmdh, (uint64_t)achp->ahcic_bus_cmdh, 470 achp->ahcic_rfis, (uint64_t)achp->ahcic_bus_rfis), 471 DEBUG_PROBE); 472 473 for (j = 0; j < sc->sc_ncmds; j++) { 474 achp->ahcic_cmd_tbl[j] = (struct ahci_cmd_tbl *) 475 ((char *)cmdtblp + AHCI_CMDTBL_SIZE * j); 476 achp->ahcic_bus_cmd_tbl[j] = 477 achp->ahcic_cmd_tbld->dm_segs[0].ds_addr + 478 AHCI_CMDTBL_SIZE * j; 479 achp->ahcic_cmdh[j].cmdh_cmdtba = 480 htole64(achp->ahcic_bus_cmd_tbl[j]); 481 AHCIDEBUG_PRINT(("port %d/%d tbl %p (0x%" PRIx64 ")\n", i, j, 482 achp->ahcic_cmd_tbl[j], 483 (uint64_t)achp->ahcic_bus_cmd_tbl[j]), DEBUG_PROBE); 484 /* The xfer DMA map */ 485 error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 486 AHCI_NPRD, 0x400000 /* 4MB */, 0, 487 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 488 &achp->ahcic_datad[j]); 489 if (error) { 490 aprint_error("%s: couldn't alloc xfer DMA map, " 491 "error=%d\n", AHCINAME(sc), error); 492 goto end; 493 } 494 } 495 ahci_setup_port(sc, i); 496 if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih, 497 AHCI_P_SSTS(i), 4, &achp->ahcic_sstatus) != 0) { 498 aprint_error("%s: couldn't map port %d " 499 "sata_status regs\n", AHCINAME(sc), i); 500 break; 501 } 502 if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih, 503 AHCI_P_SCTL(i), 4, &achp->ahcic_scontrol) != 0) { 504 aprint_error("%s: couldn't map port %d " 505 "sata_control regs\n", AHCINAME(sc), i); 506 break; 507 } 508 if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih, 509 AHCI_P_SERR(i), 4, &achp->ahcic_serror) != 0) { 510 aprint_error("%s: couldn't map port %d " 511 "sata_error regs\n", AHCINAME(sc), i); 512 break; 513 } 514 ata_channel_attach(chp); 515 port++; 516 end: 517 continue; 518 } 519 } 520 521 void 522 ahci_childdetached(struct 
ahci_softc *sc, device_t child) 523 { 524 struct ahci_channel *achp; 525 struct ata_channel *chp; 526 527 for (int i = 0; i < AHCI_MAX_PORTS; i++) { 528 achp = &sc->sc_channels[i]; 529 chp = &achp->ata_channel; 530 531 if ((sc->sc_ahci_ports & (1U << i)) == 0) 532 continue; 533 534 if (child == chp->atabus) 535 chp->atabus = NULL; 536 } 537 } 538 539 int 540 ahci_detach(struct ahci_softc *sc, int flags) 541 { 542 struct atac_softc *atac; 543 struct ahci_channel *achp; 544 struct ata_channel *chp; 545 struct scsipi_adapter *adapt; 546 int i, j, port; 547 int error; 548 549 atac = &sc->sc_atac; 550 adapt = &atac->atac_atapi_adapter._generic; 551 552 for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) { 553 achp = &sc->sc_channels[i]; 554 chp = &achp->ata_channel; 555 556 if ((sc->sc_ahci_ports & (1U << i)) == 0) 557 continue; 558 if (port >= sc->sc_atac.atac_nchannels) { 559 aprint_error("%s: more ports than announced\n", 560 AHCINAME(sc)); 561 break; 562 } 563 564 if (chp->atabus != NULL) { 565 if ((error = config_detach(chp->atabus, flags)) != 0) 566 return error; 567 568 KASSERT(chp->atabus == NULL); 569 } 570 571 if (chp->ch_flags & ATACH_DETACHED) 572 continue; 573 574 for (j = 0; j < sc->sc_ncmds; j++) 575 bus_dmamap_destroy(sc->sc_dmat, achp->ahcic_datad[j]); 576 577 bus_dmamap_unload(sc->sc_dmat, achp->ahcic_cmd_tbld); 578 bus_dmamap_destroy(sc->sc_dmat, achp->ahcic_cmd_tbld); 579 bus_dmamem_unmap(sc->sc_dmat, achp->ahcic_cmd_tbl[0], 580 AHCI_CMDTBL_SIZE * sc->sc_ncmds); 581 bus_dmamem_free(sc->sc_dmat, &achp->ahcic_cmd_tbl_seg, 582 achp->ahcic_cmd_tbl_nseg); 583 584 ata_channel_detach(chp); 585 port++; 586 } 587 588 bus_dmamap_unload(sc->sc_dmat, sc->sc_cmd_hdrd); 589 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmd_hdrd); 590 bus_dmamem_unmap(sc->sc_dmat, sc->sc_cmd_hdr, 591 (AHCI_RFIS_SIZE + AHCI_CMDH_SIZE) * sc->sc_atac.atac_nchannels); 592 bus_dmamem_free(sc->sc_dmat, &sc->sc_cmd_hdr_seg, sc->sc_cmd_hdr_nseg); 593 594 if (adapt->adapt_refcnt != 0) 595 return EBUSY; 596 597 return 0; 598 } 599 600 void 601 ahci_resume(struct ahci_softc *sc) 602 { 603 ahci_reset(sc); 604 ahci_setup_ports(sc); 605 ahci_reprobe_drives(sc); 606 ahci_enable_intrs(sc); 607 } 608 609 int 610 ahci_intr(void *v) 611 { 612 struct ahci_softc *sc = v; 613 uint32_t is, ports; 614 int bit, r = 0; 615 616 while ((is = AHCI_READ(sc, AHCI_IS))) { 617 AHCIDEBUG_PRINT(("%s ahci_intr 0x%x\n", AHCINAME(sc), is), 618 DEBUG_INTR); 619 r = 1; 620 ports = is; 621 while ((bit = ffs(ports)) != 0) { 622 bit--; 623 ahci_intr_port_common(&sc->sc_channels[bit].ata_channel); 624 ports &= ~__BIT(bit); 625 } 626 AHCI_WRITE(sc, AHCI_IS, is); 627 } 628 629 return r; 630 } 631 632 int 633 ahci_intr_port(void *v) 634 { 635 struct ahci_channel *achp = v; 636 struct ata_channel *chp = &achp->ata_channel; 637 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 638 int ret; 639 640 ret = ahci_intr_port_common(chp); 641 if (ret) { 642 AHCI_WRITE(sc, AHCI_IS, 1U << chp->ch_channel); 643 } 644 645 return ret; 646 } 647 648 static int 649 ahci_intr_port_common(struct ata_channel *chp) 650 { 651 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 652 uint32_t is, tfd, sact; 653 struct ata_xfer *xfer; 654 int slot = -1; 655 bool recover = false; 656 uint32_t aslots; 657 658 is = AHCI_READ(sc, AHCI_P_IS(chp->ch_channel)); 659 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), is); 660 661 AHCIDEBUG_PRINT(("ahci_intr_port_common %s port %d " 662 "is 0x%x CI 0x%x SACT 0x%x TFD 0x%x\n", 663 AHCINAME(sc), 664 chp->ch_channel, is, 665 AHCI_READ(sc, 
AHCI_P_CI(chp->ch_channel)), 666 AHCI_READ(sc, AHCI_P_SACT(chp->ch_channel)), 667 AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel))), 668 DEBUG_INTR); 669 670 if ((chp->ch_flags & ATACH_NCQ) == 0) { 671 /* Non-NCQ operation */ 672 sact = AHCI_READ(sc, AHCI_P_CI(chp->ch_channel)); 673 } else { 674 /* NCQ operation */ 675 sact = AHCI_READ(sc, AHCI_P_SACT(chp->ch_channel)); 676 } 677 678 /* Handle errors */ 679 if (is & (AHCI_P_IX_TFES | AHCI_P_IX_HBFS | AHCI_P_IX_HBDS | 680 AHCI_P_IX_IFS | AHCI_P_IX_OFS | AHCI_P_IX_UFS)) { 681 /* Fatal errors */ 682 if (is & AHCI_P_IX_TFES) { 683 tfd = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel)); 684 685 if ((chp->ch_flags & ATACH_NCQ) == 0) { 686 /* Slot valid only for Non-NCQ operation */ 687 slot = (AHCI_READ(sc, 688 AHCI_P_CMD(chp->ch_channel)) 689 & AHCI_P_CMD_CCS_MASK) 690 >> AHCI_P_CMD_CCS_SHIFT; 691 } 692 693 AHCIDEBUG_PRINT(( 694 "%s port %d: TFE: sact 0x%x is 0x%x tfd 0x%x\n", 695 AHCINAME(sc), chp->ch_channel, sact, is, tfd), 696 DEBUG_INTR); 697 } else { 698 /* mark an error, and set BSY */ 699 tfd = (WDCE_ABRT << AHCI_P_TFD_ERR_SHIFT) | 700 WDCS_ERR | WDCS_BSY; 701 } 702 703 if (is & AHCI_P_IX_IFS) { 704 AHCIDEBUG_PRINT(("%s port %d: SERR 0x%x\n", 705 AHCINAME(sc), chp->ch_channel, 706 AHCI_READ(sc, AHCI_P_SERR(chp->ch_channel))), 707 DEBUG_INTR); 708 } 709 710 if (!ISSET(chp->ch_flags, ATACH_RECOVERING)) 711 recover = true; 712 } else if (is & (AHCI_P_IX_DHRS|AHCI_P_IX_SDBS)) { 713 tfd = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel)); 714 715 /* D2H Register FIS or Set Device Bits */ 716 if ((tfd & WDCS_ERR) != 0) { 717 if (!ISSET(chp->ch_flags, ATACH_RECOVERING)) 718 recover = true; 719 720 AHCIDEBUG_PRINT(("%s port %d: transfer aborted 0x%x\n", 721 AHCINAME(sc), chp->ch_channel, tfd), DEBUG_INTR); 722 } 723 } else { 724 tfd = 0; 725 } 726 727 if (__predict_false(recover)) 728 ata_channel_freeze(chp); 729 730 aslots = ata_queue_active(chp); 731 732 if (slot >= 0) { 733 if ((aslots & __BIT(slot)) != 0 && 734 (sact & __BIT(slot)) == 0) { 735 xfer = ata_queue_hwslot_to_xfer(chp, slot); 736 xfer->ops->c_intr(chp, xfer, tfd); 737 } 738 } else { 739 /* 740 * For NCQ, HBA halts processing when error is notified, 741 * and any further D2H FISes are ignored until the error 742 * condition is cleared. Hence if a command is inactive, 743 * it means it actually already finished successfully. 744 * Note: active slots can change as c_intr() callback 745 * can activate another command(s), so must only process 746 * commands active before we start processing. 
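		 *
		 * Illustration (example values only): if slots 0-2 were
		 * active (aslots == 0x07) and SACT reads back as 0x04 after
		 * the error, slots 0 and 1 completed successfully and are
		 * finished below, while slot 2 is still outstanding and is
		 * left for the recovery path.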
747 */ 748 749 for (slot = 0; slot < sc->sc_ncmds; slot++) { 750 if ((aslots & __BIT(slot)) != 0 && 751 (sact & __BIT(slot)) == 0) { 752 xfer = ata_queue_hwslot_to_xfer(chp, slot); 753 xfer->ops->c_intr(chp, xfer, tfd); 754 } 755 } 756 } 757 758 if (__predict_false(recover)) { 759 ata_channel_lock(chp); 760 ata_channel_thaw_locked(chp); 761 ata_thread_run(chp, 0, ATACH_TH_RECOVERY, tfd); 762 ata_channel_unlock(chp); 763 } 764 765 return 1; 766 } 767 768 static void 769 ahci_reset_drive(struct ata_drive_datas *drvp, int flags, uint32_t *sigp) 770 { 771 struct ata_channel *chp = drvp->chnl_softc; 772 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 773 uint8_t c_slot; 774 775 ata_channel_lock_owned(chp); 776 777 /* get a slot for running the command on */ 778 if (!ata_queue_alloc_slot(chp, &c_slot, ATA_MAX_OPENINGS)) { 779 panic("%s: %s: failed to get xfer for reset, port %d\n", 780 device_xname(sc->sc_atac.atac_dev), 781 __func__, chp->ch_channel); 782 /* NOTREACHED */ 783 } 784 785 AHCI_WRITE(sc, AHCI_GHC, 786 AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE); 787 ahci_channel_stop(sc, chp, flags); 788 ahci_do_reset_drive(chp, drvp->drive, flags, sigp, c_slot); 789 AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE); 790 791 ata_queue_free_slot(chp, c_slot); 792 } 793 794 /* return error code from ata_bio */ 795 static int 796 ahci_exec_fis(struct ata_channel *chp, int timeout, int flags, int slot) 797 { 798 struct ahci_channel *achp = (struct ahci_channel *)chp; 799 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 800 int i; 801 uint32_t is; 802 803 /* 804 * Base timeout is specified in ms. Delay for 10ms 805 * on each round. 806 */ 807 timeout = timeout / 10; 808 809 AHCI_CMDTBL_SYNC(sc, achp, slot, BUS_DMASYNC_PREWRITE); 810 AHCI_CMDH_SYNC(sc, achp, slot, 811 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 812 /* start command */ 813 AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << slot); 814 for (i = 0; i < timeout; i++) { 815 if ((AHCI_READ(sc, AHCI_P_CI(chp->ch_channel)) & (1U << slot)) == 816 0) 817 return 0; 818 is = AHCI_READ(sc, AHCI_P_IS(chp->ch_channel)); 819 if (is & (AHCI_P_IX_TFES | AHCI_P_IX_HBFS | AHCI_P_IX_HBDS | 820 AHCI_P_IX_IFS | 821 AHCI_P_IX_OFS | AHCI_P_IX_UFS)) { 822 if ((is & (AHCI_P_IX_DHRS|AHCI_P_IX_TFES)) == 823 (AHCI_P_IX_DHRS|AHCI_P_IX_TFES)) { 824 /* 825 * we got the D2H FIS anyway, 826 * assume sig is valid. 827 * channel is restarted later 828 */ 829 return ERROR; 830 } 831 aprint_debug("%s port %d: error 0x%x sending FIS\n", 832 AHCINAME(sc), chp->ch_channel, is); 833 return ERR_DF; 834 } 835 ata_delay(chp, 10, "ahcifis", flags); 836 } 837 838 aprint_debug("%s port %d: timeout sending FIS\n", 839 AHCINAME(sc), chp->ch_channel); 840 return TIMEOUT; 841 } 842 843 static int 844 ahci_do_reset_drive(struct ata_channel *chp, int drive, int flags, 845 uint32_t *sigp, uint8_t c_slot) 846 { 847 struct ahci_channel *achp = (struct ahci_channel *)chp; 848 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 849 struct ahci_cmd_tbl *cmd_tbl; 850 struct ahci_cmd_header *cmd_h; 851 int i, error = 0; 852 uint32_t sig, cmd; 853 int noclo_retry = 0, retry; 854 855 ata_channel_lock_owned(chp); 856 857 again: 858 /* clear port interrupt register */ 859 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff); 860 /* clear SErrors and start operations */ 861 if ((sc->sc_ahci_cap & AHCI_CAP_CLO) == AHCI_CAP_CLO) { 862 /* 863 * issue a command list override to clear BSY. 
		 * This is needed if there's a PMP with no drive
		 * on port 0
		 */
		ahci_channel_start(sc, chp, flags, 1);
	} else {
		/* Can't handle command still running without CLO */
		cmd = AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel));
		if ((cmd & AHCI_P_CMD_CR) != 0) {
			ahci_channel_stop(sc, chp, flags);
			cmd = AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel));
			if ((cmd & AHCI_P_CMD_CR) != 0) {
				aprint_error("%s port %d: DMA engine busy "
				    "for drive %d\n", AHCINAME(sc),
				    chp->ch_channel, drive);
				error = EBUSY;
				goto end;
			}
		}

		KASSERT((AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CR) == 0);

		ahci_channel_start(sc, chp, flags, 0);
	}
	if (drive > 0) {
		KASSERT(sc->sc_ahci_cap & AHCI_CAP_SPM);
	}

	/* polled command, assume interrupts are disabled */

	cmd_h = &achp->ahcic_cmdh[c_slot];
	cmd_tbl = achp->ahcic_cmd_tbl[c_slot];
	cmd_h->cmdh_flags = htole16(AHCI_CMDH_F_RST | AHCI_CMDH_F_CBSY |
	    RHD_FISLEN / 4 | (drive << AHCI_CMDH_F_PMP_SHIFT));
	cmd_h->cmdh_prdtl = 0;
	cmd_h->cmdh_prdbc = 0;
	memset(cmd_tbl->cmdt_cfis, 0, 64);
	cmd_tbl->cmdt_cfis[fis_type] = RHD_FISTYPE;
	cmd_tbl->cmdt_cfis[rhd_c] = drive;
	cmd_tbl->cmdt_cfis[rhd_control] = WDCTL_RST | WDCTL_4BIT;
	switch (ahci_exec_fis(chp, 100, flags, c_slot)) {
	case ERR_DF:
	case TIMEOUT:
		/*
		 * Without CLO we can't make sure a software reset will
		 * succeed, as the drive may still have BSY or DRQ set.
		 * In this case, reset the whole channel and retry the
		 * drive reset.  The channel reset should clear BSY and DRQ.
		 */
		if ((sc->sc_ahci_cap & AHCI_CAP_CLO) == 0 && noclo_retry == 0) {
			noclo_retry++;
			ahci_reset_channel(chp, flags);
			goto again;
		}
		aprint_error("%s port %d: setting WDCTL_RST failed "
		    "for drive %d\n", AHCINAME(sc), chp->ch_channel, drive);
		error = EBUSY;
		goto end;
	default:
		break;
	}

	/*
	 * The SATA specification has a toggle period of 5 usec for the SRST
	 * bit.  Some controllers fail to process the SRST clear operation
	 * unless we wait for at least this period between the set and clear
	 * commands.
	 */
	ata_delay(chp, 10, "ahcirstw", flags);

	/*
	 * Try to clear WDCTL_RST a few times before giving up.
	 */
	for (error = EBUSY, retry = 0; error != 0 && retry < 5; retry++) {
		cmd_h->cmdh_flags = htole16(RHD_FISLEN / 4 |
		    (drive << AHCI_CMDH_F_PMP_SHIFT));
		cmd_h->cmdh_prdbc = 0;
		memset(cmd_tbl->cmdt_cfis, 0, 64);
		cmd_tbl->cmdt_cfis[fis_type] = RHD_FISTYPE;
		cmd_tbl->cmdt_cfis[rhd_c] = drive;
		cmd_tbl->cmdt_cfis[rhd_control] = WDCTL_4BIT;
		switch (ahci_exec_fis(chp, 310, flags, c_slot)) {
		case ERR_DF:
		case TIMEOUT:
			error = EBUSY;
			break;
		default:
			error = 0;
			break;
		}
		if (error == 0) {
			break;
		}
	}
	if (error == EBUSY) {
		aprint_error("%s port %d: clearing WDCTL_RST failed "
		    "for drive %d\n", AHCINAME(sc), chp->ch_channel, drive);
		goto end;
	}

	/*
	 * wait 31s for BSY to clear
	 * This should not be needed, but some controllers clear the
	 * command slot before receiving the D2H FIS ...
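	 * (i.e. PxCI may read 0 before the signature D2H FIS has been
	 * posted), so poll PxTFD.STS below until BSY drops or
	 * AHCI_RST_WAIT rounds of 10ms (the 31s ATA_RESET_DELAY) expire.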
966 */ 967 for (i = 0; i < AHCI_RST_WAIT; i++) { 968 sig = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel)); 969 if ((__SHIFTOUT(sig, AHCI_P_TFD_ST) & WDCS_BSY) == 0) 970 break; 971 ata_delay(chp, 10, "ahcid2h", flags); 972 } 973 if (i == AHCI_RST_WAIT) { 974 aprint_error("%s: BSY never cleared, TD 0x%x\n", 975 AHCINAME(sc), sig); 976 goto end; 977 } 978 AHCIDEBUG_PRINT(("%s: BSY took %d ms\n", AHCINAME(sc), i * 10), 979 DEBUG_PROBE); 980 sig = AHCI_READ(sc, AHCI_P_SIG(chp->ch_channel)); 981 if (sigp) 982 *sigp = sig; 983 AHCIDEBUG_PRINT(("%s: port %d: sig=0x%x CMD=0x%x\n", 984 AHCINAME(sc), chp->ch_channel, sig, 985 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel))), DEBUG_PROBE); 986 end: 987 ahci_channel_stop(sc, chp, flags); 988 AHCISATA_DO_EXTRA_DELAY(sc, chp, "ahcirst", flags); 989 /* clear port interrupt register */ 990 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff); 991 ahci_channel_start(sc, chp, flags, 992 (sc->sc_ahci_cap & AHCI_CAP_CLO) ? 1 : 0); 993 return error; 994 } 995 996 static void 997 ahci_reset_channel(struct ata_channel *chp, int flags) 998 { 999 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1000 struct ahci_channel *achp = (struct ahci_channel *)chp; 1001 int i, tfd; 1002 1003 ata_channel_lock_owned(chp); 1004 1005 ahci_channel_stop(sc, chp, flags); 1006 if (sata_reset_interface(chp, sc->sc_ahcit, achp->ahcic_scontrol, 1007 achp->ahcic_sstatus, flags) != SStatus_DET_DEV) { 1008 printf("%s: port %d reset failed\n", AHCINAME(sc), chp->ch_channel); 1009 /* XXX and then ? */ 1010 } 1011 ata_kill_active(chp, KILL_RESET, flags); 1012 AHCISATA_DO_EXTRA_DELAY(sc, chp, "ahcirst", flags); 1013 /* clear port interrupt register */ 1014 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff); 1015 /* clear SErrors and start operations */ 1016 ahci_channel_start(sc, chp, flags, 1017 (sc->sc_ahci_cap & AHCI_CAP_CLO) ? 
1 : 0); 1018 /* wait 31s for BSY to clear */ 1019 for (i = 0; i < AHCI_RST_WAIT; i++) { 1020 tfd = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel)); 1021 if ((AHCI_TFD_ST(tfd) & WDCS_BSY) == 0) 1022 break; 1023 ata_delay(chp, 10, "ahcid2h", flags); 1024 } 1025 if ((AHCI_TFD_ST(tfd) & WDCS_BSY) != 0) 1026 aprint_error("%s: BSY never cleared, TD 0x%x\n", 1027 AHCINAME(sc), tfd); 1028 AHCIDEBUG_PRINT(("%s: BSY took %d ms\n", AHCINAME(sc), i * 10), 1029 DEBUG_PROBE); 1030 /* clear port interrupt register */ 1031 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff); 1032 1033 return; 1034 } 1035 1036 static int 1037 ahci_ata_addref(struct ata_drive_datas *drvp) 1038 { 1039 return 0; 1040 } 1041 1042 static void 1043 ahci_ata_delref(struct ata_drive_datas *drvp) 1044 { 1045 return; 1046 } 1047 1048 static void 1049 ahci_killpending(struct ata_drive_datas *drvp) 1050 { 1051 return; 1052 } 1053 1054 static void 1055 ahci_probe_drive(struct ata_channel *chp) 1056 { 1057 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1058 struct ahci_channel *achp = (struct ahci_channel *)chp; 1059 uint32_t sig; 1060 uint8_t c_slot; 1061 int error; 1062 1063 ata_channel_lock(chp); 1064 1065 /* get a slot for running the command on */ 1066 if (!ata_queue_alloc_slot(chp, &c_slot, ATA_MAX_OPENINGS)) { 1067 aprint_error_dev(sc->sc_atac.atac_dev, 1068 "%s: failed to get xfer port %d\n", 1069 __func__, chp->ch_channel); 1070 ata_channel_unlock(chp); 1071 return; 1072 } 1073 1074 /* bring interface up, accept FISs, power up and spin up device */ 1075 AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), 1076 AHCI_P_CMD_ICC_AC | AHCI_P_CMD_FRE | 1077 AHCI_P_CMD_POD | AHCI_P_CMD_SUD); 1078 /* reset the PHY and bring online */ 1079 switch (sata_reset_interface(chp, sc->sc_ahcit, achp->ahcic_scontrol, 1080 achp->ahcic_sstatus, AT_WAIT)) { 1081 case SStatus_DET_DEV: 1082 AHCISATA_DO_EXTRA_DELAY(sc, chp, "ahcidv", AT_WAIT); 1083 1084 /* Initial value, used in case the soft reset fails */ 1085 sig = AHCI_READ(sc, AHCI_P_SIG(chp->ch_channel)); 1086 1087 if (sc->sc_ahci_cap & AHCI_CAP_SPM) { 1088 error = ahci_do_reset_drive(chp, PMP_PORT_CTL, AT_WAIT, 1089 &sig, c_slot); 1090 1091 /* If probe for PMP failed, just fallback to drive 0 */ 1092 if (error) { 1093 aprint_error("%s port %d: drive %d reset " 1094 "failed, disabling PMP\n", 1095 AHCINAME(sc), chp->ch_channel, 1096 PMP_PORT_CTL); 1097 1098 sc->sc_ahci_cap &= ~AHCI_CAP_SPM; 1099 ahci_reset_channel(chp, AT_WAIT); 1100 } 1101 } else { 1102 ahci_do_reset_drive(chp, 0, AT_WAIT, &sig, c_slot); 1103 } 1104 sata_interpret_sig(chp, 0, sig); 1105 /* if we have a PMP attached, inform the controller */ 1106 if (chp->ch_ndrives > PMP_PORT_CTL && 1107 chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) { 1108 AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), 1109 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) | 1110 AHCI_P_CMD_PMA); 1111 } 1112 /* clear port interrupt register */ 1113 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff); 1114 1115 /* and enable interrupts */ 1116 AHCI_WRITE(sc, AHCI_P_IE(chp->ch_channel), 1117 AHCI_P_IX_TFES | AHCI_P_IX_HBFS | AHCI_P_IX_HBDS | 1118 AHCI_P_IX_IFS | 1119 AHCI_P_IX_OFS | AHCI_P_IX_DPS | AHCI_P_IX_UFS | 1120 AHCI_P_IX_PSS | AHCI_P_IX_DHRS | AHCI_P_IX_SDBS); 1121 /* 1122 * optionally, wait AHCISATA_EXTRA_DELAY_MS msec before 1123 * actually starting operations 1124 */ 1125 AHCISATA_DO_EXTRA_DELAY(sc, chp, "ahciprb", AT_WAIT); 1126 break; 1127 1128 default: 1129 break; 1130 } 1131 1132 ata_queue_free_slot(chp, c_slot); 1133 1134 
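	/*
	 * Note that the port interrupt mask above is only programmed in
	 * the SStatus_DET_DEV case; for an empty port PxIE is left
	 * untouched and the port simply stays quiet.
	 */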
ata_channel_unlock(chp); 1135 } 1136 1137 static void 1138 ahci_setup_channel(struct ata_channel *chp) 1139 { 1140 return; 1141 } 1142 1143 static const struct ata_xfer_ops ahci_cmd_xfer_ops = { 1144 .c_start = ahci_cmd_start, 1145 .c_poll = ahci_cmd_poll, 1146 .c_abort = ahci_cmd_abort, 1147 .c_intr = ahci_cmd_complete, 1148 .c_kill_xfer = ahci_cmd_kill_xfer, 1149 }; 1150 1151 static void 1152 ahci_exec_command(struct ata_drive_datas *drvp, struct ata_xfer *xfer) 1153 { 1154 struct ata_channel *chp = drvp->chnl_softc; 1155 struct ata_command *ata_c = &xfer->c_ata_c; 1156 1157 AHCIDEBUG_PRINT(("ahci_exec_command port %d CI 0x%x\n", 1158 chp->ch_channel, 1159 AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CI(chp->ch_channel))), 1160 DEBUG_XFERS); 1161 if (ata_c->flags & AT_POLL) 1162 xfer->c_flags |= C_POLL; 1163 if (ata_c->flags & AT_WAIT) 1164 xfer->c_flags |= C_WAIT; 1165 xfer->c_drive = drvp->drive; 1166 xfer->c_databuf = ata_c->data; 1167 xfer->c_bcount = ata_c->bcount; 1168 xfer->ops = &ahci_cmd_xfer_ops; 1169 1170 ata_exec_xfer(chp, xfer); 1171 } 1172 1173 static int 1174 ahci_cmd_start(struct ata_channel *chp, struct ata_xfer *xfer) 1175 { 1176 struct ahci_softc *sc = AHCI_CH2SC(chp); 1177 struct ahci_channel *achp = (struct ahci_channel *)chp; 1178 struct ata_command *ata_c = &xfer->c_ata_c; 1179 int slot = xfer->c_slot; 1180 struct ahci_cmd_tbl *cmd_tbl; 1181 struct ahci_cmd_header *cmd_h; 1182 1183 AHCIDEBUG_PRINT(("ahci_cmd_start CI 0x%x timo %d\n slot %d", 1184 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel)), 1185 ata_c->timeout, slot), 1186 DEBUG_XFERS); 1187 1188 ata_channel_lock_owned(chp); 1189 1190 cmd_tbl = achp->ahcic_cmd_tbl[slot]; 1191 AHCIDEBUG_PRINT(("%s port %d tbl %p\n", AHCINAME(sc), chp->ch_channel, 1192 cmd_tbl), DEBUG_XFERS); 1193 1194 satafis_rhd_construct_cmd(ata_c, cmd_tbl->cmdt_cfis); 1195 cmd_tbl->cmdt_cfis[rhd_c] |= xfer->c_drive; 1196 1197 cmd_h = &achp->ahcic_cmdh[slot]; 1198 AHCIDEBUG_PRINT(("%s port %d header %p\n", AHCINAME(sc), 1199 chp->ch_channel, cmd_h), DEBUG_XFERS); 1200 if (ahci_dma_setup(chp, slot, 1201 (ata_c->flags & (AT_READ|AT_WRITE) && ata_c->bcount > 0) ? 1202 ata_c->data : NULL, 1203 ata_c->bcount, 1204 (ata_c->flags & AT_READ) ? BUS_DMA_READ : BUS_DMA_WRITE)) { 1205 ata_c->flags |= AT_DF; 1206 return ATASTART_ABORT; 1207 } 1208 cmd_h->cmdh_flags = htole16( 1209 ((ata_c->flags & AT_WRITE) ? AHCI_CMDH_F_WR : 0) | 1210 RHD_FISLEN / 4 | (xfer->c_drive << AHCI_CMDH_F_PMP_SHIFT)); 1211 cmd_h->cmdh_prdbc = 0; 1212 AHCI_CMDH_SYNC(sc, achp, slot, 1213 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1214 1215 if (ata_c->flags & AT_POLL) { 1216 /* polled command, disable interrupts */ 1217 AHCI_WRITE(sc, AHCI_GHC, 1218 AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE); 1219 } 1220 /* start command */ 1221 AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << slot); 1222 1223 if ((ata_c->flags & AT_POLL) == 0) { 1224 callout_reset(&chp->c_timo_callout, mstohz(ata_c->timeout), 1225 ata_timeout, chp); 1226 return ATASTART_STARTED; 1227 } else 1228 return ATASTART_POLL; 1229 } 1230 1231 static int 1232 ahci_cmd_poll(struct ata_channel *chp, struct ata_xfer *xfer) 1233 { 1234 struct ahci_softc *sc = AHCI_CH2SC(chp); 1235 struct ahci_channel *achp = (struct ahci_channel *)chp; 1236 1237 ata_channel_lock(chp); 1238 1239 /* 1240 * Polled command. 
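	 * Interrupts were masked in ahci_cmd_start() for AT_POLL commands,
	 * so progress is made by calling ahci_intr_port() directly every
	 * 10ms until the command is flagged AT_DONE or the caller's
	 * timeout expires; GHC.IE is re-enabled once polling is finished.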
1241 */ 1242 for (int i = 0; i < xfer->c_ata_c.timeout / 10; i++) { 1243 if (xfer->c_ata_c.flags & AT_DONE) 1244 break; 1245 ata_channel_unlock(chp); 1246 ahci_intr_port(achp); 1247 ata_channel_lock(chp); 1248 ata_delay(chp, 10, "ahcipl", xfer->c_ata_c.flags); 1249 } 1250 AHCIDEBUG_PRINT(("%s port %d poll end GHC 0x%x IS 0x%x list 0x%x%x fis 0x%x%x CMD 0x%x CI 0x%x\n", AHCINAME(sc), chp->ch_channel, 1251 AHCI_READ(sc, AHCI_GHC), AHCI_READ(sc, AHCI_IS), 1252 AHCI_READ(sc, AHCI_P_CLBU(chp->ch_channel)), 1253 AHCI_READ(sc, AHCI_P_CLB(chp->ch_channel)), 1254 AHCI_READ(sc, AHCI_P_FBU(chp->ch_channel)), 1255 AHCI_READ(sc, AHCI_P_FB(chp->ch_channel)), 1256 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)), 1257 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))), 1258 DEBUG_XFERS); 1259 1260 ata_channel_unlock(chp); 1261 1262 if ((xfer->c_ata_c.flags & AT_DONE) == 0) { 1263 xfer->c_ata_c.flags |= AT_TIMEOU; 1264 xfer->ops->c_intr(chp, xfer, 0); 1265 } 1266 /* reenable interrupts */ 1267 AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE); 1268 1269 return ATAPOLL_DONE; 1270 } 1271 1272 static void 1273 ahci_cmd_abort(struct ata_channel *chp, struct ata_xfer *xfer) 1274 { 1275 ahci_cmd_complete(chp, xfer, 0); 1276 } 1277 1278 static void 1279 ahci_cmd_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason) 1280 { 1281 struct ata_command *ata_c = &xfer->c_ata_c; 1282 bool deactivate = true; 1283 1284 AHCIDEBUG_PRINT(("ahci_cmd_kill_xfer port %d\n", chp->ch_channel), 1285 DEBUG_FUNCS); 1286 1287 switch (reason) { 1288 case KILL_GONE_INACTIVE: 1289 deactivate = false; 1290 /* FALLTHROUGH */ 1291 case KILL_GONE: 1292 ata_c->flags |= AT_GONE; 1293 break; 1294 case KILL_RESET: 1295 ata_c->flags |= AT_RESET; 1296 break; 1297 case KILL_REQUEUE: 1298 panic("%s: not supposed to be requeued\n", __func__); 1299 break; 1300 default: 1301 printf("ahci_cmd_kill_xfer: unknown reason %d\n", reason); 1302 panic("ahci_cmd_kill_xfer"); 1303 } 1304 1305 ahci_cmd_done_end(chp, xfer); 1306 1307 if (deactivate) 1308 ata_deactivate_xfer(chp, xfer); 1309 } 1310 1311 static int 1312 ahci_cmd_complete(struct ata_channel *chp, struct ata_xfer *xfer, int tfd) 1313 { 1314 struct ata_command *ata_c = &xfer->c_ata_c; 1315 struct ahci_channel *achp = (struct ahci_channel *)chp; 1316 struct ahci_softc *sc = AHCI_CH2SC(chp); 1317 1318 AHCIDEBUG_PRINT(("ahci_cmd_complete port %d CMD 0x%x CI 0x%x\n", 1319 chp->ch_channel, 1320 AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CMD(chp->ch_channel)), 1321 AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CI(chp->ch_channel))), 1322 DEBUG_FUNCS); 1323 1324 if (ata_waitdrain_xfer_check(chp, xfer)) 1325 return 0; 1326 1327 if (xfer->c_flags & C_TIMEOU) { 1328 ata_c->flags |= AT_TIMEOU; 1329 } 1330 1331 if (AHCI_TFD_ST(tfd) & WDCS_BSY) { 1332 ata_c->flags |= AT_TIMEOU; 1333 } else if (AHCI_TFD_ST(tfd) & WDCS_ERR) { 1334 ata_c->r_error = AHCI_TFD_ERR(tfd); 1335 ata_c->flags |= AT_ERROR; 1336 } 1337 1338 if (ata_c->flags & AT_READREG) { 1339 AHCI_RFIS_SYNC(sc, achp, BUS_DMASYNC_POSTREAD); 1340 satafis_rdh_cmd_readreg(ata_c, achp->ahcic_rfis->rfis_rfis); 1341 } 1342 1343 ahci_cmd_done(chp, xfer); 1344 1345 ata_deactivate_xfer(chp, xfer); 1346 1347 if ((ata_c->flags & (AT_TIMEOU|AT_ERROR)) == 0) 1348 atastart(chp); 1349 1350 return 0; 1351 } 1352 1353 static void 1354 ahci_cmd_done(struct ata_channel *chp, struct ata_xfer *xfer) 1355 { 1356 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1357 struct ahci_channel *achp = (struct ahci_channel *)chp; 1358 struct ata_command *ata_c = &xfer->c_ata_c; 1359 
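	/*
	 * Completion path shared by polled and interrupt-driven commands:
	 * unload the data DMA map, byte-swap IDENTIFY data for ata(4) and
	 * use the PRD byte count written back by the HBA to record whether
	 * any data was actually transferred.
	 */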
uint16_t *idwordbuf; 1360 int i; 1361 1362 AHCIDEBUG_PRINT(("ahci_cmd_done port %d flags %#x/%#x\n", 1363 chp->ch_channel, xfer->c_flags, ata_c->flags), DEBUG_FUNCS); 1364 1365 if (ata_c->flags & (AT_READ|AT_WRITE) && ata_c->bcount > 0) { 1366 bus_dmamap_t map = achp->ahcic_datad[xfer->c_slot]; 1367 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1368 (ata_c->flags & AT_READ) ? BUS_DMASYNC_POSTREAD : 1369 BUS_DMASYNC_POSTWRITE); 1370 bus_dmamap_unload(sc->sc_dmat, map); 1371 } 1372 1373 AHCI_CMDH_SYNC(sc, achp, xfer->c_slot, 1374 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1375 1376 /* ata(4) expects IDENTIFY data to be in host endianness */ 1377 if (ata_c->r_command == WDCC_IDENTIFY || 1378 ata_c->r_command == ATAPI_IDENTIFY_DEVICE) { 1379 idwordbuf = xfer->c_databuf; 1380 for (i = 0; i < (xfer->c_bcount / sizeof(*idwordbuf)); i++) { 1381 idwordbuf[i] = le16toh(idwordbuf[i]); 1382 } 1383 } 1384 1385 if (achp->ahcic_cmdh[xfer->c_slot].cmdh_prdbc) 1386 ata_c->flags |= AT_XFDONE; 1387 1388 ahci_cmd_done_end(chp, xfer); 1389 } 1390 1391 static void 1392 ahci_cmd_done_end(struct ata_channel *chp, struct ata_xfer *xfer) 1393 { 1394 struct ata_command *ata_c = &xfer->c_ata_c; 1395 1396 ata_c->flags |= AT_DONE; 1397 } 1398 1399 static const struct ata_xfer_ops ahci_bio_xfer_ops = { 1400 .c_start = ahci_bio_start, 1401 .c_poll = ahci_bio_poll, 1402 .c_abort = ahci_bio_abort, 1403 .c_intr = ahci_bio_complete, 1404 .c_kill_xfer = ahci_bio_kill_xfer, 1405 }; 1406 1407 static void 1408 ahci_ata_bio(struct ata_drive_datas *drvp, struct ata_xfer *xfer) 1409 { 1410 struct ata_channel *chp = drvp->chnl_softc; 1411 struct ata_bio *ata_bio = &xfer->c_bio; 1412 1413 AHCIDEBUG_PRINT(("ahci_ata_bio port %d CI 0x%x\n", 1414 chp->ch_channel, 1415 AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CI(chp->ch_channel))), 1416 DEBUG_XFERS); 1417 if (ata_bio->flags & ATA_POLL) 1418 xfer->c_flags |= C_POLL; 1419 xfer->c_drive = drvp->drive; 1420 xfer->c_databuf = ata_bio->databuf; 1421 xfer->c_bcount = ata_bio->bcount; 1422 xfer->ops = &ahci_bio_xfer_ops; 1423 ata_exec_xfer(chp, xfer); 1424 } 1425 1426 static int 1427 ahci_bio_start(struct ata_channel *chp, struct ata_xfer *xfer) 1428 { 1429 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1430 struct ahci_channel *achp = (struct ahci_channel *)chp; 1431 struct ata_bio *ata_bio = &xfer->c_bio; 1432 struct ahci_cmd_tbl *cmd_tbl; 1433 struct ahci_cmd_header *cmd_h; 1434 1435 AHCIDEBUG_PRINT(("ahci_bio_start CI 0x%x\n", 1436 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))), DEBUG_XFERS); 1437 1438 ata_channel_lock_owned(chp); 1439 1440 cmd_tbl = achp->ahcic_cmd_tbl[xfer->c_slot]; 1441 AHCIDEBUG_PRINT(("%s port %d tbl %p\n", AHCINAME(sc), chp->ch_channel, 1442 cmd_tbl), DEBUG_XFERS); 1443 1444 satafis_rhd_construct_bio(xfer, cmd_tbl->cmdt_cfis); 1445 cmd_tbl->cmdt_cfis[rhd_c] |= xfer->c_drive; 1446 1447 cmd_h = &achp->ahcic_cmdh[xfer->c_slot]; 1448 AHCIDEBUG_PRINT(("%s port %d header %p\n", AHCINAME(sc), 1449 chp->ch_channel, cmd_h), DEBUG_XFERS); 1450 if (ahci_dma_setup(chp, xfer->c_slot, ata_bio->databuf, ata_bio->bcount, 1451 (ata_bio->flags & ATA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE)) { 1452 ata_bio->error = ERR_DMA; 1453 ata_bio->r_error = 0; 1454 return ATASTART_ABORT; 1455 } 1456 cmd_h->cmdh_flags = htole16( 1457 ((ata_bio->flags & ATA_READ) ? 
0 : AHCI_CMDH_F_WR) | 1458 RHD_FISLEN / 4 | (xfer->c_drive << AHCI_CMDH_F_PMP_SHIFT)); 1459 cmd_h->cmdh_prdbc = 0; 1460 AHCI_CMDH_SYNC(sc, achp, xfer->c_slot, 1461 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1462 1463 if (xfer->c_flags & C_POLL) { 1464 /* polled command, disable interrupts */ 1465 AHCI_WRITE(sc, AHCI_GHC, 1466 AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE); 1467 } 1468 if (xfer->c_flags & C_NCQ) 1469 AHCI_WRITE(sc, AHCI_P_SACT(chp->ch_channel), 1U << xfer->c_slot); 1470 /* start command */ 1471 AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << xfer->c_slot); 1472 1473 if ((xfer->c_flags & C_POLL) == 0) { 1474 callout_reset(&chp->c_timo_callout, mstohz(ATA_DELAY), 1475 ata_timeout, chp); 1476 return ATASTART_STARTED; 1477 } else 1478 return ATASTART_POLL; 1479 } 1480 1481 static int 1482 ahci_bio_poll(struct ata_channel *chp, struct ata_xfer *xfer) 1483 { 1484 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1485 struct ahci_channel *achp = (struct ahci_channel *)chp; 1486 1487 /* 1488 * Polled command. 1489 */ 1490 for (int i = 0; i < ATA_DELAY * 10; i++) { 1491 if (xfer->c_bio.flags & ATA_ITSDONE) 1492 break; 1493 ahci_intr_port(achp); 1494 delay(100); 1495 } 1496 AHCIDEBUG_PRINT(("%s port %d poll end GHC 0x%x IS 0x%x list 0x%x%x fis 0x%x%x CMD 0x%x CI 0x%x\n", AHCINAME(sc), chp->ch_channel, 1497 AHCI_READ(sc, AHCI_GHC), AHCI_READ(sc, AHCI_IS), 1498 AHCI_READ(sc, AHCI_P_CLBU(chp->ch_channel)), 1499 AHCI_READ(sc, AHCI_P_CLB(chp->ch_channel)), 1500 AHCI_READ(sc, AHCI_P_FBU(chp->ch_channel)), 1501 AHCI_READ(sc, AHCI_P_FB(chp->ch_channel)), 1502 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)), 1503 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))), 1504 DEBUG_XFERS); 1505 if ((xfer->c_bio.flags & ATA_ITSDONE) == 0) { 1506 xfer->c_bio.error = TIMEOUT; 1507 xfer->ops->c_intr(chp, xfer, 0); 1508 } 1509 /* reenable interrupts */ 1510 AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE); 1511 return ATAPOLL_DONE; 1512 } 1513 1514 static void 1515 ahci_bio_abort(struct ata_channel *chp, struct ata_xfer *xfer) 1516 { 1517 ahci_bio_complete(chp, xfer, 0); 1518 } 1519 1520 static void 1521 ahci_bio_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason) 1522 { 1523 int drive = xfer->c_drive; 1524 struct ata_bio *ata_bio = &xfer->c_bio; 1525 bool deactivate = true; 1526 1527 AHCIDEBUG_PRINT(("ahci_bio_kill_xfer port %d\n", chp->ch_channel), 1528 DEBUG_FUNCS); 1529 1530 ata_bio->flags |= ATA_ITSDONE; 1531 switch (reason) { 1532 case KILL_GONE_INACTIVE: 1533 deactivate = false; 1534 /* FALLTHROUGH */ 1535 case KILL_GONE: 1536 ata_bio->error = ERR_NODEV; 1537 break; 1538 case KILL_RESET: 1539 ata_bio->error = ERR_RESET; 1540 break; 1541 case KILL_REQUEUE: 1542 ata_bio->error = REQUEUE; 1543 break; 1544 default: 1545 printf("ahci_bio_kill_xfer: unknown reason %d\n", reason); 1546 panic("ahci_bio_kill_xfer"); 1547 } 1548 ata_bio->r_error = WDCE_ABRT; 1549 1550 if (deactivate) 1551 ata_deactivate_xfer(chp, xfer); 1552 1553 (*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc, xfer); 1554 } 1555 1556 static int 1557 ahci_bio_complete(struct ata_channel *chp, struct ata_xfer *xfer, int tfd) 1558 { 1559 struct ata_bio *ata_bio = &xfer->c_bio; 1560 int drive = xfer->c_drive; 1561 struct ahci_channel *achp = (struct ahci_channel *)chp; 1562 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1563 1564 AHCIDEBUG_PRINT(("ahci_bio_complete port %d\n", chp->ch_channel), 1565 DEBUG_FUNCS); 1566 1567 if (ata_waitdrain_xfer_check(chp, xfer)) 1568 return 0; 1569 1570 if 
(xfer->c_flags & C_TIMEOU) { 1571 ata_bio->error = TIMEOUT; 1572 } 1573 1574 bus_dmamap_sync(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot], 0, 1575 achp->ahcic_datad[xfer->c_slot]->dm_mapsize, 1576 (ata_bio->flags & ATA_READ) ? BUS_DMASYNC_POSTREAD : 1577 BUS_DMASYNC_POSTWRITE); 1578 bus_dmamap_unload(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot]); 1579 1580 ata_bio->flags |= ATA_ITSDONE; 1581 if (AHCI_TFD_ERR(tfd) & WDCS_DWF) { 1582 ata_bio->error = ERR_DF; 1583 } else if (AHCI_TFD_ST(tfd) & WDCS_ERR) { 1584 ata_bio->error = ERROR; 1585 ata_bio->r_error = AHCI_TFD_ERR(tfd); 1586 } else if (AHCI_TFD_ST(tfd) & WDCS_CORR) 1587 ata_bio->flags |= ATA_CORR; 1588 1589 AHCI_CMDH_SYNC(sc, achp, xfer->c_slot, 1590 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1591 AHCIDEBUG_PRINT(("ahci_bio_complete bcount %ld", 1592 ata_bio->bcount), DEBUG_XFERS); 1593 /* 1594 * If it was a write, complete data buffer may have been transferred 1595 * before error detection; in this case don't use cmdh_prdbc 1596 * as it won't reflect what was written to media. Assume nothing 1597 * was transferred and leave bcount as-is. 1598 * For queued commands, PRD Byte Count should not be used, and is 1599 * not required to be valid; in that case underflow is always illegal. 1600 */ 1601 if ((xfer->c_flags & C_NCQ) != 0) { 1602 if (ata_bio->error == NOERROR) 1603 ata_bio->bcount = 0; 1604 } else { 1605 if ((ata_bio->flags & ATA_READ) || ata_bio->error == NOERROR) 1606 ata_bio->bcount -= 1607 le32toh(achp->ahcic_cmdh[xfer->c_slot].cmdh_prdbc); 1608 } 1609 AHCIDEBUG_PRINT((" now %ld\n", ata_bio->bcount), DEBUG_XFERS); 1610 1611 ata_deactivate_xfer(chp, xfer); 1612 1613 (*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc, xfer); 1614 if ((AHCI_TFD_ST(tfd) & WDCS_ERR) == 0) 1615 atastart(chp); 1616 return 0; 1617 } 1618 1619 static void 1620 ahci_channel_stop(struct ahci_softc *sc, struct ata_channel *chp, int flags) 1621 { 1622 int i; 1623 /* stop channel */ 1624 AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), 1625 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & ~AHCI_P_CMD_ST); 1626 /* wait 1s for channel to stop */ 1627 for (i = 0; i <100; i++) { 1628 if ((AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CR) 1629 == 0) 1630 break; 1631 ata_delay(chp, 10, "ahcistop", flags); 1632 } 1633 if (AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CR) { 1634 printf("%s: channel wouldn't stop\n", AHCINAME(sc)); 1635 /* XXX controller reset ? */ 1636 return; 1637 } 1638 1639 if (sc->sc_channel_stop) 1640 sc->sc_channel_stop(sc, chp); 1641 } 1642 1643 static void 1644 ahci_channel_start(struct ahci_softc *sc, struct ata_channel *chp, 1645 int flags, int clo) 1646 { 1647 int i; 1648 uint32_t p_cmd; 1649 /* clear error */ 1650 AHCI_WRITE(sc, AHCI_P_SERR(chp->ch_channel), 1651 AHCI_READ(sc, AHCI_P_SERR(chp->ch_channel))); 1652 1653 if (clo) { 1654 /* issue command list override */ 1655 KASSERT(sc->sc_ahci_cap & AHCI_CAP_CLO); 1656 AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), 1657 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) | AHCI_P_CMD_CLO); 1658 /* wait 1s for AHCI_CAP_CLO to clear */ 1659 for (i = 0; i <100; i++) { 1660 if ((AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & 1661 AHCI_P_CMD_CLO) == 0) 1662 break; 1663 ata_delay(chp, 10, "ahciclo", flags); 1664 } 1665 if (AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CLO) { 1666 printf("%s: channel wouldn't CLO\n", AHCINAME(sc)); 1667 /* XXX controller reset ? 
			 */
			return;
		}
	}

	if (sc->sc_channel_start)
		sc->sc_channel_start(sc, chp);

	/* and start controller */
	p_cmd = AHCI_P_CMD_ICC_AC | AHCI_P_CMD_POD | AHCI_P_CMD_SUD |
	    AHCI_P_CMD_FRE | AHCI_P_CMD_ST;
	if (chp->ch_ndrives > PMP_PORT_CTL &&
	    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
		p_cmd |= AHCI_P_CMD_PMA;
	}
	AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), p_cmd);
}

/* Recover channel after command failure */
static void
ahci_channel_recover(struct ata_channel *chp, int flags, uint32_t tfd)
{
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	int drive = ATACH_NODRIVE;
	bool reset = false;

	ata_channel_lock_owned(chp);

	/*
	 * Read FBS to get the drive which caused the error if a PM is
	 * in use.  According to the AHCI 1.3 spec, this register is
	 * available regardless of whether the FIS-based switching (FBSS)
	 * feature is supported or enabled.  If FIS-based switching is not
	 * in use, it merely maintains a single pair of DRQ/BSY state bits,
	 * but that is enough since in that case we never issue commands
	 * to more than one device at a time anyway.
	 * XXX untested
	 */
	if (chp->ch_ndrives > PMP_PORT_CTL) {
		uint32_t fbs = AHCI_READ(sc, AHCI_P_FBS(chp->ch_channel));
		if (fbs & AHCI_P_FBS_SDE) {
			drive = (fbs & AHCI_P_FBS_DWE) >> AHCI_P_FBS_DWE_SHIFT;

			/*
			 * Tell HBA to reset PM port X (value in DWE) state,
			 * and resume processing commands for other ports.
			 */
			fbs |= AHCI_P_FBS_DEC;
			AHCI_WRITE(sc, AHCI_P_FBS(chp->ch_channel), fbs);
			for (int i = 0; i < 1000; i++) {
				fbs = AHCI_READ(sc,
				    AHCI_P_FBS(chp->ch_channel));
				if ((fbs & AHCI_P_FBS_DEC) == 0)
					break;
				DELAY(1000);
			}
			if ((fbs & AHCI_P_FBS_DEC) != 0) {
				/* follow non-device specific recovery */
				drive = ATACH_NODRIVE;
				reset = true;
			}
		} else {
			/* not device specific, reset channel */
			drive = ATACH_NODRIVE;
			reset = true;
		}
	} else
		drive = 0;

	/*
	 * If the BSY or DRQ bits are set, we must execute COMRESET to
	 * return the device to idle state.  If the drive is idle, it's
	 * enough to just reset CMD.ST; it's not necessary to do a software
	 * reset.  After resetting CMD.ST, READ LOG EXT needs to be executed
	 * for NCQ to unblock device processing if COMRESET was not done.
	 */
	if (reset || (AHCI_TFD_ST(tfd) & (WDCS_BSY|WDCS_DRQ)) != 0) {
		ahci_reset_channel(chp, flags);
		goto out;
	}

	KASSERT(drive != ATACH_NODRIVE && drive >= 0);
	ahci_channel_stop(sc, chp, flags);
	ahci_channel_start(sc, chp, flags,
	    (sc->sc_ahci_cap & AHCI_CAP_CLO) ?
1 : 0); 1751 1752 ata_recovery_resume(chp, drive, tfd, flags); 1753 1754 out: 1755 /* Drive unblocked, back to normal operation */ 1756 return; 1757 } 1758 1759 static int 1760 ahci_dma_setup(struct ata_channel *chp, int slot, void *data, 1761 size_t count, int op) 1762 { 1763 int error, seg; 1764 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1765 struct ahci_channel *achp = (struct ahci_channel *)chp; 1766 struct ahci_cmd_tbl *cmd_tbl; 1767 struct ahci_cmd_header *cmd_h; 1768 1769 cmd_h = &achp->ahcic_cmdh[slot]; 1770 cmd_tbl = achp->ahcic_cmd_tbl[slot]; 1771 1772 if (data == NULL) { 1773 cmd_h->cmdh_prdtl = 0; 1774 goto end; 1775 } 1776 1777 error = bus_dmamap_load(sc->sc_dmat, achp->ahcic_datad[slot], 1778 data, count, NULL, 1779 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | op); 1780 if (error) { 1781 printf("%s port %d: failed to load xfer: %d\n", 1782 AHCINAME(sc), chp->ch_channel, error); 1783 return error; 1784 } 1785 bus_dmamap_sync(sc->sc_dmat, achp->ahcic_datad[slot], 0, 1786 achp->ahcic_datad[slot]->dm_mapsize, 1787 (op == BUS_DMA_READ) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1788 for (seg = 0; seg < achp->ahcic_datad[slot]->dm_nsegs; seg++) { 1789 cmd_tbl->cmdt_prd[seg].prd_dba = htole64( 1790 achp->ahcic_datad[slot]->dm_segs[seg].ds_addr); 1791 cmd_tbl->cmdt_prd[seg].prd_dbc = htole32( 1792 achp->ahcic_datad[slot]->dm_segs[seg].ds_len - 1); 1793 } 1794 cmd_tbl->cmdt_prd[seg - 1].prd_dbc |= htole32(AHCI_PRD_DBC_IPC); 1795 cmd_h->cmdh_prdtl = htole16(achp->ahcic_datad[slot]->dm_nsegs); 1796 end: 1797 AHCI_CMDTBL_SYNC(sc, achp, slot, BUS_DMASYNC_PREWRITE); 1798 return 0; 1799 } 1800 1801 #if NATAPIBUS > 0 1802 static void 1803 ahci_atapibus_attach(struct atabus_softc * ata_sc) 1804 { 1805 struct ata_channel *chp = ata_sc->sc_chan; 1806 struct atac_softc *atac = chp->ch_atac; 1807 struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic; 1808 struct scsipi_channel *chan = &chp->ch_atapi_channel; 1809 /* 1810 * Fill in the scsipi_adapter. 1811 */ 1812 adapt->adapt_dev = atac->atac_dev; 1813 adapt->adapt_nchannels = atac->atac_nchannels; 1814 adapt->adapt_request = ahci_atapi_scsipi_request; 1815 adapt->adapt_minphys = ahci_atapi_minphys; 1816 atac->atac_atapi_adapter.atapi_probe_device = ahci_atapi_probe_device; 1817 1818 /* 1819 * Fill in the scsipi_channel. 1820 */ 1821 memset(chan, 0, sizeof(*chan)); 1822 chan->chan_adapter = adapt; 1823 chan->chan_bustype = &ahci_atapi_bustype; 1824 chan->chan_channel = chp->ch_channel; 1825 chan->chan_flags = SCSIPI_CHAN_OPENINGS; 1826 chan->chan_openings = 1; 1827 chan->chan_max_periph = 1; 1828 chan->chan_ntargets = 1; 1829 chan->chan_nluns = 1; 1830 chp->atapibus = config_found(ata_sc->sc_dev, chan, atapiprint, 1831 CFARGS(.iattr = "atapi")); 1832 } 1833 1834 static void 1835 ahci_atapi_minphys(struct buf *bp) 1836 { 1837 if (bp->b_bcount > MAXPHYS) 1838 bp->b_bcount = MAXPHYS; 1839 minphys(bp); 1840 } 1841 1842 /* 1843 * Kill off all pending xfers for a periph. 1844 * 1845 * Must be called at splbio(). 
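 * The scsipi target number of the periph is used directly as the ATA
 * drive index on the channel, which is how ch_drive[] is indexed below.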

#if NATAPIBUS > 0
static void
ahci_atapibus_attach(struct atabus_softc *ata_sc)
{
        struct ata_channel *chp = ata_sc->sc_chan;
        struct atac_softc *atac = chp->ch_atac;
        struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
        struct scsipi_channel *chan = &chp->ch_atapi_channel;

        /*
         * Fill in the scsipi_adapter.
         */
        adapt->adapt_dev = atac->atac_dev;
        adapt->adapt_nchannels = atac->atac_nchannels;
        adapt->adapt_request = ahci_atapi_scsipi_request;
        adapt->adapt_minphys = ahci_atapi_minphys;
        atac->atac_atapi_adapter.atapi_probe_device = ahci_atapi_probe_device;

        /*
         * Fill in the scsipi_channel.
         */
        memset(chan, 0, sizeof(*chan));
        chan->chan_adapter = adapt;
        chan->chan_bustype = &ahci_atapi_bustype;
        chan->chan_channel = chp->ch_channel;
        chan->chan_flags = SCSIPI_CHAN_OPENINGS;
        chan->chan_openings = 1;
        chan->chan_max_periph = 1;
        chan->chan_ntargets = 1;
        chan->chan_nluns = 1;
        chp->atapibus = config_found(ata_sc->sc_dev, chan, atapiprint,
            CFARGS(.iattr = "atapi"));
}

static void
ahci_atapi_minphys(struct buf *bp)
{
        if (bp->b_bcount > MAXPHYS)
                bp->b_bcount = MAXPHYS;
        minphys(bp);
}

/*
 * Kill off all pending xfers for a periph.
 *
 * Must be called at splbio().
 */
static void
ahci_atapi_kill_pending(struct scsipi_periph *periph)
{
        struct atac_softc *atac =
            device_private(periph->periph_channel->chan_adapter->adapt_dev);
        struct ata_channel *chp =
            atac->atac_channels[periph->periph_channel->chan_channel];

        ata_kill_pending(&chp->ch_drive[periph->periph_target]);
}

static const struct ata_xfer_ops ahci_atapi_xfer_ops = {
        .c_start = ahci_atapi_start,
        .c_poll = ahci_atapi_poll,
        .c_abort = ahci_atapi_abort,
        .c_intr = ahci_atapi_complete,
        .c_kill_xfer = ahci_atapi_kill_xfer,
};

static void
ahci_atapi_scsipi_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t req, void *arg)
{
        struct scsipi_adapter *adapt = chan->chan_adapter;
        struct scsipi_periph *periph;
        struct scsipi_xfer *sc_xfer;
        struct ahci_softc *sc = device_private(adapt->adapt_dev);
        struct atac_softc *atac = &sc->sc_atac;
        struct ata_xfer *xfer;
        int channel = chan->chan_channel;
        int drive, s;

        switch (req) {
        case ADAPTER_REQ_RUN_XFER:
                sc_xfer = arg;
                periph = sc_xfer->xs_periph;
                drive = periph->periph_target;
                if (!device_is_active(atac->atac_dev)) {
                        sc_xfer->error = XS_DRIVER_STUFFUP;
                        scsipi_done(sc_xfer);
                        return;
                }
                xfer = ata_get_xfer(atac->atac_channels[channel], false);
                if (xfer == NULL) {
                        sc_xfer->error = XS_RESOURCE_SHORTAGE;
                        scsipi_done(sc_xfer);
                        return;
                }

                if (sc_xfer->xs_control & XS_CTL_POLL)
                        xfer->c_flags |= C_POLL;
                xfer->c_drive = drive;
                xfer->c_flags |= C_ATAPI;
                xfer->c_databuf = sc_xfer->data;
                xfer->c_bcount = sc_xfer->datalen;
                xfer->ops = &ahci_atapi_xfer_ops;
                xfer->c_scsipi = sc_xfer;
                xfer->c_atapi.c_dscpoll = 0;
                s = splbio();
                ata_exec_xfer(atac->atac_channels[channel], xfer);
#ifdef DIAGNOSTIC
                if ((sc_xfer->xs_control & XS_CTL_POLL) != 0 &&
                    (sc_xfer->xs_status & XS_STS_DONE) == 0)
                        panic("ahci_atapi_scsipi_request: polled command "
                            "not done");
#endif
                splx(s);
                return;
        default:
                /* Not supported, nothing to do. */
                ;
        }
}

static int
ahci_atapi_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
        struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
        struct ahci_channel *achp = (struct ahci_channel *)chp;
        struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
        struct ahci_cmd_tbl *cmd_tbl;
        struct ahci_cmd_header *cmd_h;

        AHCIDEBUG_PRINT(("ahci_atapi_start CI 0x%x\n",
            AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))), DEBUG_XFERS);

        ata_channel_lock_owned(chp);

        cmd_tbl = achp->ahcic_cmd_tbl[xfer->c_slot];
        AHCIDEBUG_PRINT(("%s port %d tbl %p\n", AHCINAME(sc), chp->ch_channel,
            cmd_tbl), DEBUG_XFERS);

        satafis_rhd_construct_atapi(xfer, cmd_tbl->cmdt_cfis);
        cmd_tbl->cmdt_cfis[rhd_c] |= xfer->c_drive;
        memset(&cmd_tbl->cmdt_acmd, 0, sizeof(cmd_tbl->cmdt_acmd));
        memcpy(cmd_tbl->cmdt_acmd, sc_xfer->cmd, sc_xfer->cmdlen);

        cmd_h = &achp->ahcic_cmdh[xfer->c_slot];
        AHCIDEBUG_PRINT(("%s port %d header %p\n", AHCINAME(sc),
            chp->ch_channel, cmd_h), DEBUG_XFERS);
        if (ahci_dma_setup(chp, xfer->c_slot,
            sc_xfer->datalen ? sc_xfer->data : NULL,
            sc_xfer->datalen,
            (sc_xfer->xs_control & XS_CTL_DATA_IN) ?
            BUS_DMA_READ : BUS_DMA_WRITE)) {
                sc_xfer->error = XS_DRIVER_STUFFUP;
                return ATASTART_ABORT;
        }
        cmd_h->cmdh_flags = htole16(
            ((sc_xfer->xs_control & XS_CTL_DATA_OUT) ? AHCI_CMDH_F_WR : 0) |
            RHD_FISLEN / 4 | AHCI_CMDH_F_A |
            (xfer->c_drive << AHCI_CMDH_F_PMP_SHIFT));
        cmd_h->cmdh_prdbc = 0;
        AHCI_CMDH_SYNC(sc, achp, xfer->c_slot,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        if (xfer->c_flags & C_POLL) {
                /* polled command, disable interrupts */
                AHCI_WRITE(sc, AHCI_GHC,
                    AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE);
        }
        /* start command */
        AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << xfer->c_slot);

        if ((xfer->c_flags & C_POLL) == 0) {
                callout_reset(&chp->c_timo_callout, mstohz(sc_xfer->timeout),
                    ata_timeout, chp);
                return ATASTART_STARTED;
        } else
                return ATASTART_POLL;
}
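
/*
 * Busy-wait for a polled ATAPI command.  ahci_atapi_start() disabled
 * interrupt delivery (AHCI_GHC_IE), so call the port interrupt handler by
 * hand every 10ms until the scsipi xfer is marked done or ATA_DELAY
 * expires, then force a timeout completion if needed and re-enable
 * interrupts.
 */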
static int
ahci_atapi_poll(struct ata_channel *chp, struct ata_xfer *xfer)
{
        struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
        struct ahci_channel *achp = (struct ahci_channel *)chp;

        /*
         * Polled command.
         */
        for (int i = 0; i < ATA_DELAY / 10; i++) {
                if (xfer->c_scsipi->xs_status & XS_STS_DONE)
                        break;
                ahci_intr_port(achp);
                delay(10000);
        }
        AHCIDEBUG_PRINT(("%s port %d poll end GHC 0x%x IS 0x%x list 0x%x%x "
            "fis 0x%x%x CMD 0x%x CI 0x%x\n", AHCINAME(sc), chp->ch_channel,
            AHCI_READ(sc, AHCI_GHC), AHCI_READ(sc, AHCI_IS),
            AHCI_READ(sc, AHCI_P_CLBU(chp->ch_channel)),
            AHCI_READ(sc, AHCI_P_CLB(chp->ch_channel)),
            AHCI_READ(sc, AHCI_P_FBU(chp->ch_channel)),
            AHCI_READ(sc, AHCI_P_FB(chp->ch_channel)),
            AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)),
            AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))),
            DEBUG_XFERS);
        if ((xfer->c_scsipi->xs_status & XS_STS_DONE) == 0) {
                xfer->c_scsipi->error = XS_TIMEOUT;
                xfer->ops->c_intr(chp, xfer, 0);
        }
        /* reenable interrupts */
        AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE);
        return ATAPOLL_DONE;
}

static void
ahci_atapi_abort(struct ata_channel *chp, struct ata_xfer *xfer)
{
        ahci_atapi_complete(chp, xfer, 0);
}
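
/*
 * Command completion handler for ATAPI transfers: unload the data map,
 * compute the residual from the byte count the HBA wrote back into the
 * command header (cmdh_prdbc), turn a task-file error into a scsipi sense
 * or CHECK CONDITION status, and hand the xfer back to scsipi.
 */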
static int
ahci_atapi_complete(struct ata_channel *chp, struct ata_xfer *xfer, int tfd)
{
        struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
        struct ahci_channel *achp = (struct ahci_channel *)chp;
        struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;

        AHCIDEBUG_PRINT(("ahci_atapi_complete port %d\n", chp->ch_channel),
            DEBUG_FUNCS);

        if (ata_waitdrain_xfer_check(chp, xfer))
                return 0;

        if (xfer->c_flags & C_TIMEOU) {
                sc_xfer->error = XS_TIMEOUT;
        }

        if (xfer->c_bcount > 0) {
                bus_dmamap_sync(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot], 0,
                    achp->ahcic_datad[xfer->c_slot]->dm_mapsize,
                    (sc_xfer->xs_control & XS_CTL_DATA_IN) ?
                    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot]);
        }

        AHCI_CMDH_SYNC(sc, achp, xfer->c_slot,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        sc_xfer->resid = sc_xfer->datalen;
        sc_xfer->resid -= le32toh(achp->ahcic_cmdh[xfer->c_slot].cmdh_prdbc);
        AHCIDEBUG_PRINT(("ahci_atapi_complete datalen %d resid %d\n",
            sc_xfer->datalen, sc_xfer->resid), DEBUG_XFERS);
        if (AHCI_TFD_ST(tfd) & WDCS_ERR &&
            ((sc_xfer->xs_control & XS_CTL_REQSENSE) == 0 ||
            sc_xfer->resid == sc_xfer->datalen)) {
                sc_xfer->error = XS_SHORTSENSE;
                sc_xfer->sense.atapi_sense = AHCI_TFD_ERR(tfd);
                if ((sc_xfer->xs_periph->periph_quirks &
                    PQUIRK_NOSENSE) == 0) {
                        /* ask scsipi to send a REQUEST_SENSE */
                        sc_xfer->error = XS_BUSY;
                        sc_xfer->status = SCSI_CHECK;
                }
        }

        ata_deactivate_xfer(chp, xfer);

        ata_free_xfer(chp, xfer);
        scsipi_done(sc_xfer);
        if ((AHCI_TFD_ST(tfd) & WDCS_ERR) == 0)
                atastart(chp);
        return 0;
}

static void
ahci_atapi_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
{
        struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
        bool deactivate = true;

        /* remove this command from the xfer queue */
        switch (reason) {
        case KILL_GONE_INACTIVE:
                deactivate = false;
                /* FALLTHROUGH */
        case KILL_GONE:
                sc_xfer->error = XS_DRIVER_STUFFUP;
                break;
        case KILL_RESET:
                sc_xfer->error = XS_RESET;
                break;
        case KILL_REQUEUE:
                sc_xfer->error = XS_REQUEUE;
                break;
        default:
                printf("ahci_atapi_kill_xfer: unknown reason %d\n", reason);
                panic("ahci_atapi_kill_xfer");
        }

        if (deactivate)
                ata_deactivate_xfer(chp, xfer);

        ata_free_xfer(chp, xfer);
        scsipi_done(sc_xfer);
}
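
/*
 * Probe the given target for an ATAPI device: issue IDENTIFY, and if it
 * succeeds allocate a scsipi periph, fill in the inquiry data from the
 * IDENTIFY response and attach the device; otherwise mark the drive as
 * absent.
 */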
static void
ahci_atapi_probe_device(struct atapibus_softc *sc, int target)
{
        struct scsipi_channel *chan = sc->sc_channel;
        struct scsipi_periph *periph;
        struct ataparams ids;
        struct ataparams *id = &ids;
        struct ahci_softc *ahcic =
            device_private(chan->chan_adapter->adapt_dev);
        struct atac_softc *atac = &ahcic->sc_atac;
        struct ata_channel *chp = atac->atac_channels[chan->chan_channel];
        struct ata_drive_datas *drvp = &chp->ch_drive[target];
        struct scsipibus_attach_args sa;
        char serial_number[21], model[41], firmware_revision[9];
        int s;

        /* skip if already attached */
        if (scsipi_lookup_periph(chan, target, 0) != NULL)
                return;

        /* if no ATAPI device was detected at attach time, skip */
        if (drvp->drive_type != ATA_DRIVET_ATAPI) {
                AHCIDEBUG_PRINT(("ahci_atapi_probe_device: drive %d "
                    "not present\n", target), DEBUG_PROBE);
                return;
        }

        /* Some ATAPI devices need a bit more time after software reset. */
        delay(5000);
        if (ata_get_params(drvp, AT_WAIT, id) == 0) {
#ifdef ATAPI_DEBUG_PROBE
                printf("%s drive %d: cmdsz 0x%x drqtype 0x%x\n",
                    AHCINAME(ahcic), target,
                    id->atap_config & ATAPI_CFG_CMD_MASK,
                    id->atap_config & ATAPI_CFG_DRQ_MASK);
#endif
                periph = scsipi_alloc_periph(M_NOWAIT);
                if (periph == NULL) {
                        aprint_error_dev(sc->sc_dev,
                            "unable to allocate periph for drive %d\n",
                            target);
                        return;
                }
                periph->periph_dev = NULL;
                periph->periph_channel = chan;
                periph->periph_switch = &atapi_probe_periphsw;
                periph->periph_target = target;
                periph->periph_lun = 0;
                periph->periph_quirks = PQUIRK_ONLYBIG;

#ifdef SCSIPI_DEBUG
                if (SCSIPI_DEBUG_TYPE == SCSIPI_BUSTYPE_ATAPI &&
                    SCSIPI_DEBUG_TARGET == target)
                        periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
#endif
                periph->periph_type = ATAPI_CFG_TYPE(id->atap_config);
                if (id->atap_config & ATAPI_CFG_REMOV)
                        periph->periph_flags |= PERIPH_REMOVABLE;
                if (periph->periph_type == T_SEQUENTIAL) {
                        s = splbio();
                        drvp->drive_flags |= ATA_DRIVE_ATAPIDSCW;
                        splx(s);
                }

                sa.sa_periph = periph;
                sa.sa_inqbuf.type = ATAPI_CFG_TYPE(id->atap_config);
                sa.sa_inqbuf.removable = id->atap_config & ATAPI_CFG_REMOV ?
                    T_REMOV : T_FIXED;
                strnvisx(model, sizeof(model), id->atap_model, 40,
                    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
                strnvisx(serial_number, sizeof(serial_number), id->atap_serial,
                    20, VIS_TRIM|VIS_SAFE|VIS_OCTAL);
                strnvisx(firmware_revision, sizeof(firmware_revision),
                    id->atap_revision, 8, VIS_TRIM|VIS_SAFE|VIS_OCTAL);
                sa.sa_inqbuf.vendor = model;
                sa.sa_inqbuf.product = serial_number;
                sa.sa_inqbuf.revision = firmware_revision;

                /*
                 * Determine the operating mode capabilities of the device.
                 */
                if ((id->atap_config & ATAPI_CFG_CMD_MASK) == ATAPI_CFG_CMD_16)
                        periph->periph_cap |= PERIPH_CAP_CMD16;
                /* XXX This is gross. */
                periph->periph_cap |= (id->atap_config & ATAPI_CFG_DRQ_MASK);

                drvp->drv_softc = atapi_probe_device(sc, target, periph, &sa);

                if (drvp->drv_softc)
                        ata_probe_caps(drvp);
                else {
                        s = splbio();
                        drvp->drive_type = ATA_DRIVET_NONE;
                        splx(s);
                }
        } else {
                AHCIDEBUG_PRINT(("ahci_atapi_probe_device: "
                    "ATAPI_IDENTIFY_DEVICE failed for drive %s:%d:%d\n",
                    AHCINAME(ahcic), chp->ch_channel, target), DEBUG_PROBE);
                s = splbio();
                drvp->drive_type = ATA_DRIVET_NONE;
                splx(s);
        }
}
#endif /* NATAPIBUS */