1 /* $NetBSD: ahcisata_core.c,v 1.108 2023/09/10 14:04:28 abs Exp $ */ 2 3 /* 4 * Copyright (c) 2006 Manuel Bouyer. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 */ 27 28 #include <sys/cdefs.h> 29 __KERNEL_RCSID(0, "$NetBSD: ahcisata_core.c,v 1.108 2023/09/10 14:04:28 abs Exp $"); 30 31 #include <sys/types.h> 32 #include <sys/malloc.h> 33 #include <sys/param.h> 34 #include <sys/kernel.h> 35 #include <sys/systm.h> 36 #include <sys/disklabel.h> 37 #include <sys/proc.h> 38 #include <sys/buf.h> 39 40 #include <dev/ata/atareg.h> 41 #include <dev/ata/satavar.h> 42 #include <dev/ata/satareg.h> 43 #include <dev/ata/satafisvar.h> 44 #include <dev/ata/satafisreg.h> 45 #include <dev/ata/satapmpreg.h> 46 #include <dev/ic/ahcisatavar.h> 47 #include <dev/ic/wdcreg.h> 48 49 #include <dev/scsipi/scsi_all.h> /* for SCSI status */ 50 51 #include "atapibus.h" 52 53 #include "opt_ahcisata.h" 54 55 #ifdef AHCI_DEBUG 56 int ahcidebug_mask = 0; 57 #endif 58 59 static void ahci_probe_drive(struct ata_channel *); 60 static void ahci_setup_channel(struct ata_channel *); 61 62 static void ahci_ata_bio(struct ata_drive_datas *, struct ata_xfer *); 63 static int ahci_do_reset_drive(struct ata_channel *, int, int, uint32_t *, 64 uint8_t); 65 static void ahci_reset_drive(struct ata_drive_datas *, int, uint32_t *); 66 static void ahci_reset_channel(struct ata_channel *, int); 67 static void ahci_exec_command(struct ata_drive_datas *, struct ata_xfer *); 68 static int ahci_ata_addref(struct ata_drive_datas *); 69 static void ahci_ata_delref(struct ata_drive_datas *); 70 static void ahci_killpending(struct ata_drive_datas *); 71 72 static int ahci_cmd_start(struct ata_channel *, struct ata_xfer *); 73 static int ahci_cmd_complete(struct ata_channel *, struct ata_xfer *, int); 74 static int ahci_cmd_poll(struct ata_channel *, struct ata_xfer *); 75 static void ahci_cmd_abort(struct ata_channel *, struct ata_xfer *); 76 static void ahci_cmd_done(struct ata_channel *, struct ata_xfer *); 77 static void ahci_cmd_done_end(struct ata_channel *, struct ata_xfer *); 78 static void ahci_cmd_kill_xfer(struct ata_channel *, struct ata_xfer *, int); 79 static int ahci_bio_start(struct ata_channel *, struct ata_xfer *); 80 static int ahci_bio_poll(struct ata_channel *, struct ata_xfer *); 81 static void 
ahci_bio_abort(struct ata_channel *, struct ata_xfer *); 82 static int ahci_bio_complete(struct ata_channel *, struct ata_xfer *, int); 83 static void ahci_bio_kill_xfer(struct ata_channel *, struct ata_xfer *, int) ; 84 static void ahci_channel_stop(struct ahci_softc *, struct ata_channel *, int); 85 static void ahci_channel_start(struct ahci_softc *, struct ata_channel *, 86 int, int); 87 static void ahci_channel_recover(struct ata_channel *, int, uint32_t); 88 static int ahci_dma_setup(struct ata_channel *, int, void *, size_t, int); 89 static int ahci_intr_port_common(struct ata_channel *); 90 91 #if NATAPIBUS > 0 92 static void ahci_atapibus_attach(struct atabus_softc *); 93 static void ahci_atapi_kill_pending(struct scsipi_periph *); 94 static void ahci_atapi_minphys(struct buf *); 95 static void ahci_atapi_scsipi_request(struct scsipi_channel *, 96 scsipi_adapter_req_t, void *); 97 static int ahci_atapi_start(struct ata_channel *, struct ata_xfer *); 98 static int ahci_atapi_poll(struct ata_channel *, struct ata_xfer *); 99 static void ahci_atapi_abort(struct ata_channel *, struct ata_xfer *); 100 static int ahci_atapi_complete(struct ata_channel *, struct ata_xfer *, int); 101 static void ahci_atapi_kill_xfer(struct ata_channel *, struct ata_xfer *, int); 102 static void ahci_atapi_probe_device(struct atapibus_softc *, int); 103 104 static const struct scsipi_bustype ahci_atapi_bustype = { 105 .bustype_type = SCSIPI_BUSTYPE_ATAPI, 106 .bustype_cmd = atapi_scsipi_cmd, 107 .bustype_interpret_sense = atapi_interpret_sense, 108 .bustype_printaddr = atapi_print_addr, 109 .bustype_kill_pending = ahci_atapi_kill_pending, 110 .bustype_async_event_xfer_mode = NULL, 111 }; 112 #endif /* NATAPIBUS */ 113 114 #define ATA_DELAY 10000 /* 10s for a drive I/O */ 115 #define ATA_RESET_DELAY 31000 /* 31s for a drive reset */ 116 #define AHCI_RST_WAIT (ATA_RESET_DELAY / 10) 117 118 #ifndef AHCISATA_EXTRA_DELAY_MS 119 #define AHCISATA_EXTRA_DELAY_MS 500 /* XXX need to adjust */ 120 #endif 121 122 #if !defined(AHCISATA_REMOVE_EXTRA_DELAY) && AHCISATA_EXTRA_DELAY_MS > 0 123 #define AHCISATA_DO_EXTRA_DELAY(sc, chp, msg, flags) \ 124 ata_delay(chp, AHCISATA_EXTRA_DELAY_MS, msg, flags) 125 #else 126 #define AHCISATA_DO_EXTRA_DELAY(sc, chp, msg, flags) do { } while (0) 127 #endif 128 129 const struct ata_bustype ahci_ata_bustype = { 130 .bustype_type = SCSIPI_BUSTYPE_ATA, 131 .ata_bio = ahci_ata_bio, 132 .ata_reset_drive = ahci_reset_drive, 133 .ata_reset_channel = ahci_reset_channel, 134 .ata_exec_command = ahci_exec_command, 135 .ata_get_params = ata_get_params, 136 .ata_addref = ahci_ata_addref, 137 .ata_delref = ahci_ata_delref, 138 .ata_killpending = ahci_killpending, 139 .ata_recovery = ahci_channel_recover, 140 }; 141 142 static void ahci_setup_port(struct ahci_softc *sc, int i); 143 144 static void 145 ahci_enable(struct ahci_softc *sc) 146 { 147 uint32_t ghc; 148 149 ghc = AHCI_READ(sc, AHCI_GHC); 150 if (!(ghc & AHCI_GHC_AE)) { 151 ghc |= AHCI_GHC_AE; 152 AHCI_WRITE(sc, AHCI_GHC, ghc); 153 } 154 } 155 156 static int 157 ahci_reset(struct ahci_softc *sc) 158 { 159 int i; 160 161 /* reset controller */ 162 AHCI_WRITE(sc, AHCI_GHC, AHCI_GHC_HR); 163 /* wait up to 1s for reset to complete */ 164 for (i = 0; i < 1000; i++) { 165 delay(1000); 166 if ((AHCI_READ(sc, AHCI_GHC) & AHCI_GHC_HR) == 0) 167 break; 168 } 169 if ((AHCI_READ(sc, AHCI_GHC) & AHCI_GHC_HR)) { 170 aprint_error("%s: reset failed\n", AHCINAME(sc)); 171 return -1; 172 } 173 /* enable ahci mode */ 174 ahci_enable(sc); 175 176 if 
(sc->sc_save_init_data) { 177 AHCI_WRITE(sc, AHCI_CAP, sc->sc_init_data.cap); 178 if (sc->sc_init_data.cap2) 179 AHCI_WRITE(sc, AHCI_CAP2, sc->sc_init_data.cap2); 180 AHCI_WRITE(sc, AHCI_PI, sc->sc_init_data.ports); 181 } 182 183 /* Check if hardware reverted to single message MSI */ 184 sc->sc_ghc_mrsm = ISSET(AHCI_READ(sc, AHCI_GHC), AHCI_GHC_MRSM); 185 186 return 0; 187 } 188 189 static void 190 ahci_setup_ports(struct ahci_softc *sc) 191 { 192 int i, port; 193 194 for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) { 195 if ((sc->sc_ahci_ports & (1U << i)) == 0) 196 continue; 197 if (port >= sc->sc_atac.atac_nchannels) { 198 aprint_error("%s: more ports than announced\n", 199 AHCINAME(sc)); 200 break; 201 } 202 ahci_setup_port(sc, i); 203 port++; 204 } 205 } 206 207 static void 208 ahci_reprobe_drives(struct ahci_softc *sc) 209 { 210 int i, port; 211 struct ahci_channel *achp; 212 struct ata_channel *chp; 213 214 for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) { 215 if ((sc->sc_ahci_ports & (1U << i)) == 0) 216 continue; 217 if (port >= sc->sc_atac.atac_nchannels) { 218 aprint_error("%s: more ports than announced\n", 219 AHCINAME(sc)); 220 break; 221 } 222 achp = &sc->sc_channels[i]; 223 chp = &achp->ata_channel; 224 225 ahci_probe_drive(chp); 226 port++; 227 } 228 } 229 230 static void 231 ahci_setup_port(struct ahci_softc *sc, int i) 232 { 233 struct ahci_channel *achp; 234 235 achp = &sc->sc_channels[i]; 236 237 AHCI_WRITE(sc, AHCI_P_CLB(i), BUS_ADDR_LO32(achp->ahcic_bus_cmdh)); 238 AHCI_WRITE(sc, AHCI_P_CLBU(i), BUS_ADDR_HI32(achp->ahcic_bus_cmdh)); 239 AHCI_WRITE(sc, AHCI_P_FB(i), BUS_ADDR_LO32(achp->ahcic_bus_rfis)); 240 AHCI_WRITE(sc, AHCI_P_FBU(i), BUS_ADDR_HI32(achp->ahcic_bus_rfis)); 241 } 242 243 static void 244 ahci_enable_intrs(struct ahci_softc *sc) 245 { 246 247 /* clear interrupts */ 248 AHCI_WRITE(sc, AHCI_IS, AHCI_READ(sc, AHCI_IS)); 249 /* enable interrupts */ 250 AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE); 251 } 252 253 void 254 ahci_attach(struct ahci_softc *sc) 255 { 256 uint32_t ahci_rev; 257 int i, j, port; 258 struct ahci_channel *achp; 259 struct ata_channel *chp; 260 int error; 261 int dmasize; 262 char buf[128]; 263 void *cmdhp; 264 void *cmdtblp; 265 266 if (sc->sc_save_init_data) { 267 ahci_enable(sc); 268 269 sc->sc_init_data.cap = AHCI_READ(sc, AHCI_CAP); 270 sc->sc_init_data.ports = AHCI_READ(sc, AHCI_PI); 271 272 ahci_rev = AHCI_READ(sc, AHCI_VS); 273 if (AHCI_VS_MJR(ahci_rev) > 1 || 274 (AHCI_VS_MJR(ahci_rev) == 1 && AHCI_VS_MNR(ahci_rev) >= 20)) { 275 sc->sc_init_data.cap2 = AHCI_READ(sc, AHCI_CAP2); 276 } else { 277 sc->sc_init_data.cap2 = 0; 278 } 279 if (sc->sc_init_data.ports == 0) { 280 sc->sc_init_data.ports = sc->sc_ahci_ports; 281 } 282 } 283 284 if (ahci_reset(sc) != 0) 285 return; 286 287 sc->sc_ahci_cap = AHCI_READ(sc, AHCI_CAP); 288 if (sc->sc_ahci_quirks & AHCI_QUIRK_BADPMP) { 289 aprint_verbose_dev(sc->sc_atac.atac_dev, 290 "ignoring broken port multiplier support\n"); 291 sc->sc_ahci_cap &= ~AHCI_CAP_SPM; 292 } 293 if (sc->sc_ahci_quirks & AHCI_QUIRK_BADNCQ) { 294 aprint_verbose_dev(sc->sc_atac.atac_dev, 295 "ignoring broken NCQ support\n"); 296 sc->sc_ahci_cap &= ~AHCI_CAP_NCQ; 297 } 298 sc->sc_atac.atac_nchannels = (sc->sc_ahci_cap & AHCI_CAP_NPMASK) + 1; 299 sc->sc_ncmds = ((sc->sc_ahci_cap & AHCI_CAP_NCS) >> 8) + 1; 300 ahci_rev = AHCI_READ(sc, AHCI_VS); 301 snprintb(buf, sizeof(buf), "\177\020" 302 /* "f\000\005NP\0" */ 303 "b\005SXS\0" 304 "b\006EMS\0" 305 "b\007CCCS\0" 306 /* "f\010\005NCS\0" */ 307 "b\015PSC\0" 
308 "b\016SSC\0" 309 "b\017PMD\0" 310 "b\020FBSS\0" 311 "b\021SPM\0" 312 "b\022SAM\0" 313 "b\023SNZO\0" 314 "f\024\003ISS\0" 315 "=\001Gen1\0" 316 "=\002Gen2\0" 317 "=\003Gen3\0" 318 "b\030SCLO\0" 319 "b\031SAL\0" 320 "b\032SALP\0" 321 "b\033SSS\0" 322 "b\034SMPS\0" 323 "b\035SSNTF\0" 324 "b\036SNCQ\0" 325 "b\037S64A\0" 326 "\0", sc->sc_ahci_cap); 327 aprint_normal_dev(sc->sc_atac.atac_dev, "AHCI revision %u.%u" 328 ", %d port%s, %d slot%s, CAP %s\n", 329 AHCI_VS_MJR(ahci_rev), AHCI_VS_MNR(ahci_rev), 330 sc->sc_atac.atac_nchannels, 331 (sc->sc_atac.atac_nchannels == 1 ? "" : "s"), 332 sc->sc_ncmds, (sc->sc_ncmds == 1 ? "" : "s"), buf); 333 334 sc->sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DMA | ATAC_CAP_UDMA 335 | ((sc->sc_ahci_cap & AHCI_CAP_NCQ) ? ATAC_CAP_NCQ : 0); 336 sc->sc_atac.atac_cap |= sc->sc_atac_capflags; 337 sc->sc_atac.atac_pio_cap = 4; 338 sc->sc_atac.atac_dma_cap = 2; 339 sc->sc_atac.atac_udma_cap = 6; 340 sc->sc_atac.atac_channels = sc->sc_chanarray; 341 sc->sc_atac.atac_probe = ahci_probe_drive; 342 sc->sc_atac.atac_bustype_ata = &ahci_ata_bustype; 343 sc->sc_atac.atac_set_modes = ahci_setup_channel; 344 #if NATAPIBUS > 0 345 sc->sc_atac.atac_atapibus_attach = ahci_atapibus_attach; 346 #endif 347 348 dmasize = 349 (AHCI_RFIS_SIZE + AHCI_CMDH_SIZE) * sc->sc_atac.atac_nchannels; 350 error = bus_dmamem_alloc(sc->sc_dmat, dmasize, PAGE_SIZE, 0, 351 &sc->sc_cmd_hdr_seg, 1, &sc->sc_cmd_hdr_nseg, BUS_DMA_NOWAIT); 352 if (error) { 353 aprint_error("%s: unable to allocate command header memory" 354 ", error=%d\n", AHCINAME(sc), error); 355 return; 356 } 357 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cmd_hdr_seg, 358 sc->sc_cmd_hdr_nseg, dmasize, 359 &cmdhp, BUS_DMA_NOWAIT|BUS_DMA_COHERENT); 360 if (error) { 361 aprint_error("%s: unable to map command header memory" 362 ", error=%d\n", AHCINAME(sc), error); 363 return; 364 } 365 error = bus_dmamap_create(sc->sc_dmat, dmasize, 1, dmasize, 0, 366 BUS_DMA_NOWAIT, &sc->sc_cmd_hdrd); 367 if (error) { 368 aprint_error("%s: unable to create command header map" 369 ", error=%d\n", AHCINAME(sc), error); 370 return; 371 } 372 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmd_hdrd, 373 cmdhp, dmasize, NULL, BUS_DMA_NOWAIT); 374 if (error) { 375 aprint_error("%s: unable to load command header map" 376 ", error=%d\n", AHCINAME(sc), error); 377 return; 378 } 379 sc->sc_cmd_hdr = cmdhp; 380 memset(cmdhp, 0, dmasize); 381 bus_dmamap_sync(sc->sc_dmat, sc->sc_cmd_hdrd, 0, dmasize, 382 BUS_DMASYNC_PREWRITE); 383 384 ahci_enable_intrs(sc); 385 386 if (sc->sc_ahci_ports == 0) { 387 sc->sc_ahci_ports = AHCI_READ(sc, AHCI_PI); 388 AHCIDEBUG_PRINT(("active ports %#x\n", sc->sc_ahci_ports), 389 DEBUG_PROBE); 390 } 391 for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) { 392 if ((sc->sc_ahci_ports & (1U << i)) == 0) 393 continue; 394 if (port >= sc->sc_atac.atac_nchannels) { 395 aprint_error("%s: more ports than announced\n", 396 AHCINAME(sc)); 397 break; 398 } 399 400 /* Optional intr establish per active port */ 401 if (sc->sc_intr_establish && sc->sc_intr_establish(sc, i) != 0){ 402 aprint_error("%s: intr establish hook failed\n", 403 AHCINAME(sc)); 404 break; 405 } 406 407 achp = &sc->sc_channels[i]; 408 chp = &achp->ata_channel; 409 sc->sc_chanarray[i] = chp; 410 chp->ch_channel = i; 411 chp->ch_atac = &sc->sc_atac; 412 chp->ch_queue = ata_queue_alloc(sc->sc_ncmds); 413 if (chp->ch_queue == NULL) { 414 aprint_error("%s port %d: can't allocate memory for " 415 "command queue", AHCINAME(sc), i); 416 break; 417 } 418 dmasize = AHCI_CMDTBL_SIZE * 
sc->sc_ncmds; 419 error = bus_dmamem_alloc(sc->sc_dmat, dmasize, PAGE_SIZE, 0, 420 &achp->ahcic_cmd_tbl_seg, 1, &achp->ahcic_cmd_tbl_nseg, 421 BUS_DMA_NOWAIT); 422 if (error) { 423 aprint_error("%s: unable to allocate command table " 424 "memory, error=%d\n", AHCINAME(sc), error); 425 break; 426 } 427 error = bus_dmamem_map(sc->sc_dmat, &achp->ahcic_cmd_tbl_seg, 428 achp->ahcic_cmd_tbl_nseg, dmasize, 429 &cmdtblp, BUS_DMA_NOWAIT|BUS_DMA_COHERENT); 430 if (error) { 431 aprint_error("%s: unable to map command table memory" 432 ", error=%d\n", AHCINAME(sc), error); 433 break; 434 } 435 error = bus_dmamap_create(sc->sc_dmat, dmasize, 1, dmasize, 0, 436 BUS_DMA_NOWAIT, &achp->ahcic_cmd_tbld); 437 if (error) { 438 aprint_error("%s: unable to create command table map" 439 ", error=%d\n", AHCINAME(sc), error); 440 break; 441 } 442 error = bus_dmamap_load(sc->sc_dmat, achp->ahcic_cmd_tbld, 443 cmdtblp, dmasize, NULL, BUS_DMA_NOWAIT); 444 if (error) { 445 aprint_error("%s: unable to load command table map" 446 ", error=%d\n", AHCINAME(sc), error); 447 break; 448 } 449 memset(cmdtblp, 0, dmasize); 450 bus_dmamap_sync(sc->sc_dmat, achp->ahcic_cmd_tbld, 0, 451 dmasize, BUS_DMASYNC_PREWRITE); 452 achp->ahcic_cmdh = (struct ahci_cmd_header *) 453 ((char *)cmdhp + AHCI_CMDH_SIZE * port); 454 achp->ahcic_bus_cmdh = sc->sc_cmd_hdrd->dm_segs[0].ds_addr + 455 AHCI_CMDH_SIZE * port; 456 achp->ahcic_rfis = (struct ahci_r_fis *) 457 ((char *)cmdhp + 458 AHCI_CMDH_SIZE * sc->sc_atac.atac_nchannels + 459 AHCI_RFIS_SIZE * port); 460 achp->ahcic_bus_rfis = sc->sc_cmd_hdrd->dm_segs[0].ds_addr + 461 AHCI_CMDH_SIZE * sc->sc_atac.atac_nchannels + 462 AHCI_RFIS_SIZE * port; 463 AHCIDEBUG_PRINT(("port %d cmdh %p (0x%" PRIx64 ") " 464 "rfis %p (0x%" PRIx64 ")\n", i, 465 achp->ahcic_cmdh, (uint64_t)achp->ahcic_bus_cmdh, 466 achp->ahcic_rfis, (uint64_t)achp->ahcic_bus_rfis), 467 DEBUG_PROBE); 468 469 for (j = 0; j < sc->sc_ncmds; j++) { 470 achp->ahcic_cmd_tbl[j] = (struct ahci_cmd_tbl *) 471 ((char *)cmdtblp + AHCI_CMDTBL_SIZE * j); 472 achp->ahcic_bus_cmd_tbl[j] = 473 achp->ahcic_cmd_tbld->dm_segs[0].ds_addr + 474 AHCI_CMDTBL_SIZE * j; 475 achp->ahcic_cmdh[j].cmdh_cmdtba = 476 htole64(achp->ahcic_bus_cmd_tbl[j]); 477 AHCIDEBUG_PRINT(("port %d/%d tbl %p (0x%" PRIx64 ")\n", i, j, 478 achp->ahcic_cmd_tbl[j], 479 (uint64_t)achp->ahcic_bus_cmd_tbl[j]), DEBUG_PROBE); 480 /* The xfer DMA map */ 481 error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 482 AHCI_NPRD, 0x400000 /* 4MB */, 0, 483 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 484 &achp->ahcic_datad[j]); 485 if (error) { 486 aprint_error("%s: couldn't alloc xfer DMA map, " 487 "error=%d\n", AHCINAME(sc), error); 488 goto end; 489 } 490 } 491 ahci_setup_port(sc, i); 492 if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih, 493 AHCI_P_SSTS(i), 4, &achp->ahcic_sstatus) != 0) { 494 aprint_error("%s: couldn't map port %d " 495 "sata_status regs\n", AHCINAME(sc), i); 496 break; 497 } 498 if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih, 499 AHCI_P_SCTL(i), 4, &achp->ahcic_scontrol) != 0) { 500 aprint_error("%s: couldn't map port %d " 501 "sata_control regs\n", AHCINAME(sc), i); 502 break; 503 } 504 if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih, 505 AHCI_P_SERR(i), 4, &achp->ahcic_serror) != 0) { 506 aprint_error("%s: couldn't map port %d " 507 "sata_error regs\n", AHCINAME(sc), i); 508 break; 509 } 510 ata_channel_attach(chp); 511 port++; 512 end: 513 continue; 514 } 515 } 516 517 void 518 ahci_childdetached(struct ahci_softc *sc, device_t child) 519 { 520 struct ahci_channel *achp; 521 
struct ata_channel *chp; 522 523 for (int i = 0; i < AHCI_MAX_PORTS; i++) { 524 achp = &sc->sc_channels[i]; 525 chp = &achp->ata_channel; 526 527 if ((sc->sc_ahci_ports & (1U << i)) == 0) 528 continue; 529 530 if (child == chp->atabus) 531 chp->atabus = NULL; 532 } 533 } 534 535 int 536 ahci_detach(struct ahci_softc *sc, int flags) 537 { 538 struct atac_softc *atac; 539 struct ahci_channel *achp; 540 struct ata_channel *chp; 541 struct scsipi_adapter *adapt; 542 int i, j, port; 543 int error; 544 545 atac = &sc->sc_atac; 546 adapt = &atac->atac_atapi_adapter._generic; 547 548 for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) { 549 achp = &sc->sc_channels[i]; 550 chp = &achp->ata_channel; 551 552 if ((sc->sc_ahci_ports & (1U << i)) == 0) 553 continue; 554 if (port >= sc->sc_atac.atac_nchannels) { 555 aprint_error("%s: more ports than announced\n", 556 AHCINAME(sc)); 557 break; 558 } 559 560 if (chp->atabus != NULL) { 561 if ((error = config_detach(chp->atabus, flags)) != 0) 562 return error; 563 564 KASSERT(chp->atabus == NULL); 565 } 566 567 if (chp->ch_flags & ATACH_DETACHED) 568 continue; 569 570 for (j = 0; j < sc->sc_ncmds; j++) 571 bus_dmamap_destroy(sc->sc_dmat, achp->ahcic_datad[j]); 572 573 bus_dmamap_unload(sc->sc_dmat, achp->ahcic_cmd_tbld); 574 bus_dmamap_destroy(sc->sc_dmat, achp->ahcic_cmd_tbld); 575 bus_dmamem_unmap(sc->sc_dmat, achp->ahcic_cmd_tbl[0], 576 AHCI_CMDTBL_SIZE * sc->sc_ncmds); 577 bus_dmamem_free(sc->sc_dmat, &achp->ahcic_cmd_tbl_seg, 578 achp->ahcic_cmd_tbl_nseg); 579 580 ata_channel_detach(chp); 581 port++; 582 } 583 584 bus_dmamap_unload(sc->sc_dmat, sc->sc_cmd_hdrd); 585 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmd_hdrd); 586 bus_dmamem_unmap(sc->sc_dmat, sc->sc_cmd_hdr, 587 (AHCI_RFIS_SIZE + AHCI_CMDH_SIZE) * sc->sc_atac.atac_nchannels); 588 bus_dmamem_free(sc->sc_dmat, &sc->sc_cmd_hdr_seg, sc->sc_cmd_hdr_nseg); 589 590 if (adapt->adapt_refcnt != 0) 591 return EBUSY; 592 593 return 0; 594 } 595 596 void 597 ahci_resume(struct ahci_softc *sc) 598 { 599 ahci_reset(sc); 600 ahci_setup_ports(sc); 601 ahci_reprobe_drives(sc); 602 ahci_enable_intrs(sc); 603 } 604 605 int 606 ahci_intr(void *v) 607 { 608 struct ahci_softc *sc = v; 609 uint32_t is, ports; 610 int bit, r = 0; 611 612 while ((is = AHCI_READ(sc, AHCI_IS))) { 613 AHCIDEBUG_PRINT(("%s ahci_intr 0x%x\n", AHCINAME(sc), is), 614 DEBUG_INTR); 615 r = 1; 616 ports = is; 617 while ((bit = ffs(ports)) != 0) { 618 bit--; 619 ahci_intr_port_common(&sc->sc_channels[bit].ata_channel); 620 ports &= ~__BIT(bit); 621 } 622 AHCI_WRITE(sc, AHCI_IS, is); 623 } 624 625 return r; 626 } 627 628 int 629 ahci_intr_port(void *v) 630 { 631 struct ahci_channel *achp = v; 632 struct ata_channel *chp = &achp->ata_channel; 633 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 634 int ret; 635 636 ret = ahci_intr_port_common(chp); 637 if (ret) { 638 AHCI_WRITE(sc, AHCI_IS, 1U << chp->ch_channel); 639 } 640 641 return ret; 642 } 643 644 static int 645 ahci_intr_port_common(struct ata_channel *chp) 646 { 647 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 648 uint32_t is, tfd, sact; 649 struct ata_xfer *xfer; 650 int slot = -1; 651 bool recover = false; 652 uint32_t aslots; 653 654 is = AHCI_READ(sc, AHCI_P_IS(chp->ch_channel)); 655 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), is); 656 657 AHCIDEBUG_PRINT(("ahci_intr_port_common %s port %d " 658 "is 0x%x CI 0x%x SACT 0x%x TFD 0x%x\n", 659 AHCINAME(sc), 660 chp->ch_channel, is, 661 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel)), 662 AHCI_READ(sc, AHCI_P_SACT(chp->ch_channel)), 
663 AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel))), 664 DEBUG_INTR); 665 666 if ((chp->ch_flags & ATACH_NCQ) == 0) { 667 /* Non-NCQ operation */ 668 sact = AHCI_READ(sc, AHCI_P_CI(chp->ch_channel)); 669 } else { 670 /* NCQ operation */ 671 sact = AHCI_READ(sc, AHCI_P_SACT(chp->ch_channel)); 672 } 673 674 /* Handle errors */ 675 if (is & (AHCI_P_IX_TFES | AHCI_P_IX_HBFS | AHCI_P_IX_HBDS | 676 AHCI_P_IX_IFS | AHCI_P_IX_OFS | AHCI_P_IX_UFS)) { 677 /* Fatal errors */ 678 if (is & AHCI_P_IX_TFES) { 679 tfd = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel)); 680 681 if ((chp->ch_flags & ATACH_NCQ) == 0) { 682 /* Slot valid only for Non-NCQ operation */ 683 slot = (AHCI_READ(sc, 684 AHCI_P_CMD(chp->ch_channel)) 685 & AHCI_P_CMD_CCS_MASK) 686 >> AHCI_P_CMD_CCS_SHIFT; 687 } 688 689 AHCIDEBUG_PRINT(( 690 "%s port %d: TFE: sact 0x%x is 0x%x tfd 0x%x\n", 691 AHCINAME(sc), chp->ch_channel, sact, is, tfd), 692 DEBUG_INTR); 693 } else { 694 /* mark an error, and set BSY */ 695 tfd = (WDCE_ABRT << AHCI_P_TFD_ERR_SHIFT) | 696 WDCS_ERR | WDCS_BSY; 697 } 698 699 if (is & AHCI_P_IX_IFS) { 700 AHCIDEBUG_PRINT(("%s port %d: SERR 0x%x\n", 701 AHCINAME(sc), chp->ch_channel, 702 AHCI_READ(sc, AHCI_P_SERR(chp->ch_channel))), 703 DEBUG_INTR); 704 } 705 706 if (!ISSET(chp->ch_flags, ATACH_RECOVERING)) 707 recover = true; 708 } else if (is & (AHCI_P_IX_DHRS|AHCI_P_IX_SDBS)) { 709 tfd = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel)); 710 711 /* D2H Register FIS or Set Device Bits */ 712 if ((tfd & WDCS_ERR) != 0) { 713 if (!ISSET(chp->ch_flags, ATACH_RECOVERING)) 714 recover = true; 715 716 AHCIDEBUG_PRINT(("%s port %d: transfer aborted 0x%x\n", 717 AHCINAME(sc), chp->ch_channel, tfd), DEBUG_INTR); 718 } 719 } else { 720 tfd = 0; 721 } 722 723 if (__predict_false(recover)) 724 ata_channel_freeze(chp); 725 726 aslots = ata_queue_active(chp); 727 728 if (slot >= 0) { 729 if ((aslots & __BIT(slot)) != 0 && 730 (sact & __BIT(slot)) == 0) { 731 xfer = ata_queue_hwslot_to_xfer(chp, slot); 732 xfer->ops->c_intr(chp, xfer, tfd); 733 } 734 } else { 735 /* 736 * For NCQ, HBA halts processing when error is notified, 737 * and any further D2H FISes are ignored until the error 738 * condition is cleared. Hence if a command is inactive, 739 * it means it actually already finished successfully. 740 * Note: active slots can change as c_intr() callback 741 * can activate another command(s), so must only process 742 * commands active before we start processing. 
743 */ 744 745 for (slot = 0; slot < sc->sc_ncmds; slot++) { 746 if ((aslots & __BIT(slot)) != 0 && 747 (sact & __BIT(slot)) == 0) { 748 xfer = ata_queue_hwslot_to_xfer(chp, slot); 749 xfer->ops->c_intr(chp, xfer, tfd); 750 } 751 } 752 } 753 754 if (__predict_false(recover)) { 755 ata_channel_lock(chp); 756 ata_channel_thaw_locked(chp); 757 ata_thread_run(chp, 0, ATACH_TH_RECOVERY, tfd); 758 ata_channel_unlock(chp); 759 } 760 761 return 1; 762 } 763 764 static void 765 ahci_reset_drive(struct ata_drive_datas *drvp, int flags, uint32_t *sigp) 766 { 767 struct ata_channel *chp = drvp->chnl_softc; 768 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 769 uint8_t c_slot; 770 771 ata_channel_lock_owned(chp); 772 773 /* get a slot for running the command on */ 774 if (!ata_queue_alloc_slot(chp, &c_slot, ATA_MAX_OPENINGS)) { 775 panic("%s: %s: failed to get xfer for reset, port %d\n", 776 device_xname(sc->sc_atac.atac_dev), 777 __func__, chp->ch_channel); 778 /* NOTREACHED */ 779 } 780 781 AHCI_WRITE(sc, AHCI_GHC, 782 AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE); 783 ahci_channel_stop(sc, chp, flags); 784 ahci_do_reset_drive(chp, drvp->drive, flags, sigp, c_slot); 785 AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE); 786 787 ata_queue_free_slot(chp, c_slot); 788 } 789 790 /* return error code from ata_bio */ 791 static int 792 ahci_exec_fis(struct ata_channel *chp, int timeout, int flags, int slot) 793 { 794 struct ahci_channel *achp = (struct ahci_channel *)chp; 795 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 796 int i; 797 uint32_t is; 798 799 /* 800 * Base timeout is specified in ms. Delay for 10ms 801 * on each round. 802 */ 803 timeout = timeout / 10; 804 805 AHCI_CMDTBL_SYNC(sc, achp, slot, BUS_DMASYNC_PREWRITE); 806 AHCI_CMDH_SYNC(sc, achp, slot, 807 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 808 /* start command */ 809 AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << slot); 810 for (i = 0; i < timeout; i++) { 811 if ((AHCI_READ(sc, AHCI_P_CI(chp->ch_channel)) & (1U << slot)) == 812 0) 813 return 0; 814 is = AHCI_READ(sc, AHCI_P_IS(chp->ch_channel)); 815 if (is & (AHCI_P_IX_TFES | AHCI_P_IX_HBFS | AHCI_P_IX_HBDS | 816 AHCI_P_IX_IFS | 817 AHCI_P_IX_OFS | AHCI_P_IX_UFS)) { 818 if ((is & (AHCI_P_IX_DHRS|AHCI_P_IX_TFES)) == 819 (AHCI_P_IX_DHRS|AHCI_P_IX_TFES)) { 820 /* 821 * we got the D2H FIS anyway, 822 * assume sig is valid. 823 * channel is restarted later 824 */ 825 return ERROR; 826 } 827 aprint_debug("%s port %d: error 0x%x sending FIS\n", 828 AHCINAME(sc), chp->ch_channel, is); 829 return ERR_DF; 830 } 831 ata_delay(chp, 10, "ahcifis", flags); 832 } 833 834 aprint_debug("%s port %d: timeout sending FIS\n", 835 AHCINAME(sc), chp->ch_channel); 836 return TIMEOUT; 837 } 838 839 static int 840 ahci_do_reset_drive(struct ata_channel *chp, int drive, int flags, 841 uint32_t *sigp, uint8_t c_slot) 842 { 843 struct ahci_channel *achp = (struct ahci_channel *)chp; 844 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 845 struct ahci_cmd_tbl *cmd_tbl; 846 struct ahci_cmd_header *cmd_h; 847 int i, error = 0; 848 uint32_t sig, cmd; 849 int noclo_retry = 0, retry; 850 851 ata_channel_lock_owned(chp); 852 853 again: 854 /* clear port interrupt register */ 855 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff); 856 /* clear SErrors and start operations */ 857 if ((sc->sc_ahci_cap & AHCI_CAP_CLO) == AHCI_CAP_CLO) { 858 /* 859 * issue a command list override to clear BSY. 
860 * This is needed if there's a PMP with no drive 861 * on port 0 862 */ 863 ahci_channel_start(sc, chp, flags, 1); 864 } else { 865 /* Can't handle command still running without CLO */ 866 cmd = AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)); 867 if ((cmd & AHCI_P_CMD_CR) != 0) { 868 ahci_channel_stop(sc, chp, flags); 869 cmd = AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)); 870 if ((cmd & AHCI_P_CMD_CR) != 0) { 871 aprint_error("%s port %d: DMA engine busy " 872 "for drive %d\n", AHCINAME(sc), 873 chp->ch_channel, drive); 874 error = EBUSY; 875 goto end; 876 } 877 } 878 879 KASSERT((AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CR) == 0); 880 881 ahci_channel_start(sc, chp, flags, 0); 882 } 883 if (drive > 0) { 884 KASSERT(sc->sc_ahci_cap & AHCI_CAP_SPM); 885 } 886 887 /* polled command, assume interrupts are disabled */ 888 889 cmd_h = &achp->ahcic_cmdh[c_slot]; 890 cmd_tbl = achp->ahcic_cmd_tbl[c_slot]; 891 cmd_h->cmdh_flags = htole16(AHCI_CMDH_F_RST | AHCI_CMDH_F_CBSY | 892 RHD_FISLEN / 4 | (drive << AHCI_CMDH_F_PMP_SHIFT)); 893 cmd_h->cmdh_prdtl = 0; 894 cmd_h->cmdh_prdbc = 0; 895 memset(cmd_tbl->cmdt_cfis, 0, 64); 896 cmd_tbl->cmdt_cfis[fis_type] = RHD_FISTYPE; 897 cmd_tbl->cmdt_cfis[rhd_c] = drive; 898 cmd_tbl->cmdt_cfis[rhd_control] = WDCTL_RST | WDCTL_4BIT; 899 switch (ahci_exec_fis(chp, 100, flags, c_slot)) { 900 case ERR_DF: 901 case TIMEOUT: 902 /* 903 * without CLO we can't make sure a software reset will 904 * success, as the drive may still have BSY or DRQ set. 905 * in this case, reset the whole channel and retry the 906 * drive reset. The channel reset should clear BSY and DRQ 907 */ 908 if ((sc->sc_ahci_cap & AHCI_CAP_CLO) == 0 && noclo_retry == 0) { 909 noclo_retry++; 910 ahci_reset_channel(chp, flags); 911 goto again; 912 } 913 aprint_error("%s port %d: setting WDCTL_RST failed " 914 "for drive %d\n", AHCINAME(sc), chp->ch_channel, drive); 915 error = EBUSY; 916 goto end; 917 default: 918 break; 919 } 920 921 /* 922 * SATA specification has toggle period for SRST bit of 5 usec. Some 923 * controllers fail to process the SRST clear operation unless 924 * we wait for at least this period between the set and clear commands. 925 */ 926 ata_delay(chp, 10, "ahcirstw", flags); 927 928 /* 929 * Try to clear WDCTL_RST a few times before giving up. 930 */ 931 for (error = EBUSY, retry = 0; error != 0 && retry < 5; retry++) { 932 cmd_h->cmdh_flags = htole16(RHD_FISLEN / 4 | 933 (drive << AHCI_CMDH_F_PMP_SHIFT)); 934 cmd_h->cmdh_prdbc = 0; 935 memset(cmd_tbl->cmdt_cfis, 0, 64); 936 cmd_tbl->cmdt_cfis[fis_type] = RHD_FISTYPE; 937 cmd_tbl->cmdt_cfis[rhd_c] = drive; 938 cmd_tbl->cmdt_cfis[rhd_control] = WDCTL_4BIT; 939 switch (ahci_exec_fis(chp, 310, flags, c_slot)) { 940 case ERR_DF: 941 case TIMEOUT: 942 error = EBUSY; 943 break; 944 default: 945 error = 0; 946 break; 947 } 948 if (error == 0) { 949 break; 950 } 951 } 952 if (error == EBUSY) { 953 aprint_error("%s port %d: clearing WDCTL_RST failed " 954 "for drive %d\n", AHCINAME(sc), chp->ch_channel, drive); 955 goto end; 956 } 957 958 /* 959 * wait 31s for BSY to clear 960 * This should not be needed, but some controllers clear the 961 * command slot before receiving the D2H FIS ... 
962 */ 963 for (i = 0; i < AHCI_RST_WAIT; i++) { 964 sig = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel)); 965 if ((__SHIFTOUT(sig, AHCI_P_TFD_ST) & WDCS_BSY) == 0) 966 break; 967 ata_delay(chp, 10, "ahcid2h", flags); 968 } 969 if (i == AHCI_RST_WAIT) { 970 aprint_error("%s: BSY never cleared, TD 0x%x\n", 971 AHCINAME(sc), sig); 972 goto end; 973 } 974 AHCIDEBUG_PRINT(("%s: BSY took %d ms\n", AHCINAME(sc), i * 10), 975 DEBUG_PROBE); 976 sig = AHCI_READ(sc, AHCI_P_SIG(chp->ch_channel)); 977 if (sigp) 978 *sigp = sig; 979 AHCIDEBUG_PRINT(("%s: port %d: sig=0x%x CMD=0x%x\n", 980 AHCINAME(sc), chp->ch_channel, sig, 981 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel))), DEBUG_PROBE); 982 end: 983 ahci_channel_stop(sc, chp, flags); 984 AHCISATA_DO_EXTRA_DELAY(sc, chp, "ahcirst", flags); 985 /* clear port interrupt register */ 986 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff); 987 ahci_channel_start(sc, chp, flags, 988 (sc->sc_ahci_cap & AHCI_CAP_CLO) ? 1 : 0); 989 return error; 990 } 991 992 static void 993 ahci_reset_channel(struct ata_channel *chp, int flags) 994 { 995 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 996 struct ahci_channel *achp = (struct ahci_channel *)chp; 997 int i, tfd; 998 999 ata_channel_lock_owned(chp); 1000 1001 ahci_channel_stop(sc, chp, flags); 1002 if (sata_reset_interface(chp, sc->sc_ahcit, achp->ahcic_scontrol, 1003 achp->ahcic_sstatus, flags) != SStatus_DET_DEV) { 1004 printf("%s: port %d reset failed\n", AHCINAME(sc), chp->ch_channel); 1005 /* XXX and then ? */ 1006 } 1007 ata_kill_active(chp, KILL_RESET, flags); 1008 AHCISATA_DO_EXTRA_DELAY(sc, chp, "ahcirst", flags); 1009 /* clear port interrupt register */ 1010 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff); 1011 /* clear SErrors and start operations */ 1012 ahci_channel_start(sc, chp, flags, 1013 (sc->sc_ahci_cap & AHCI_CAP_CLO) ? 
1 : 0); 1014 /* wait 31s for BSY to clear */ 1015 for (i = 0; i < AHCI_RST_WAIT; i++) { 1016 tfd = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel)); 1017 if ((AHCI_TFD_ST(tfd) & WDCS_BSY) == 0) 1018 break; 1019 ata_delay(chp, 10, "ahcid2h", flags); 1020 } 1021 if ((AHCI_TFD_ST(tfd) & WDCS_BSY) != 0) 1022 aprint_error("%s: BSY never cleared, TD 0x%x\n", 1023 AHCINAME(sc), tfd); 1024 AHCIDEBUG_PRINT(("%s: BSY took %d ms\n", AHCINAME(sc), i * 10), 1025 DEBUG_PROBE); 1026 /* clear port interrupt register */ 1027 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff); 1028 1029 return; 1030 } 1031 1032 static int 1033 ahci_ata_addref(struct ata_drive_datas *drvp) 1034 { 1035 return 0; 1036 } 1037 1038 static void 1039 ahci_ata_delref(struct ata_drive_datas *drvp) 1040 { 1041 return; 1042 } 1043 1044 static void 1045 ahci_killpending(struct ata_drive_datas *drvp) 1046 { 1047 return; 1048 } 1049 1050 static void 1051 ahci_probe_drive(struct ata_channel *chp) 1052 { 1053 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1054 struct ahci_channel *achp = (struct ahci_channel *)chp; 1055 uint32_t sig; 1056 uint8_t c_slot; 1057 int error; 1058 1059 ata_channel_lock(chp); 1060 1061 /* get a slot for running the command on */ 1062 if (!ata_queue_alloc_slot(chp, &c_slot, ATA_MAX_OPENINGS)) { 1063 aprint_error_dev(sc->sc_atac.atac_dev, 1064 "%s: failed to get xfer port %d\n", 1065 __func__, chp->ch_channel); 1066 ata_channel_unlock(chp); 1067 return; 1068 } 1069 1070 /* bring interface up, accept FISs, power up and spin up device */ 1071 AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), 1072 AHCI_P_CMD_ICC_AC | AHCI_P_CMD_FRE | 1073 AHCI_P_CMD_POD | AHCI_P_CMD_SUD); 1074 /* reset the PHY and bring online */ 1075 switch (sata_reset_interface(chp, sc->sc_ahcit, achp->ahcic_scontrol, 1076 achp->ahcic_sstatus, AT_WAIT)) { 1077 case SStatus_DET_DEV: 1078 AHCISATA_DO_EXTRA_DELAY(sc, chp, "ahcidv", AT_WAIT); 1079 1080 /* Initial value, used in case the soft reset fails */ 1081 sig = AHCI_READ(sc, AHCI_P_SIG(chp->ch_channel)); 1082 1083 if (sc->sc_ahci_cap & AHCI_CAP_SPM) { 1084 error = ahci_do_reset_drive(chp, PMP_PORT_CTL, AT_WAIT, 1085 &sig, c_slot); 1086 1087 /* If probe for PMP failed, just fallback to drive 0 */ 1088 if (error) { 1089 aprint_error("%s port %d: drive %d reset " 1090 "failed, disabling PMP\n", 1091 AHCINAME(sc), chp->ch_channel, 1092 PMP_PORT_CTL); 1093 1094 sc->sc_ahci_cap &= ~AHCI_CAP_SPM; 1095 ahci_reset_channel(chp, AT_WAIT); 1096 } 1097 } else { 1098 ahci_do_reset_drive(chp, 0, AT_WAIT, &sig, c_slot); 1099 } 1100 sata_interpret_sig(chp, 0, sig); 1101 /* if we have a PMP attached, inform the controller */ 1102 if (chp->ch_ndrives > PMP_PORT_CTL && 1103 chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) { 1104 AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), 1105 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) | 1106 AHCI_P_CMD_PMA); 1107 } 1108 /* clear port interrupt register */ 1109 AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff); 1110 1111 /* and enable interrupts */ 1112 AHCI_WRITE(sc, AHCI_P_IE(chp->ch_channel), 1113 AHCI_P_IX_TFES | AHCI_P_IX_HBFS | AHCI_P_IX_HBDS | 1114 AHCI_P_IX_IFS | 1115 AHCI_P_IX_OFS | AHCI_P_IX_DPS | AHCI_P_IX_UFS | 1116 AHCI_P_IX_PSS | AHCI_P_IX_DHRS | AHCI_P_IX_SDBS); 1117 /* 1118 * optionally, wait AHCISATA_EXTRA_DELAY_MS msec before 1119 * actually starting operations 1120 */ 1121 AHCISATA_DO_EXTRA_DELAY(sc, chp, "ahciprb", AT_WAIT); 1122 break; 1123 1124 default: 1125 break; 1126 } 1127 1128 ata_queue_free_slot(chp, c_slot); 1129 1130 
ata_channel_unlock(chp); 1131 } 1132 1133 static void 1134 ahci_setup_channel(struct ata_channel *chp) 1135 { 1136 return; 1137 } 1138 1139 static const struct ata_xfer_ops ahci_cmd_xfer_ops = { 1140 .c_start = ahci_cmd_start, 1141 .c_poll = ahci_cmd_poll, 1142 .c_abort = ahci_cmd_abort, 1143 .c_intr = ahci_cmd_complete, 1144 .c_kill_xfer = ahci_cmd_kill_xfer, 1145 }; 1146 1147 static void 1148 ahci_exec_command(struct ata_drive_datas *drvp, struct ata_xfer *xfer) 1149 { 1150 struct ata_channel *chp = drvp->chnl_softc; 1151 struct ata_command *ata_c = &xfer->c_ata_c; 1152 1153 AHCIDEBUG_PRINT(("ahci_exec_command port %d CI 0x%x\n", 1154 chp->ch_channel, 1155 AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CI(chp->ch_channel))), 1156 DEBUG_XFERS); 1157 if (ata_c->flags & AT_POLL) 1158 xfer->c_flags |= C_POLL; 1159 if (ata_c->flags & AT_WAIT) 1160 xfer->c_flags |= C_WAIT; 1161 xfer->c_drive = drvp->drive; 1162 xfer->c_databuf = ata_c->data; 1163 xfer->c_bcount = ata_c->bcount; 1164 xfer->ops = &ahci_cmd_xfer_ops; 1165 1166 ata_exec_xfer(chp, xfer); 1167 } 1168 1169 static int 1170 ahci_cmd_start(struct ata_channel *chp, struct ata_xfer *xfer) 1171 { 1172 struct ahci_softc *sc = AHCI_CH2SC(chp); 1173 struct ahci_channel *achp = (struct ahci_channel *)chp; 1174 struct ata_command *ata_c = &xfer->c_ata_c; 1175 int slot = xfer->c_slot; 1176 struct ahci_cmd_tbl *cmd_tbl; 1177 struct ahci_cmd_header *cmd_h; 1178 1179 AHCIDEBUG_PRINT(("ahci_cmd_start CI 0x%x timo %d\n slot %d", 1180 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel)), 1181 ata_c->timeout, slot), 1182 DEBUG_XFERS); 1183 1184 ata_channel_lock_owned(chp); 1185 1186 cmd_tbl = achp->ahcic_cmd_tbl[slot]; 1187 AHCIDEBUG_PRINT(("%s port %d tbl %p\n", AHCINAME(sc), chp->ch_channel, 1188 cmd_tbl), DEBUG_XFERS); 1189 1190 satafis_rhd_construct_cmd(ata_c, cmd_tbl->cmdt_cfis); 1191 cmd_tbl->cmdt_cfis[rhd_c] |= xfer->c_drive; 1192 1193 cmd_h = &achp->ahcic_cmdh[slot]; 1194 AHCIDEBUG_PRINT(("%s port %d header %p\n", AHCINAME(sc), 1195 chp->ch_channel, cmd_h), DEBUG_XFERS); 1196 if (ahci_dma_setup(chp, slot, 1197 (ata_c->flags & (AT_READ|AT_WRITE) && ata_c->bcount > 0) ? 1198 ata_c->data : NULL, 1199 ata_c->bcount, 1200 (ata_c->flags & AT_READ) ? BUS_DMA_READ : BUS_DMA_WRITE)) { 1201 ata_c->flags |= AT_DF; 1202 return ATASTART_ABORT; 1203 } 1204 cmd_h->cmdh_flags = htole16( 1205 ((ata_c->flags & AT_WRITE) ? AHCI_CMDH_F_WR : 0) | 1206 RHD_FISLEN / 4 | (xfer->c_drive << AHCI_CMDH_F_PMP_SHIFT)); 1207 cmd_h->cmdh_prdbc = 0; 1208 AHCI_CMDH_SYNC(sc, achp, slot, 1209 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1210 1211 if (ata_c->flags & AT_POLL) { 1212 /* polled command, disable interrupts */ 1213 AHCI_WRITE(sc, AHCI_GHC, 1214 AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE); 1215 } 1216 /* start command */ 1217 AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << slot); 1218 1219 if ((ata_c->flags & AT_POLL) == 0) { 1220 callout_reset(&chp->c_timo_callout, mstohz(ata_c->timeout), 1221 ata_timeout, chp); 1222 return ATASTART_STARTED; 1223 } else 1224 return ATASTART_POLL; 1225 } 1226 1227 static int 1228 ahci_cmd_poll(struct ata_channel *chp, struct ata_xfer *xfer) 1229 { 1230 struct ahci_softc *sc = AHCI_CH2SC(chp); 1231 struct ahci_channel *achp = (struct ahci_channel *)chp; 1232 1233 ata_channel_lock(chp); 1234 1235 /* 1236 * Polled command. 
1237 */ 1238 for (int i = 0; i < xfer->c_ata_c.timeout / 10; i++) { 1239 if (xfer->c_ata_c.flags & AT_DONE) 1240 break; 1241 ata_channel_unlock(chp); 1242 ahci_intr_port(achp); 1243 ata_channel_lock(chp); 1244 ata_delay(chp, 10, "ahcipl", xfer->c_ata_c.flags); 1245 } 1246 AHCIDEBUG_PRINT(("%s port %d poll end GHC 0x%x IS 0x%x list 0x%x%x fis 0x%x%x CMD 0x%x CI 0x%x\n", AHCINAME(sc), chp->ch_channel, 1247 AHCI_READ(sc, AHCI_GHC), AHCI_READ(sc, AHCI_IS), 1248 AHCI_READ(sc, AHCI_P_CLBU(chp->ch_channel)), 1249 AHCI_READ(sc, AHCI_P_CLB(chp->ch_channel)), 1250 AHCI_READ(sc, AHCI_P_FBU(chp->ch_channel)), 1251 AHCI_READ(sc, AHCI_P_FB(chp->ch_channel)), 1252 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)), 1253 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))), 1254 DEBUG_XFERS); 1255 1256 ata_channel_unlock(chp); 1257 1258 if ((xfer->c_ata_c.flags & AT_DONE) == 0) { 1259 xfer->c_ata_c.flags |= AT_TIMEOU; 1260 xfer->ops->c_intr(chp, xfer, 0); 1261 } 1262 /* reenable interrupts */ 1263 AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE); 1264 1265 return ATAPOLL_DONE; 1266 } 1267 1268 static void 1269 ahci_cmd_abort(struct ata_channel *chp, struct ata_xfer *xfer) 1270 { 1271 ahci_cmd_complete(chp, xfer, 0); 1272 } 1273 1274 static void 1275 ahci_cmd_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason) 1276 { 1277 struct ata_command *ata_c = &xfer->c_ata_c; 1278 bool deactivate = true; 1279 1280 AHCIDEBUG_PRINT(("ahci_cmd_kill_xfer port %d\n", chp->ch_channel), 1281 DEBUG_FUNCS); 1282 1283 switch (reason) { 1284 case KILL_GONE_INACTIVE: 1285 deactivate = false; 1286 /* FALLTHROUGH */ 1287 case KILL_GONE: 1288 ata_c->flags |= AT_GONE; 1289 break; 1290 case KILL_RESET: 1291 ata_c->flags |= AT_RESET; 1292 break; 1293 case KILL_REQUEUE: 1294 panic("%s: not supposed to be requeued\n", __func__); 1295 break; 1296 default: 1297 printf("ahci_cmd_kill_xfer: unknown reason %d\n", reason); 1298 panic("ahci_cmd_kill_xfer"); 1299 } 1300 1301 ahci_cmd_done_end(chp, xfer); 1302 1303 if (deactivate) 1304 ata_deactivate_xfer(chp, xfer); 1305 } 1306 1307 static int 1308 ahci_cmd_complete(struct ata_channel *chp, struct ata_xfer *xfer, int tfd) 1309 { 1310 struct ata_command *ata_c = &xfer->c_ata_c; 1311 struct ahci_channel *achp = (struct ahci_channel *)chp; 1312 struct ahci_softc *sc = AHCI_CH2SC(chp); 1313 1314 AHCIDEBUG_PRINT(("ahci_cmd_complete port %d CMD 0x%x CI 0x%x\n", 1315 chp->ch_channel, 1316 AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CMD(chp->ch_channel)), 1317 AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CI(chp->ch_channel))), 1318 DEBUG_FUNCS); 1319 1320 if (ata_waitdrain_xfer_check(chp, xfer)) 1321 return 0; 1322 1323 if (xfer->c_flags & C_TIMEOU) { 1324 ata_c->flags |= AT_TIMEOU; 1325 } 1326 1327 if (AHCI_TFD_ST(tfd) & WDCS_BSY) { 1328 ata_c->flags |= AT_TIMEOU; 1329 } else if (AHCI_TFD_ST(tfd) & WDCS_ERR) { 1330 ata_c->r_error = AHCI_TFD_ERR(tfd); 1331 ata_c->flags |= AT_ERROR; 1332 } 1333 1334 if (ata_c->flags & AT_READREG) { 1335 AHCI_RFIS_SYNC(sc, achp, BUS_DMASYNC_POSTREAD); 1336 satafis_rdh_cmd_readreg(ata_c, achp->ahcic_rfis->rfis_rfis); 1337 } 1338 1339 ahci_cmd_done(chp, xfer); 1340 1341 ata_deactivate_xfer(chp, xfer); 1342 1343 if ((ata_c->flags & (AT_TIMEOU|AT_ERROR)) == 0) 1344 atastart(chp); 1345 1346 return 0; 1347 } 1348 1349 static void 1350 ahci_cmd_done(struct ata_channel *chp, struct ata_xfer *xfer) 1351 { 1352 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1353 struct ahci_channel *achp = (struct ahci_channel *)chp; 1354 struct ata_command *ata_c = &xfer->c_ata_c; 1355 
uint16_t *idwordbuf; 1356 int i; 1357 1358 AHCIDEBUG_PRINT(("ahci_cmd_done port %d flags %#x/%#x\n", 1359 chp->ch_channel, xfer->c_flags, ata_c->flags), DEBUG_FUNCS); 1360 1361 if (ata_c->flags & (AT_READ|AT_WRITE) && ata_c->bcount > 0) { 1362 bus_dmamap_t map = achp->ahcic_datad[xfer->c_slot]; 1363 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1364 (ata_c->flags & AT_READ) ? BUS_DMASYNC_POSTREAD : 1365 BUS_DMASYNC_POSTWRITE); 1366 bus_dmamap_unload(sc->sc_dmat, map); 1367 } 1368 1369 AHCI_CMDH_SYNC(sc, achp, xfer->c_slot, 1370 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1371 1372 /* ata(4) expects IDENTIFY data to be in host endianness */ 1373 if (ata_c->r_command == WDCC_IDENTIFY || 1374 ata_c->r_command == ATAPI_IDENTIFY_DEVICE) { 1375 idwordbuf = xfer->c_databuf; 1376 for (i = 0; i < (xfer->c_bcount / sizeof(*idwordbuf)); i++) { 1377 idwordbuf[i] = le16toh(idwordbuf[i]); 1378 } 1379 } 1380 1381 if (achp->ahcic_cmdh[xfer->c_slot].cmdh_prdbc) 1382 ata_c->flags |= AT_XFDONE; 1383 1384 ahci_cmd_done_end(chp, xfer); 1385 } 1386 1387 static void 1388 ahci_cmd_done_end(struct ata_channel *chp, struct ata_xfer *xfer) 1389 { 1390 struct ata_command *ata_c = &xfer->c_ata_c; 1391 1392 ata_c->flags |= AT_DONE; 1393 } 1394 1395 static const struct ata_xfer_ops ahci_bio_xfer_ops = { 1396 .c_start = ahci_bio_start, 1397 .c_poll = ahci_bio_poll, 1398 .c_abort = ahci_bio_abort, 1399 .c_intr = ahci_bio_complete, 1400 .c_kill_xfer = ahci_bio_kill_xfer, 1401 }; 1402 1403 static void 1404 ahci_ata_bio(struct ata_drive_datas *drvp, struct ata_xfer *xfer) 1405 { 1406 struct ata_channel *chp = drvp->chnl_softc; 1407 struct ata_bio *ata_bio = &xfer->c_bio; 1408 1409 AHCIDEBUG_PRINT(("ahci_ata_bio port %d CI 0x%x\n", 1410 chp->ch_channel, 1411 AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CI(chp->ch_channel))), 1412 DEBUG_XFERS); 1413 if (ata_bio->flags & ATA_POLL) 1414 xfer->c_flags |= C_POLL; 1415 xfer->c_drive = drvp->drive; 1416 xfer->c_databuf = ata_bio->databuf; 1417 xfer->c_bcount = ata_bio->bcount; 1418 xfer->ops = &ahci_bio_xfer_ops; 1419 ata_exec_xfer(chp, xfer); 1420 } 1421 1422 static int 1423 ahci_bio_start(struct ata_channel *chp, struct ata_xfer *xfer) 1424 { 1425 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1426 struct ahci_channel *achp = (struct ahci_channel *)chp; 1427 struct ata_bio *ata_bio = &xfer->c_bio; 1428 struct ahci_cmd_tbl *cmd_tbl; 1429 struct ahci_cmd_header *cmd_h; 1430 1431 AHCIDEBUG_PRINT(("ahci_bio_start CI 0x%x\n", 1432 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))), DEBUG_XFERS); 1433 1434 ata_channel_lock_owned(chp); 1435 1436 cmd_tbl = achp->ahcic_cmd_tbl[xfer->c_slot]; 1437 AHCIDEBUG_PRINT(("%s port %d tbl %p\n", AHCINAME(sc), chp->ch_channel, 1438 cmd_tbl), DEBUG_XFERS); 1439 1440 satafis_rhd_construct_bio(xfer, cmd_tbl->cmdt_cfis); 1441 cmd_tbl->cmdt_cfis[rhd_c] |= xfer->c_drive; 1442 1443 cmd_h = &achp->ahcic_cmdh[xfer->c_slot]; 1444 AHCIDEBUG_PRINT(("%s port %d header %p\n", AHCINAME(sc), 1445 chp->ch_channel, cmd_h), DEBUG_XFERS); 1446 if (ahci_dma_setup(chp, xfer->c_slot, ata_bio->databuf, ata_bio->bcount, 1447 (ata_bio->flags & ATA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE)) { 1448 ata_bio->error = ERR_DMA; 1449 ata_bio->r_error = 0; 1450 return ATASTART_ABORT; 1451 } 1452 cmd_h->cmdh_flags = htole16( 1453 ((ata_bio->flags & ATA_READ) ? 
0 : AHCI_CMDH_F_WR) | 1454 RHD_FISLEN / 4 | (xfer->c_drive << AHCI_CMDH_F_PMP_SHIFT)); 1455 cmd_h->cmdh_prdbc = 0; 1456 AHCI_CMDH_SYNC(sc, achp, xfer->c_slot, 1457 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1458 1459 if (xfer->c_flags & C_POLL) { 1460 /* polled command, disable interrupts */ 1461 AHCI_WRITE(sc, AHCI_GHC, 1462 AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE); 1463 } 1464 if (xfer->c_flags & C_NCQ) 1465 AHCI_WRITE(sc, AHCI_P_SACT(chp->ch_channel), 1U << xfer->c_slot); 1466 /* start command */ 1467 AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << xfer->c_slot); 1468 1469 if ((xfer->c_flags & C_POLL) == 0) { 1470 callout_reset(&chp->c_timo_callout, mstohz(ATA_DELAY), 1471 ata_timeout, chp); 1472 return ATASTART_STARTED; 1473 } else 1474 return ATASTART_POLL; 1475 } 1476 1477 static int 1478 ahci_bio_poll(struct ata_channel *chp, struct ata_xfer *xfer) 1479 { 1480 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1481 struct ahci_channel *achp = (struct ahci_channel *)chp; 1482 1483 /* 1484 * Polled command. 1485 */ 1486 for (int i = 0; i < ATA_DELAY * 10; i++) { 1487 if (xfer->c_bio.flags & ATA_ITSDONE) 1488 break; 1489 ahci_intr_port(achp); 1490 delay(100); 1491 } 1492 AHCIDEBUG_PRINT(("%s port %d poll end GHC 0x%x IS 0x%x list 0x%x%x fis 0x%x%x CMD 0x%x CI 0x%x\n", AHCINAME(sc), chp->ch_channel, 1493 AHCI_READ(sc, AHCI_GHC), AHCI_READ(sc, AHCI_IS), 1494 AHCI_READ(sc, AHCI_P_CLBU(chp->ch_channel)), 1495 AHCI_READ(sc, AHCI_P_CLB(chp->ch_channel)), 1496 AHCI_READ(sc, AHCI_P_FBU(chp->ch_channel)), 1497 AHCI_READ(sc, AHCI_P_FB(chp->ch_channel)), 1498 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)), 1499 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))), 1500 DEBUG_XFERS); 1501 if ((xfer->c_bio.flags & ATA_ITSDONE) == 0) { 1502 xfer->c_bio.error = TIMEOUT; 1503 xfer->ops->c_intr(chp, xfer, 0); 1504 } 1505 /* reenable interrupts */ 1506 AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE); 1507 return ATAPOLL_DONE; 1508 } 1509 1510 static void 1511 ahci_bio_abort(struct ata_channel *chp, struct ata_xfer *xfer) 1512 { 1513 ahci_bio_complete(chp, xfer, 0); 1514 } 1515 1516 static void 1517 ahci_bio_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason) 1518 { 1519 int drive = xfer->c_drive; 1520 struct ata_bio *ata_bio = &xfer->c_bio; 1521 bool deactivate = true; 1522 1523 AHCIDEBUG_PRINT(("ahci_bio_kill_xfer port %d\n", chp->ch_channel), 1524 DEBUG_FUNCS); 1525 1526 ata_bio->flags |= ATA_ITSDONE; 1527 switch (reason) { 1528 case KILL_GONE_INACTIVE: 1529 deactivate = false; 1530 /* FALLTHROUGH */ 1531 case KILL_GONE: 1532 ata_bio->error = ERR_NODEV; 1533 break; 1534 case KILL_RESET: 1535 ata_bio->error = ERR_RESET; 1536 break; 1537 case KILL_REQUEUE: 1538 ata_bio->error = REQUEUE; 1539 break; 1540 default: 1541 printf("ahci_bio_kill_xfer: unknown reason %d\n", reason); 1542 panic("ahci_bio_kill_xfer"); 1543 } 1544 ata_bio->r_error = WDCE_ABRT; 1545 1546 if (deactivate) 1547 ata_deactivate_xfer(chp, xfer); 1548 1549 (*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc, xfer); 1550 } 1551 1552 static int 1553 ahci_bio_complete(struct ata_channel *chp, struct ata_xfer *xfer, int tfd) 1554 { 1555 struct ata_bio *ata_bio = &xfer->c_bio; 1556 int drive = xfer->c_drive; 1557 struct ahci_channel *achp = (struct ahci_channel *)chp; 1558 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1559 1560 AHCIDEBUG_PRINT(("ahci_bio_complete port %d\n", chp->ch_channel), 1561 DEBUG_FUNCS); 1562 1563 if (ata_waitdrain_xfer_check(chp, xfer)) 1564 return 0; 1565 1566 if 
(xfer->c_flags & C_TIMEOU) { 1567 ata_bio->error = TIMEOUT; 1568 } 1569 1570 bus_dmamap_sync(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot], 0, 1571 achp->ahcic_datad[xfer->c_slot]->dm_mapsize, 1572 (ata_bio->flags & ATA_READ) ? BUS_DMASYNC_POSTREAD : 1573 BUS_DMASYNC_POSTWRITE); 1574 bus_dmamap_unload(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot]); 1575 1576 ata_bio->flags |= ATA_ITSDONE; 1577 if (AHCI_TFD_ERR(tfd) & WDCS_DWF) { 1578 ata_bio->error = ERR_DF; 1579 } else if (AHCI_TFD_ST(tfd) & WDCS_ERR) { 1580 ata_bio->error = ERROR; 1581 ata_bio->r_error = AHCI_TFD_ERR(tfd); 1582 } else if (AHCI_TFD_ST(tfd) & WDCS_CORR) 1583 ata_bio->flags |= ATA_CORR; 1584 1585 AHCI_CMDH_SYNC(sc, achp, xfer->c_slot, 1586 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1587 AHCIDEBUG_PRINT(("ahci_bio_complete bcount %ld", 1588 ata_bio->bcount), DEBUG_XFERS); 1589 /* 1590 * If it was a write, complete data buffer may have been transferred 1591 * before error detection; in this case don't use cmdh_prdbc 1592 * as it won't reflect what was written to media. Assume nothing 1593 * was transferred and leave bcount as-is. 1594 * For queued commands, PRD Byte Count should not be used, and is 1595 * not required to be valid; in that case underflow is always illegal. 1596 */ 1597 if ((xfer->c_flags & C_NCQ) != 0) { 1598 if (ata_bio->error == NOERROR) 1599 ata_bio->bcount = 0; 1600 } else { 1601 if ((ata_bio->flags & ATA_READ) || ata_bio->error == NOERROR) 1602 ata_bio->bcount -= 1603 le32toh(achp->ahcic_cmdh[xfer->c_slot].cmdh_prdbc); 1604 } 1605 AHCIDEBUG_PRINT((" now %ld\n", ata_bio->bcount), DEBUG_XFERS); 1606 1607 ata_deactivate_xfer(chp, xfer); 1608 1609 (*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc, xfer); 1610 if ((AHCI_TFD_ST(tfd) & WDCS_ERR) == 0) 1611 atastart(chp); 1612 return 0; 1613 } 1614 1615 static void 1616 ahci_channel_stop(struct ahci_softc *sc, struct ata_channel *chp, int flags) 1617 { 1618 int i; 1619 /* stop channel */ 1620 AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), 1621 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & ~AHCI_P_CMD_ST); 1622 /* wait 1s for channel to stop */ 1623 for (i = 0; i <100; i++) { 1624 if ((AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CR) 1625 == 0) 1626 break; 1627 ata_delay(chp, 10, "ahcistop", flags); 1628 } 1629 if (AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CR) { 1630 printf("%s: channel wouldn't stop\n", AHCINAME(sc)); 1631 /* XXX controller reset ? */ 1632 return; 1633 } 1634 1635 if (sc->sc_channel_stop) 1636 sc->sc_channel_stop(sc, chp); 1637 } 1638 1639 static void 1640 ahci_channel_start(struct ahci_softc *sc, struct ata_channel *chp, 1641 int flags, int clo) 1642 { 1643 int i; 1644 uint32_t p_cmd; 1645 /* clear error */ 1646 AHCI_WRITE(sc, AHCI_P_SERR(chp->ch_channel), 1647 AHCI_READ(sc, AHCI_P_SERR(chp->ch_channel))); 1648 1649 if (clo) { 1650 /* issue command list override */ 1651 KASSERT(sc->sc_ahci_cap & AHCI_CAP_CLO); 1652 AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), 1653 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) | AHCI_P_CMD_CLO); 1654 /* wait 1s for AHCI_CAP_CLO to clear */ 1655 for (i = 0; i <100; i++) { 1656 if ((AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & 1657 AHCI_P_CMD_CLO) == 0) 1658 break; 1659 ata_delay(chp, 10, "ahciclo", flags); 1660 } 1661 if (AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CLO) { 1662 printf("%s: channel wouldn't CLO\n", AHCINAME(sc)); 1663 /* XXX controller reset ? 
*/ 1664 return; 1665 } 1666 } 1667 1668 if (sc->sc_channel_start) 1669 sc->sc_channel_start(sc, chp); 1670 1671 /* and start controller */ 1672 p_cmd = AHCI_P_CMD_ICC_AC | AHCI_P_CMD_POD | AHCI_P_CMD_SUD | 1673 AHCI_P_CMD_FRE | AHCI_P_CMD_ST; 1674 if (chp->ch_ndrives > PMP_PORT_CTL && 1675 chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) { 1676 p_cmd |= AHCI_P_CMD_PMA; 1677 } 1678 AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), p_cmd); 1679 } 1680 1681 /* Recover channel after command failure */ 1682 static void 1683 ahci_channel_recover(struct ata_channel *chp, int flags, uint32_t tfd) 1684 { 1685 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1686 int drive = ATACH_NODRIVE; 1687 bool reset = false; 1688 1689 ata_channel_lock_owned(chp); 1690 1691 /* 1692 * Read FBS to get the drive which caused the error, if PM is in use. 1693 * According to AHCI 1.3 spec, this register is available regardless 1694 * if FIS-based switching (FBSS) feature is supported, or disabled. 1695 * If FIS-based switching is not in use, it merely maintains single 1696 * pair of DRQ/BSY state, but it is enough since in that case we 1697 * never issue commands for more than one device at the time anyway. 1698 * XXX untested 1699 */ 1700 if (chp->ch_ndrives > PMP_PORT_CTL) { 1701 uint32_t fbs = AHCI_READ(sc, AHCI_P_FBS(chp->ch_channel)); 1702 if (fbs & AHCI_P_FBS_SDE) { 1703 drive = (fbs & AHCI_P_FBS_DWE) >> AHCI_P_FBS_DWE_SHIFT; 1704 1705 /* 1706 * Tell HBA to reset PM port X (value in DWE) state, 1707 * and resume processing commands for other ports. 1708 */ 1709 fbs |= AHCI_P_FBS_DEC; 1710 AHCI_WRITE(sc, AHCI_P_FBS(chp->ch_channel), fbs); 1711 for (int i = 0; i < 1000; i++) { 1712 fbs = AHCI_READ(sc, 1713 AHCI_P_FBS(chp->ch_channel)); 1714 if ((fbs & AHCI_P_FBS_DEC) == 0) 1715 break; 1716 DELAY(1000); 1717 } 1718 if ((fbs & AHCI_P_FBS_DEC) != 0) { 1719 /* follow non-device specific recovery */ 1720 drive = ATACH_NODRIVE; 1721 reset = true; 1722 } 1723 } else { 1724 /* not device specific, reset channel */ 1725 drive = ATACH_NODRIVE; 1726 reset = true; 1727 } 1728 } else 1729 drive = 0; 1730 1731 /* 1732 * If BSY or DRQ bits are set, must execute COMRESET to return 1733 * device to idle state. If drive is idle, it's enough to just 1734 * reset CMD.ST, it's not necessary to do software reset. 1735 * After resetting CMD.ST, need to execute READ LOG EXT for NCQ 1736 * to unblock device processing if COMRESET was not done. 1737 */ 1738 if (reset || (AHCI_TFD_ST(tfd) & (WDCS_BSY|WDCS_DRQ)) != 0) { 1739 ahci_reset_channel(chp, flags); 1740 goto out; 1741 } 1742 1743 KASSERT(drive != ATACH_NODRIVE && drive >= 0); 1744 ahci_channel_stop(sc, chp, flags); 1745 ahci_channel_start(sc, chp, flags, 1746 (sc->sc_ahci_cap & AHCI_CAP_CLO) ? 
1 : 0); 1747 1748 ata_recovery_resume(chp, drive, tfd, flags); 1749 1750 out: 1751 /* Drive unblocked, back to normal operation */ 1752 return; 1753 } 1754 1755 static int 1756 ahci_dma_setup(struct ata_channel *chp, int slot, void *data, 1757 size_t count, int op) 1758 { 1759 int error, seg; 1760 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac; 1761 struct ahci_channel *achp = (struct ahci_channel *)chp; 1762 struct ahci_cmd_tbl *cmd_tbl; 1763 struct ahci_cmd_header *cmd_h; 1764 1765 cmd_h = &achp->ahcic_cmdh[slot]; 1766 cmd_tbl = achp->ahcic_cmd_tbl[slot]; 1767 1768 if (data == NULL) { 1769 cmd_h->cmdh_prdtl = 0; 1770 goto end; 1771 } 1772 1773 error = bus_dmamap_load(sc->sc_dmat, achp->ahcic_datad[slot], 1774 data, count, NULL, 1775 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | op); 1776 if (error) { 1777 printf("%s port %d: failed to load xfer: %d\n", 1778 AHCINAME(sc), chp->ch_channel, error); 1779 return error; 1780 } 1781 bus_dmamap_sync(sc->sc_dmat, achp->ahcic_datad[slot], 0, 1782 achp->ahcic_datad[slot]->dm_mapsize, 1783 (op == BUS_DMA_READ) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1784 for (seg = 0; seg < achp->ahcic_datad[slot]->dm_nsegs; seg++) { 1785 cmd_tbl->cmdt_prd[seg].prd_dba = htole64( 1786 achp->ahcic_datad[slot]->dm_segs[seg].ds_addr); 1787 cmd_tbl->cmdt_prd[seg].prd_dbc = htole32( 1788 achp->ahcic_datad[slot]->dm_segs[seg].ds_len - 1); 1789 } 1790 cmd_tbl->cmdt_prd[seg - 1].prd_dbc |= htole32(AHCI_PRD_DBC_IPC); 1791 cmd_h->cmdh_prdtl = htole16(achp->ahcic_datad[slot]->dm_nsegs); 1792 end: 1793 AHCI_CMDTBL_SYNC(sc, achp, slot, BUS_DMASYNC_PREWRITE); 1794 return 0; 1795 } 1796 1797 #if NATAPIBUS > 0 1798 static void 1799 ahci_atapibus_attach(struct atabus_softc * ata_sc) 1800 { 1801 struct ata_channel *chp = ata_sc->sc_chan; 1802 struct atac_softc *atac = chp->ch_atac; 1803 struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic; 1804 struct scsipi_channel *chan = &chp->ch_atapi_channel; 1805 /* 1806 * Fill in the scsipi_adapter. 1807 */ 1808 adapt->adapt_dev = atac->atac_dev; 1809 adapt->adapt_nchannels = atac->atac_nchannels; 1810 adapt->adapt_request = ahci_atapi_scsipi_request; 1811 adapt->adapt_minphys = ahci_atapi_minphys; 1812 atac->atac_atapi_adapter.atapi_probe_device = ahci_atapi_probe_device; 1813 1814 /* 1815 * Fill in the scsipi_channel. 1816 */ 1817 memset(chan, 0, sizeof(*chan)); 1818 chan->chan_adapter = adapt; 1819 chan->chan_bustype = &ahci_atapi_bustype; 1820 chan->chan_channel = chp->ch_channel; 1821 chan->chan_flags = SCSIPI_CHAN_OPENINGS; 1822 chan->chan_openings = 1; 1823 chan->chan_max_periph = 1; 1824 chan->chan_ntargets = 1; 1825 chan->chan_nluns = 1; 1826 chp->atapibus = config_found(ata_sc->sc_dev, chan, atapiprint, 1827 CFARGS(.iattr = "atapi")); 1828 } 1829 1830 static void 1831 ahci_atapi_minphys(struct buf *bp) 1832 { 1833 if (bp->b_bcount > MAXPHYS) 1834 bp->b_bcount = MAXPHYS; 1835 minphys(bp); 1836 } 1837 1838 /* 1839 * Kill off all pending xfers for a periph. 1840 * 1841 * Must be called at splbio(). 
static void
ahci_atapi_minphys(struct buf *bp)
{
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	minphys(bp);
}

/*
 * Kill off all pending xfers for a periph.
 *
 * Must be called at splbio().
 */
static void
ahci_atapi_kill_pending(struct scsipi_periph *periph)
{
	struct atac_softc *atac =
	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
	struct ata_channel *chp =
	    atac->atac_channels[periph->periph_channel->chan_channel];

	ata_kill_pending(&chp->ch_drive[periph->periph_target]);
}

static const struct ata_xfer_ops ahci_atapi_xfer_ops = {
	.c_start = ahci_atapi_start,
	.c_poll = ahci_atapi_poll,
	.c_abort = ahci_atapi_abort,
	.c_intr = ahci_atapi_complete,
	.c_kill_xfer = ahci_atapi_kill_xfer,
};

static void
ahci_atapi_scsipi_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct scsipi_periph *periph;
	struct scsipi_xfer *sc_xfer;
	struct ahci_softc *sc = device_private(adapt->adapt_dev);
	struct atac_softc *atac = &sc->sc_atac;
	struct ata_xfer *xfer;
	int channel = chan->chan_channel;
	int drive, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		sc_xfer = arg;
		periph = sc_xfer->xs_periph;
		drive = periph->periph_target;
		if (!device_is_active(atac->atac_dev)) {
			sc_xfer->error = XS_DRIVER_STUFFUP;
			scsipi_done(sc_xfer);
			return;
		}
		xfer = ata_get_xfer(atac->atac_channels[channel], false);
		if (xfer == NULL) {
			sc_xfer->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(sc_xfer);
			return;
		}

		if (sc_xfer->xs_control & XS_CTL_POLL)
			xfer->c_flags |= C_POLL;
		xfer->c_drive = drive;
		xfer->c_flags |= C_ATAPI;
		xfer->c_databuf = sc_xfer->data;
		xfer->c_bcount = sc_xfer->datalen;
		xfer->ops = &ahci_atapi_xfer_ops;
		xfer->c_scsipi = sc_xfer;
		xfer->c_atapi.c_dscpoll = 0;
		s = splbio();
		ata_exec_xfer(atac->atac_channels[channel], xfer);
#ifdef DIAGNOSTIC
		if ((sc_xfer->xs_control & XS_CTL_POLL) != 0 &&
		    (sc_xfer->xs_status & XS_STS_DONE) == 0)
			panic("ahci_atapi_scsipi_request: polled command "
			    "not done");
#endif
		splx(s);
		return;
	default:
		/* Not supported, nothing to do. */
		;
	}
}

static int
ahci_atapi_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
	struct ahci_cmd_tbl *cmd_tbl;
	struct ahci_cmd_header *cmd_h;

	AHCIDEBUG_PRINT(("ahci_atapi_start CI 0x%x\n",
	    AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))), DEBUG_XFERS);

	ata_channel_lock_owned(chp);

	cmd_tbl = achp->ahcic_cmd_tbl[xfer->c_slot];
	AHCIDEBUG_PRINT(("%s port %d tbl %p\n", AHCINAME(sc), chp->ch_channel,
	    cmd_tbl), DEBUG_XFERS);

	satafis_rhd_construct_atapi(xfer, cmd_tbl->cmdt_cfis);
	cmd_tbl->cmdt_cfis[rhd_c] |= xfer->c_drive;
	memset(&cmd_tbl->cmdt_acmd, 0, sizeof(cmd_tbl->cmdt_acmd));
	memcpy(cmd_tbl->cmdt_acmd, sc_xfer->cmd, sc_xfer->cmdlen);

	cmd_h = &achp->ahcic_cmdh[xfer->c_slot];
	AHCIDEBUG_PRINT(("%s port %d header %p\n", AHCINAME(sc),
	    chp->ch_channel, cmd_h), DEBUG_XFERS);
	if (ahci_dma_setup(chp, xfer->c_slot,
	    sc_xfer->datalen ? sc_xfer->data : NULL,
	    sc_xfer->datalen,
	    (sc_xfer->xs_control & XS_CTL_DATA_IN) ?
	    BUS_DMA_READ : BUS_DMA_WRITE)) {
		sc_xfer->error = XS_DRIVER_STUFFUP;
		return ATASTART_ABORT;
	}
	cmd_h->cmdh_flags = htole16(
	    ((sc_xfer->xs_control & XS_CTL_DATA_OUT) ? AHCI_CMDH_F_WR : 0) |
	    RHD_FISLEN / 4 | AHCI_CMDH_F_A |
	    (xfer->c_drive << AHCI_CMDH_F_PMP_SHIFT));
	cmd_h->cmdh_prdbc = 0;
	AHCI_CMDH_SYNC(sc, achp, xfer->c_slot,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (xfer->c_flags & C_POLL) {
		/* polled command, disable interrupts */
		AHCI_WRITE(sc, AHCI_GHC,
		    AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE);
	}
	/* start command */
	AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << xfer->c_slot);

	if ((xfer->c_flags & C_POLL) == 0) {
		callout_reset(&chp->c_timo_callout, mstohz(sc_xfer->timeout),
		    ata_timeout, chp);
		return ATASTART_STARTED;
	} else
		return ATASTART_POLL;
}
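
/*
 * Wait for completion of a polled ATAPI command: run the port interrupt
 * handler until the scsipi xfer is marked done or ATA_DELAY expires,
 * flag a timeout if it did not complete, then re-enable the global
 * interrupt enable bit that ahci_atapi_start() cleared for polled commands.
 */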
static int
ahci_atapi_poll(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	struct ahci_channel *achp = (struct ahci_channel *)chp;

	/*
	 * Polled command.
	 */
	for (int i = 0; i < ATA_DELAY / 10; i++) {
		if (xfer->c_scsipi->xs_status & XS_STS_DONE)
			break;
		ahci_intr_port(achp);
		delay(10000);
	}
	AHCIDEBUG_PRINT(("%s port %d poll end GHC 0x%x IS 0x%x "
	    "list 0x%x%x fis 0x%x%x CMD 0x%x CI 0x%x\n",
	    AHCINAME(sc), chp->ch_channel,
	    AHCI_READ(sc, AHCI_GHC), AHCI_READ(sc, AHCI_IS),
	    AHCI_READ(sc, AHCI_P_CLBU(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_CLB(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_FBU(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_FB(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))),
	    DEBUG_XFERS);
	if ((xfer->c_scsipi->xs_status & XS_STS_DONE) == 0) {
		xfer->c_scsipi->error = XS_TIMEOUT;
		xfer->ops->c_intr(chp, xfer, 0);
	}
	/* reenable interrupts */
	AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE);
	return ATAPOLL_DONE;
}

static void
ahci_atapi_abort(struct ata_channel *chp, struct ata_xfer *xfer)
{
	ahci_atapi_complete(chp, xfer, 0);
}
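
/*
 * Completion handler for ATAPI commands: sync and unload the data DMA
 * map, derive the residual count from the byte count the HBA wrote back
 * into the command header, turn a device error into a scsipi sense
 * condition, and hand the xfer back to scsipi.
 */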
static int
ahci_atapi_complete(struct ata_channel *chp, struct ata_xfer *xfer, int tfd)
{
	struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;

	AHCIDEBUG_PRINT(("ahci_atapi_complete port %d\n", chp->ch_channel),
	    DEBUG_FUNCS);

	if (ata_waitdrain_xfer_check(chp, xfer))
		return 0;

	if (xfer->c_flags & C_TIMEOU) {
		sc_xfer->error = XS_TIMEOUT;
	}

	if (xfer->c_bcount > 0) {
		bus_dmamap_sync(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot],
		    0, achp->ahcic_datad[xfer->c_slot]->dm_mapsize,
		    (sc_xfer->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat,
		    achp->ahcic_datad[xfer->c_slot]);
	}

	AHCI_CMDH_SYNC(sc, achp, xfer->c_slot,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	sc_xfer->resid = sc_xfer->datalen;
	sc_xfer->resid -= le32toh(achp->ahcic_cmdh[xfer->c_slot].cmdh_prdbc);
	AHCIDEBUG_PRINT(("ahci_atapi_complete datalen %d resid %d\n",
	    sc_xfer->datalen, sc_xfer->resid), DEBUG_XFERS);
	if (AHCI_TFD_ST(tfd) & WDCS_ERR &&
	    ((sc_xfer->xs_control & XS_CTL_REQSENSE) == 0 ||
	    sc_xfer->resid == sc_xfer->datalen)) {
		sc_xfer->error = XS_SHORTSENSE;
		sc_xfer->sense.atapi_sense = AHCI_TFD_ERR(tfd);
		if ((sc_xfer->xs_periph->periph_quirks &
		    PQUIRK_NOSENSE) == 0) {
			/* ask scsipi to send a REQUEST_SENSE */
			sc_xfer->error = XS_BUSY;
			sc_xfer->status = SCSI_CHECK;
		}
	}

	ata_deactivate_xfer(chp, xfer);

	ata_free_xfer(chp, xfer);
	scsipi_done(sc_xfer);
	if ((AHCI_TFD_ST(tfd) & WDCS_ERR) == 0)
		atastart(chp);
	return 0;
}

static void
ahci_atapi_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
{
	struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
	bool deactivate = true;

	/* remove this command from xfer queue */
	switch (reason) {
	case KILL_GONE_INACTIVE:
		deactivate = false;
		/* FALLTHROUGH */
	case KILL_GONE:
		sc_xfer->error = XS_DRIVER_STUFFUP;
		break;
	case KILL_RESET:
		sc_xfer->error = XS_RESET;
		break;
	case KILL_REQUEUE:
		sc_xfer->error = XS_REQUEUE;
		break;
	default:
		printf("ahci_atapi_kill_xfer: unknown reason %d\n", reason);
		panic("ahci_atapi_kill_xfer");
	}

	if (deactivate)
		ata_deactivate_xfer(chp, xfer);

	ata_free_xfer(chp, xfer);
	scsipi_done(sc_xfer);
}

static void
ahci_atapi_probe_device(struct atapibus_softc *sc, int target)
{
	struct scsipi_channel *chan = sc->sc_channel;
	struct scsipi_periph *periph;
	struct ataparams ids;
	struct ataparams *id = &ids;
	struct ahci_softc *ahcic =
	    device_private(chan->chan_adapter->adapt_dev);
	struct atac_softc *atac = &ahcic->sc_atac;
	struct ata_channel *chp = atac->atac_channels[chan->chan_channel];
	struct ata_drive_datas *drvp = &chp->ch_drive[target];
	struct scsipibus_attach_args sa;
	char serial_number[21], model[41], firmware_revision[9];
	int s;

	/* skip if already attached */
	if (scsipi_lookup_periph(chan, target, 0) != NULL)
		return;

	/* if no ATAPI device detected at attach time, skip */
	if (drvp->drive_type != ATA_DRIVET_ATAPI) {
		AHCIDEBUG_PRINT(("ahci_atapi_probe_device: drive %d "
		    "not present\n", target), DEBUG_PROBE);
		return;
	}

	/* Some ATAPI devices need a bit more time after software reset. */
	delay(5000);
	if (ata_get_params(drvp, AT_WAIT, id) == 0) {
#ifdef ATAPI_DEBUG_PROBE
		printf("%s drive %d: cmdsz 0x%x drqtype 0x%x\n",
		    AHCINAME(ahcic), target,
		    id->atap_config & ATAPI_CFG_CMD_MASK,
		    id->atap_config & ATAPI_CFG_DRQ_MASK);
#endif
		periph = scsipi_alloc_periph(M_NOWAIT);
		if (periph == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate periph for drive %d\n",
			    target);
			return;
		}
		periph->periph_dev = NULL;
		periph->periph_channel = chan;
		periph->periph_switch = &atapi_probe_periphsw;
		periph->periph_target = target;
		periph->periph_lun = 0;
		periph->periph_quirks = PQUIRK_ONLYBIG;

#ifdef SCSIPI_DEBUG
		if (SCSIPI_DEBUG_TYPE == SCSIPI_BUSTYPE_ATAPI &&
		    SCSIPI_DEBUG_TARGET == target)
			periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
#endif
		periph->periph_type = ATAPI_CFG_TYPE(id->atap_config);
		if (id->atap_config & ATAPI_CFG_REMOV)
			periph->periph_flags |= PERIPH_REMOVABLE;
		if (periph->periph_type == T_SEQUENTIAL) {
			s = splbio();
			drvp->drive_flags |= ATA_DRIVE_ATAPIDSCW;
			splx(s);
		}

		sa.sa_periph = periph;
		sa.sa_inqbuf.type = ATAPI_CFG_TYPE(id->atap_config);
		sa.sa_inqbuf.removable = id->atap_config & ATAPI_CFG_REMOV ?
		    T_REMOV : T_FIXED;
		strnvisx(model, sizeof(model), id->atap_model, 40,
		    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
		strnvisx(serial_number, sizeof(serial_number), id->atap_serial,
		    20, VIS_TRIM|VIS_SAFE|VIS_OCTAL);
		strnvisx(firmware_revision, sizeof(firmware_revision),
		    id->atap_revision, 8, VIS_TRIM|VIS_SAFE|VIS_OCTAL);
		sa.sa_inqbuf.vendor = model;
		sa.sa_inqbuf.product = serial_number;
		sa.sa_inqbuf.revision = firmware_revision;

		/*
		 * Determine the operating mode capabilities of the device.
		 */
		if ((id->atap_config & ATAPI_CFG_CMD_MASK) == ATAPI_CFG_CMD_16)
			periph->periph_cap |= PERIPH_CAP_CMD16;
		/* XXX This is gross. */
		periph->periph_cap |= (id->atap_config & ATAPI_CFG_DRQ_MASK);

		drvp->drv_softc = atapi_probe_device(sc, target, periph, &sa);

		if (drvp->drv_softc)
			ata_probe_caps(drvp);
		else {
			s = splbio();
			drvp->drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	} else {
		AHCIDEBUG_PRINT(("ahci_atapi_probe_device: "
		    "ATAPI_IDENTIFY_DEVICE failed for drive %s:%d:%d\n",
		    AHCINAME(ahcic), chp->ch_channel, target), DEBUG_PROBE);
		s = splbio();
		drvp->drive_type = ATA_DRIVET_NONE;
		splx(s);
	}
}
#endif /* NATAPIBUS */