1 /* $NetBSD: mvsata.c,v 1.54 2020/03/22 16:46:30 macallan Exp $ */ 2 /* 3 * Copyright (c) 2008 KIYOHARA Takashi 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 
26 */ 27 28 #include <sys/cdefs.h> 29 __KERNEL_RCSID(0, "$NetBSD: mvsata.c,v 1.54 2020/03/22 16:46:30 macallan Exp $"); 30 31 #include "opt_mvsata.h" 32 33 #include <sys/param.h> 34 #include <sys/buf.h> 35 #include <sys/bus.h> 36 #include <sys/cpu.h> 37 #include <sys/device.h> 38 #include <sys/disklabel.h> 39 #include <sys/errno.h> 40 #include <sys/kernel.h> 41 #include <sys/malloc.h> 42 #include <sys/proc.h> 43 44 #include <machine/vmparam.h> 45 46 #include <dev/ata/atareg.h> 47 #include <dev/ata/atavar.h> 48 #include <dev/ic/wdcvar.h> 49 #include <dev/ata/satafisvar.h> 50 #include <dev/ata/satafisreg.h> 51 #include <dev/ata/satapmpreg.h> 52 #include <dev/ata/satareg.h> 53 #include <dev/ata/satavar.h> 54 55 #include <dev/scsipi/scsi_all.h> /* for SCSI status */ 56 57 #include "atapibus.h" 58 59 #include <dev/pci/pcidevs.h> /* XXX should not be here */ 60 61 /* 62 * Nice things to do: 63 * 64 * - MSI/MSI-X support - though on some models MSI actually doesn't work 65 * even when hardware claims to support it, according to FreeBSD/OpenBSD 66 * - move pci-specific code to the pci attach code 67 * - mvsata(4) use 64-bit DMA on hardware which claims to support it 68 * - e.g. 
AHA1430SA does not really work, crash in mvsata_intr() on boot 69 */ 70 71 #include <dev/ic/mvsatareg.h> 72 #include <dev/ic/mvsatavar.h> 73 74 #define MVSATA_DEV(sc) ((sc)->sc_wdcdev.sc_atac.atac_dev) 75 #define MVSATA_DEV2(mvport) ((mvport)->port_ata_channel.ch_atac->atac_dev) 76 77 #define MVSATA_HC_READ_4(hc, reg) \ 78 bus_space_read_4((hc)->hc_iot, (hc)->hc_ioh, (reg)) 79 #define MVSATA_HC_WRITE_4(hc, reg, val) \ 80 bus_space_write_4((hc)->hc_iot, (hc)->hc_ioh, (reg), (val)) 81 #define MVSATA_EDMA_READ_4(mvport, reg) \ 82 bus_space_read_4((mvport)->port_iot, (mvport)->port_ioh, (reg)) 83 #define MVSATA_EDMA_WRITE_4(mvport, reg, val) \ 84 bus_space_write_4((mvport)->port_iot, (mvport)->port_ioh, (reg), (val)) 85 #define MVSATA_WDC_READ_2(mvport, reg) \ 86 bus_space_read_2((mvport)->port_iot, (mvport)->port_ioh, \ 87 SHADOW_REG_BLOCK_OFFSET + (reg)) 88 #define MVSATA_WDC_READ_1(mvport, reg) \ 89 bus_space_read_1((mvport)->port_iot, (mvport)->port_ioh, \ 90 SHADOW_REG_BLOCK_OFFSET + (reg)) 91 #define MVSATA_WDC_WRITE_2(mvport, reg, val) \ 92 bus_space_write_2((mvport)->port_iot, (mvport)->port_ioh, \ 93 SHADOW_REG_BLOCK_OFFSET + (reg), (val)) 94 #define MVSATA_WDC_WRITE_1(mvport, reg, val) \ 95 bus_space_write_1((mvport)->port_iot, (mvport)->port_ioh, \ 96 SHADOW_REG_BLOCK_OFFSET + (reg), (val)) 97 98 #ifdef MVSATA_DEBUG 99 100 #define DEBUG_INTR 0x01 101 #define DEBUG_XFERS 0x02 102 #define DEBUG_FUNCS 0x08 103 #define DEBUG_PROBE 0x10 104 105 #define DPRINTF(n,x) if (mvsata_debug & (n)) printf x 106 int mvsata_debug = 0; 107 #else 108 #define DPRINTF(n,x) 109 #endif 110 111 #define ATA_DELAY 10000 /* 10s for a drive I/O */ 112 #define ATAPI_DELAY 10 /* 10 ms, this is used only before 113 sending a cmd */ 114 #define ATAPI_MODE_DELAY 1000 /* 1s, timeout for SET_FEATURE cmds */ 115 116 #define MVSATA_MAX_SEGS (MAXPHYS / PAGE_SIZE + 1) 117 #define MVSATA_EPRD_MAX_SIZE (sizeof(struct eprd) * MVSATA_MAX_SEGS) 118 119 120 static void mvsata_probe_drive(struct 
ata_channel *); 121 122 #ifndef MVSATA_WITHOUTDMA 123 static void mvsata_reset_channel(struct ata_channel *, int); 124 static int mvsata_bio(struct ata_drive_datas *, struct ata_xfer *); 125 static void mvsata_reset_drive(struct ata_drive_datas *, int, uint32_t *); 126 static int mvsata_exec_command(struct ata_drive_datas *, struct ata_xfer *); 127 static int mvsata_addref(struct ata_drive_datas *); 128 static void mvsata_delref(struct ata_drive_datas *); 129 static void mvsata_killpending(struct ata_drive_datas *); 130 131 #if NATAPIBUS > 0 132 static void mvsata_atapibus_attach(struct atabus_softc *); 133 static void mvsata_atapi_scsipi_request(struct scsipi_channel *, 134 scsipi_adapter_req_t, void *); 135 static void mvsata_atapi_minphys(struct buf *); 136 static void mvsata_atapi_probe_device(struct atapibus_softc *, int); 137 static void mvsata_atapi_kill_pending(struct scsipi_periph *); 138 #endif 139 #endif 140 141 static void mvsata_setup_channel(struct ata_channel *); 142 143 #ifndef MVSATA_WITHOUTDMA 144 static int mvsata_bio_start(struct ata_channel *, struct ata_xfer *); 145 static int mvsata_bio_intr(struct ata_channel *, struct ata_xfer *, int); 146 static void mvsata_bio_poll(struct ata_channel *, struct ata_xfer *); 147 static void mvsata_bio_kill_xfer(struct ata_channel *, struct ata_xfer *, int); 148 static void mvsata_bio_done(struct ata_channel *, struct ata_xfer *); 149 static int mvsata_bio_ready(struct mvsata_port *, struct ata_bio *, int, 150 int); 151 static int mvsata_wdc_cmd_start(struct ata_channel *, struct ata_xfer *); 152 static int mvsata_wdc_cmd_intr(struct ata_channel *, struct ata_xfer *, int); 153 static void mvsata_wdc_cmd_poll(struct ata_channel *, struct ata_xfer *); 154 static void mvsata_wdc_cmd_kill_xfer(struct ata_channel *, struct ata_xfer *, 155 int); 156 static void mvsata_wdc_cmd_done(struct ata_channel *, struct ata_xfer *); 157 static void mvsata_wdc_cmd_done_end(struct ata_channel *, struct ata_xfer *); 158 #if 
NATAPIBUS > 0 159 static int mvsata_atapi_start(struct ata_channel *, struct ata_xfer *); 160 static int mvsata_atapi_intr(struct ata_channel *, struct ata_xfer *, int); 161 static void mvsata_atapi_poll(struct ata_channel *, struct ata_xfer *); 162 static void mvsata_atapi_kill_xfer(struct ata_channel *, struct ata_xfer *, 163 int); 164 static void mvsata_atapi_reset(struct ata_channel *, struct ata_xfer *); 165 static void mvsata_atapi_phase_complete(struct ata_xfer *, int); 166 static void mvsata_atapi_done(struct ata_channel *, struct ata_xfer *); 167 static void mvsata_atapi_polldsc(void *); 168 #endif 169 170 static int mvsata_edma_enqueue(struct mvsata_port *, struct ata_xfer *); 171 static int mvsata_edma_handle(struct mvsata_port *, struct ata_xfer *); 172 static int mvsata_edma_wait(struct mvsata_port *, struct ata_xfer *, int); 173 static void mvsata_edma_rqq_remove(struct mvsata_port *, struct ata_xfer *); 174 #if NATAPIBUS > 0 175 static int mvsata_bdma_init(struct mvsata_port *, struct ata_xfer *); 176 static void mvsata_bdma_start(struct mvsata_port *); 177 #endif 178 #endif 179 180 static int mvsata_nondma_handle(struct mvsata_port *); 181 182 static int mvsata_port_init(struct mvsata_hc *, int); 183 static int mvsata_wdc_reg_init(struct mvsata_port *, struct wdc_regs *); 184 #ifndef MVSATA_WITHOUTDMA 185 static void mvsata_channel_recover(struct ata_channel *, int, uint32_t); 186 static void *mvsata_edma_resource_prepare(struct mvsata_port *, bus_dma_tag_t, 187 bus_dmamap_t *, size_t, int); 188 static void mvsata_edma_resource_purge(struct mvsata_port *, bus_dma_tag_t, 189 bus_dmamap_t, void *); 190 static int mvsata_dma_bufload(struct mvsata_port *, int, void *, size_t, int); 191 static inline void mvsata_dma_bufunload(struct mvsata_port *, int, int); 192 #endif 193 194 static void mvsata_hreset_port(struct mvsata_port *); 195 static void mvsata_reset_port(struct mvsata_port *); 196 static void mvsata_reset_hc(struct mvsata_hc *); 197 static 
uint32_t mvsata_softreset(struct mvsata_port *, int); 198 #ifndef MVSATA_WITHOUTDMA 199 static void mvsata_edma_reset_qptr(struct mvsata_port *); 200 static inline void mvsata_edma_enable(struct mvsata_port *); 201 static void mvsata_edma_disable(struct mvsata_port *, int, int); 202 static void mvsata_edma_config(struct mvsata_port *, enum mvsata_edmamode); 203 204 static void mvsata_edma_setup_crqb(struct mvsata_port *, int, 205 struct ata_xfer *); 206 #endif 207 static uint32_t mvsata_read_preamps_gen1(struct mvsata_port *); 208 static void mvsata_fix_phy_gen1(struct mvsata_port *); 209 static void mvsata_devconn_gen1(struct mvsata_port *); 210 211 static uint32_t mvsata_read_preamps_gen2(struct mvsata_port *); 212 static void mvsata_fix_phy_gen2(struct mvsata_port *); 213 #ifndef MVSATA_WITHOUTDMA 214 static void mvsata_edma_setup_crqb_gen2e(struct mvsata_port *, int, 215 struct ata_xfer *); 216 217 #ifdef MVSATA_DEBUG 218 static void mvsata_print_crqb(struct mvsata_port *, int); 219 static void mvsata_print_crpb(struct mvsata_port *, int); 220 static void mvsata_print_eprd(struct mvsata_port *, int); 221 #endif 222 223 static const struct ata_bustype mvsata_ata_bustype = { 224 SCSIPI_BUSTYPE_ATA, 225 mvsata_bio, 226 mvsata_reset_drive, 227 mvsata_reset_channel, 228 mvsata_exec_command, 229 ata_get_params, 230 mvsata_addref, 231 mvsata_delref, 232 mvsata_killpending, 233 mvsata_channel_recover, 234 }; 235 236 #if NATAPIBUS > 0 237 static const struct scsipi_bustype mvsata_atapi_bustype = { 238 .bustype_type = SCSIPI_BUSTYPE_ATAPI, 239 .bustype_cmd = atapi_scsipi_cmd, 240 .bustype_interpret_sense = atapi_interpret_sense, 241 .bustype_printaddr = atapi_print_addr, 242 .bustype_kill_pending = mvsata_atapi_kill_pending, 243 .bustype_async_event_xfer_mode = NULL, 244 }; 245 #endif /* NATAPIBUS */ 246 #endif 247 248 static void 249 mvsata_pmp_select(struct mvsata_port *mvport, int pmpport) 250 { 251 uint32_t ifctl; 252 253 KASSERT(pmpport < PMP_MAX_DRIVES); 254 #if 
defined(DIAGNOSTIC) || defined(MVSATA_DEBUG) 255 if ((MVSATA_EDMA_READ_4(mvport, EDMA_CMD) & EDMA_CMD_EENEDMA) != 0) { 256 panic("EDMA enabled"); 257 } 258 #endif 259 260 ifctl = MVSATA_EDMA_READ_4(mvport, SATA_SATAICTL); 261 ifctl &= ~0xf; 262 ifctl |= pmpport; 263 MVSATA_EDMA_WRITE_4(mvport, SATA_SATAICTL, ifctl); 264 } 265 266 int 267 mvsata_attach(struct mvsata_softc *sc, const struct mvsata_product *product, 268 int (*mvsata_sreset)(struct mvsata_softc *), 269 int (*mvsata_misc_reset)(struct mvsata_softc *), 270 int read_pre_amps) 271 { 272 struct mvsata_hc *mvhc; 273 struct mvsata_port *mvport; 274 uint32_t (*read_preamps)(struct mvsata_port *) = NULL; 275 void (*_fix_phy)(struct mvsata_port *) = NULL; 276 #ifndef MVSATA_WITHOUTDMA 277 void (*edma_setup_crqb) 278 (struct mvsata_port *, int, struct ata_xfer *) = NULL; 279 #endif 280 int hc, port, channel; 281 282 aprint_normal_dev(MVSATA_DEV(sc), "Gen%s, %dhc, %dport/hc\n", 283 (product->generation == gen1) ? "I" : 284 ((product->generation == gen2) ? 
"II" : "IIe"), 285 product->hc, product->port); 286 287 288 switch (product->generation) { 289 case gen1: 290 mvsata_sreset = NULL; 291 read_pre_amps = 1; /* MUST */ 292 read_preamps = mvsata_read_preamps_gen1; 293 _fix_phy = mvsata_fix_phy_gen1; 294 #ifndef MVSATA_WITHOUTDMA 295 edma_setup_crqb = mvsata_edma_setup_crqb; 296 #endif 297 break; 298 299 case gen2: 300 read_preamps = mvsata_read_preamps_gen2; 301 _fix_phy = mvsata_fix_phy_gen2; 302 #ifndef MVSATA_WITHOUTDMA 303 edma_setup_crqb = mvsata_edma_setup_crqb; 304 #endif 305 break; 306 307 case gen2e: 308 read_preamps = mvsata_read_preamps_gen2; 309 _fix_phy = mvsata_fix_phy_gen2; 310 #ifndef MVSATA_WITHOUTDMA 311 edma_setup_crqb = mvsata_edma_setup_crqb_gen2e; 312 sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_NCQ; 313 #endif 314 break; 315 } 316 317 sc->sc_gen = product->generation; 318 sc->sc_hc = product->hc; 319 sc->sc_port = product->port; 320 sc->sc_flags = product->flags; 321 322 #ifdef MVSATA_WITHOUTDMA 323 sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16; 324 #else 325 sc->sc_edma_setup_crqb = edma_setup_crqb; 326 sc->sc_wdcdev.sc_atac.atac_cap |= 327 (ATAC_CAP_DATA16 | ATAC_CAP_DMA | ATAC_CAP_UDMA); 328 #endif 329 sc->sc_wdcdev.sc_atac.atac_pio_cap = 4; 330 #ifdef MVSATA_WITHOUTDMA 331 sc->sc_wdcdev.sc_atac.atac_dma_cap = 0; 332 sc->sc_wdcdev.sc_atac.atac_udma_cap = 0; 333 #else 334 sc->sc_wdcdev.sc_atac.atac_dma_cap = 2; 335 sc->sc_wdcdev.sc_atac.atac_udma_cap = 6; 336 #endif 337 sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_ata_channels; 338 sc->sc_wdcdev.sc_atac.atac_nchannels = sc->sc_hc * sc->sc_port; 339 #ifndef MVSATA_WITHOUTDMA 340 sc->sc_wdcdev.sc_atac.atac_bustype_ata = &mvsata_ata_bustype; 341 #if NATAPIBUS > 0 342 sc->sc_wdcdev.sc_atac.atac_atapibus_attach = mvsata_atapibus_attach; 343 #endif 344 #endif 345 sc->sc_wdcdev.wdc_maxdrives = 1; /* SATA is always 1 drive */ 346 sc->sc_wdcdev.sc_atac.atac_probe = mvsata_probe_drive; 347 sc->sc_wdcdev.sc_atac.atac_set_modes = mvsata_setup_channel; 
348 349 sc->sc_wdc_regs = 350 malloc(sizeof(struct wdc_regs) * product->hc * product->port, 351 M_DEVBUF, M_WAITOK); 352 sc->sc_wdcdev.regs = sc->sc_wdc_regs; 353 354 for (hc = 0; hc < sc->sc_hc; hc++) { 355 mvhc = &sc->sc_hcs[hc]; 356 mvhc->hc = hc; 357 mvhc->hc_sc = sc; 358 mvhc->hc_iot = sc->sc_iot; 359 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 360 hc * SATAHC_REGISTER_SIZE, SATAHC_REGISTER_SIZE, 361 &mvhc->hc_ioh)) { 362 aprint_error_dev(MVSATA_DEV(sc), 363 "can't subregion SATAHC %d registers\n", hc); 364 continue; 365 } 366 367 for (port = 0; port < sc->sc_port; port++) 368 if (mvsata_port_init(mvhc, port) == 0) { 369 int pre_amps; 370 371 mvport = mvhc->hc_ports[port]; 372 pre_amps = read_pre_amps ? 373 read_preamps(mvport) : 0x00000720; 374 mvport->_fix_phy_param.pre_amps = pre_amps; 375 mvport->_fix_phy_param._fix_phy = _fix_phy; 376 377 if (!mvsata_sreset) 378 mvsata_reset_port(mvport); 379 } 380 381 if (!mvsata_sreset) 382 mvsata_reset_hc(mvhc); 383 } 384 if (mvsata_sreset) 385 mvsata_sreset(sc); 386 387 if (mvsata_misc_reset) 388 mvsata_misc_reset(sc); 389 390 for (hc = 0; hc < sc->sc_hc; hc++) 391 for (port = 0; port < sc->sc_port; port++) { 392 mvport = sc->sc_hcs[hc].hc_ports[port]; 393 if (mvport == NULL) 394 continue; 395 if (mvsata_sreset) 396 mvport->_fix_phy_param._fix_phy(mvport); 397 } 398 for (channel = 0; channel < sc->sc_hc * sc->sc_port; channel++) 399 wdcattach(sc->sc_ata_channels[channel]); 400 401 return 0; 402 } 403 404 int 405 mvsata_intr(struct mvsata_hc *mvhc) 406 { 407 struct mvsata_softc *sc = mvhc->hc_sc; 408 struct mvsata_port *mvport; 409 uint32_t cause; 410 int port, handled = 0; 411 412 cause = MVSATA_HC_READ_4(mvhc, SATAHC_IC); 413 414 DPRINTF(DEBUG_INTR, ("%s:%d: mvsata_intr: cause=0x%08x\n", 415 device_xname(MVSATA_DEV(sc)), mvhc->hc, cause)); 416 417 if (cause & SATAHC_IC_SAINTCOAL) 418 MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, ~SATAHC_IC_SAINTCOAL); 419 cause &= ~SATAHC_IC_SAINTCOAL; 420 421 for (port = 0; port < 
sc->sc_port; port++) { 422 mvport = mvhc->hc_ports[port]; 423 424 if (cause & SATAHC_IC_DONE(port)) { 425 #ifndef MVSATA_WITHOUTDMA 426 handled = mvsata_edma_handle(mvport, NULL); 427 #endif 428 MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, 429 ~SATAHC_IC_DONE(port)); 430 } 431 432 if (cause & SATAHC_IC_SADEVINTERRUPT(port)) { 433 (void) mvsata_nondma_handle(mvport); 434 MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, 435 ~SATAHC_IC_SADEVINTERRUPT(port)); 436 handled = 1; 437 } 438 } 439 440 return handled; 441 } 442 443 static int 444 mvsata_nondma_handle(struct mvsata_port *mvport) 445 { 446 struct ata_channel *chp = &mvport->port_ata_channel; 447 struct ata_xfer *xfer; 448 int ret; 449 450 /* 451 * The chip doesn't support several pending non-DMA commands, 452 * and the ata middle layer never issues several non-NCQ commands, 453 * so there must be exactly one active command at this moment. 454 */ 455 xfer = ata_queue_get_active_xfer(chp); 456 if (xfer == NULL) { 457 /* Can happen after error recovery, ignore */ 458 DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, 459 ("%s:%d: %s: intr without xfer\n", 460 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, 461 __func__)); 462 return 0; 463 } 464 465 ret = xfer->ops->c_intr(chp, xfer, 1); 466 return (ret); 467 } 468 469 int 470 mvsata_error(struct mvsata_port *mvport) 471 { 472 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 473 uint32_t cause; 474 475 cause = MVSATA_EDMA_READ_4(mvport, EDMA_IEC); 476 /* 477 * We must ack SATA_SE and SATA_FISIC before acking coresponding bits 478 * in EDMA_IEC. 
479 */ 480 if (cause & EDMA_IE_SERRINT) { 481 MVSATA_EDMA_WRITE_4(mvport, SATA_SE, 482 MVSATA_EDMA_READ_4(mvport, SATA_SEIM)); 483 } 484 if (cause & EDMA_IE_ETRANSINT) { 485 MVSATA_EDMA_WRITE_4(mvport, SATA_FISIC, 486 ~MVSATA_EDMA_READ_4(mvport, SATA_FISIM)); 487 } 488 MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, ~cause); 489 490 DPRINTF(DEBUG_INTR, ("%s:%d:%d:" 491 " mvsata_error: cause=0x%08x, mask=0x%08x, status=0x%08x\n", 492 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc, 493 mvport->port, cause, MVSATA_EDMA_READ_4(mvport, EDMA_IEM), 494 MVSATA_EDMA_READ_4(mvport, EDMA_S))); 495 496 cause &= MVSATA_EDMA_READ_4(mvport, EDMA_IEM); 497 if (!cause) 498 return 0; 499 500 if (cause & EDMA_IE_EDEVDIS) { 501 aprint_normal("%s:%d:%d: device disconnect\n", 502 device_xname(MVSATA_DEV2(mvport)), 503 mvport->port_hc->hc, mvport->port); 504 } 505 if (cause & EDMA_IE_EDEVCON) { 506 if (sc->sc_gen == gen1) 507 mvsata_devconn_gen1(mvport); 508 509 DPRINTF(DEBUG_INTR, (" device connected\n")); 510 } 511 512 #ifndef MVSATA_WITHOUTDMA 513 if ((sc->sc_gen == gen1 && cause & EDMA_IE_ETRANSINT) || 514 (sc->sc_gen != gen1 && cause & EDMA_IE_ESELFDIS)) { 515 switch (mvport->port_edmamode_curr) { 516 case dma: 517 case queued: 518 case ncq: 519 mvsata_edma_reset_qptr(mvport); 520 mvsata_edma_enable(mvport); 521 if (cause & EDMA_IE_EDEVERR) 522 break; 523 524 /* FALLTHROUGH */ 525 526 case nodma: 527 default: 528 DPRINTF(DEBUG_INTR, 529 ("%s:%d:%d: EDMA self disable happen 0x%x\n", 530 device_xname(MVSATA_DEV2(mvport)), 531 mvport->port_hc->hc, mvport->port, cause)); 532 break; 533 } 534 } 535 #endif 536 if (cause & EDMA_IE_ETRANSINT) { 537 /* hot plug the Port Multiplier */ 538 aprint_normal("%s:%d:%d: detect Port Multiplier?\n", 539 device_xname(MVSATA_DEV2(mvport)), 540 mvport->port_hc->hc, mvport->port); 541 } 542 if (cause & EDMA_IE_EDEVERR) { 543 struct ata_channel *chp = &mvport->port_ata_channel; 544 545 aprint_error("%s:%d:%d: device error, recovering\n", 546 
device_xname(MVSATA_DEV2(mvport)), 547 mvport->port_hc->hc, mvport->port); 548 549 ata_channel_lock(chp); 550 ata_thread_run(chp, 0, ATACH_TH_RECOVERY, 551 ATACH_ERR_ST(0, WDCS_ERR)); 552 ata_channel_unlock(chp); 553 } 554 555 return 1; 556 } 557 558 #ifndef MVSATA_WITHOUTDMA 559 static void 560 mvsata_channel_recover(struct ata_channel *chp, int flags, uint32_t tfd) 561 { 562 struct mvsata_port * const mvport = (struct mvsata_port *)chp; 563 int drive; 564 565 ata_channel_lock_owned(chp); 566 567 if (chp->ch_ndrives > PMP_PORT_CTL) { 568 /* Get PM port number for the device in error. This device 569 * doesn't seem to have dedicated register for this, so just 570 * assume last selected port was the one. */ 571 /* XXX FIS-based switching */ 572 drive = MVSATA_EDMA_READ_4(mvport, SATA_SATAICTL) & 0xf; 573 } else 574 drive = 0; 575 576 /* 577 * Controller doesn't need any special action. Simply execute 578 * READ LOG EXT for NCQ to unblock device processing, then continue 579 * as if nothing happened. 
580 */ 581 582 ata_recovery_resume(chp, drive, tfd, AT_POLL); 583 584 /* Drive unblocked, back to normal operation */ 585 return; 586 } 587 #endif /* !MVSATA_WITHOUTDMA */ 588 589 /* 590 * ATA callback entry points 591 */ 592 593 static void 594 mvsata_probe_drive(struct ata_channel *chp) 595 { 596 struct mvsata_port * const mvport = (struct mvsata_port *)chp; 597 uint32_t sstat, sig; 598 599 ata_channel_lock(chp); 600 601 sstat = sata_reset_interface(chp, mvport->port_iot, 602 mvport->port_sata_scontrol, mvport->port_sata_sstatus, AT_WAIT); 603 switch (sstat) { 604 case SStatus_DET_DEV: 605 mvsata_pmp_select(mvport, PMP_PORT_CTL); 606 sig = mvsata_softreset(mvport, AT_WAIT); 607 sata_interpret_sig(chp, 0, sig); 608 break; 609 default: 610 break; 611 } 612 613 ata_channel_unlock(chp); 614 } 615 616 #ifndef MVSATA_WITHOUTDMA 617 static void 618 mvsata_reset_drive(struct ata_drive_datas *drvp, int flags, uint32_t *sigp) 619 { 620 struct ata_channel *chp = drvp->chnl_softc; 621 struct mvsata_port *mvport = (struct mvsata_port *)chp; 622 uint32_t edma_c; 623 uint32_t sig; 624 625 ata_channel_lock_owned(chp); 626 627 edma_c = MVSATA_EDMA_READ_4(mvport, EDMA_CMD); 628 629 DPRINTF(DEBUG_FUNCS, 630 ("%s:%d: mvsata_reset_drive: drive=%d (EDMA %sactive)\n", 631 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, drvp->drive, 632 (edma_c & EDMA_CMD_EENEDMA) ? 
"" : "not ")); 633 634 if (edma_c & EDMA_CMD_EENEDMA) 635 mvsata_edma_disable(mvport, 10000, flags); 636 637 mvsata_pmp_select(mvport, drvp->drive); 638 639 sig = mvsata_softreset(mvport, flags); 640 641 if (sigp) 642 *sigp = sig; 643 644 if (edma_c & EDMA_CMD_EENEDMA) { 645 mvsata_edma_reset_qptr(mvport); 646 mvsata_edma_enable(mvport); 647 } 648 } 649 650 static void 651 mvsata_reset_channel(struct ata_channel *chp, int flags) 652 { 653 struct mvsata_port *mvport = (struct mvsata_port *)chp; 654 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 655 uint32_t sstat, ctrl; 656 657 DPRINTF(DEBUG_FUNCS, ("%s: mvsata_reset_channel: channel=%d\n", 658 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel)); 659 660 ata_channel_lock_owned(chp); 661 662 mvsata_hreset_port(mvport); 663 sstat = sata_reset_interface(chp, mvport->port_iot, 664 mvport->port_sata_scontrol, mvport->port_sata_sstatus, flags); 665 666 if (flags & AT_WAIT && sstat == SStatus_DET_DEV_NE && 667 sc->sc_gen != gen1) { 668 /* Downgrade to GenI */ 669 const uint32_t val = SControl_IPM_NONE | SControl_SPD_ANY | 670 SControl_DET_DISABLE; 671 672 bus_space_write_4(mvport->port_iot, 673 mvport->port_sata_scontrol, 0, val); 674 675 ctrl = MVSATA_EDMA_READ_4(mvport, SATA_SATAICFG); 676 ctrl &= ~(1 << 17); /* Disable GenII */ 677 MVSATA_EDMA_WRITE_4(mvport, SATA_SATAICFG, ctrl); 678 679 mvsata_hreset_port(mvport); 680 sata_reset_interface(chp, mvport->port_iot, 681 mvport->port_sata_scontrol, mvport->port_sata_sstatus, 682 flags); 683 } 684 685 ata_kill_active(chp, KILL_RESET, flags); 686 687 mvsata_edma_config(mvport, mvport->port_edmamode_curr); 688 mvsata_edma_reset_qptr(mvport); 689 mvsata_edma_enable(mvport); 690 } 691 692 static int 693 mvsata_addref(struct ata_drive_datas *drvp) 694 { 695 696 return 0; 697 } 698 699 static void 700 mvsata_delref(struct ata_drive_datas *drvp) 701 { 702 703 return; 704 } 705 706 static void 707 mvsata_killpending(struct ata_drive_datas *drvp) 708 { 709 710 
return; 711 } 712 713 #if NATAPIBUS > 0 714 static void 715 mvsata_atapibus_attach(struct atabus_softc *ata_sc) 716 { 717 struct ata_channel *chp = ata_sc->sc_chan; 718 struct atac_softc *atac = chp->ch_atac; 719 struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic; 720 struct scsipi_channel *chan = &chp->ch_atapi_channel; 721 722 /* 723 * Fill in the scsipi_adapter. 724 */ 725 adapt->adapt_dev = atac->atac_dev; 726 adapt->adapt_nchannels = atac->atac_nchannels; 727 adapt->adapt_request = mvsata_atapi_scsipi_request; 728 adapt->adapt_minphys = mvsata_atapi_minphys; 729 atac->atac_atapi_adapter.atapi_probe_device = mvsata_atapi_probe_device; 730 731 /* 732 * Fill in the scsipi_channel. 733 */ 734 memset(chan, 0, sizeof(*chan)); 735 chan->chan_adapter = adapt; 736 chan->chan_bustype = &mvsata_atapi_bustype; 737 chan->chan_channel = chp->ch_channel; 738 chan->chan_flags = SCSIPI_CHAN_OPENINGS; 739 chan->chan_openings = 1; 740 chan->chan_max_periph = 1; 741 chan->chan_ntargets = 1; 742 chan->chan_nluns = 1; 743 744 chp->atapibus = 745 config_found_ia(ata_sc->sc_dev, "atapi", chan, atapiprint); 746 } 747 748 static void 749 mvsata_atapi_minphys(struct buf *bp) 750 { 751 752 if (bp->b_bcount > MAXPHYS) 753 bp->b_bcount = MAXPHYS; 754 minphys(bp); 755 } 756 757 static void 758 mvsata_atapi_probe_device(struct atapibus_softc *sc, int target) 759 { 760 struct scsipi_channel *chan = sc->sc_channel; 761 struct scsipi_periph *periph; 762 struct ataparams ids; 763 struct ataparams *id = &ids; 764 struct mvsata_softc *mvc = 765 device_private(chan->chan_adapter->adapt_dev); 766 struct atac_softc *atac = &mvc->sc_wdcdev.sc_atac; 767 struct ata_channel *chp = atac->atac_channels[chan->chan_channel]; 768 struct ata_drive_datas *drvp = &chp->ch_drive[target]; 769 struct scsipibus_attach_args sa; 770 char serial_number[21], model[41], firmware_revision[9]; 771 int s; 772 773 /* skip if already attached */ 774 if (scsipi_lookup_periph(chan, target, 0) != NULL) 775 return; 
776 777 /* if no ATAPI device detected at attach time, skip */ 778 if (drvp->drive_type != ATA_DRIVET_ATAPI) { 779 DPRINTF(DEBUG_PROBE, ("%s:%d: mvsata_atapi_probe_device:" 780 " drive %d not present\n", 781 device_xname(atac->atac_dev), chp->ch_channel, target)); 782 return; 783 } 784 785 /* Some ATAPI devices need a bit more time after software reset. */ 786 delay(5000); 787 if (ata_get_params(drvp, AT_WAIT, id) == 0) { 788 #ifdef ATAPI_DEBUG_PROBE 789 printf("%s drive %d: cmdsz 0x%x drqtype 0x%x\n", 790 device_xname(sc->sc_dev), target, 791 id->atap_config & ATAPI_CFG_CMD_MASK, 792 id->atap_config & ATAPI_CFG_DRQ_MASK); 793 #endif 794 periph = scsipi_alloc_periph(M_WAITOK); 795 periph->periph_dev = NULL; 796 periph->periph_channel = chan; 797 periph->periph_switch = &atapi_probe_periphsw; 798 periph->periph_target = target; 799 periph->periph_lun = 0; 800 periph->periph_quirks = PQUIRK_ONLYBIG; 801 802 #ifdef SCSIPI_DEBUG 803 if (SCSIPI_DEBUG_TYPE == SCSIPI_BUSTYPE_ATAPI && 804 SCSIPI_DEBUG_TARGET == target) 805 periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS; 806 #endif 807 periph->periph_type = ATAPI_CFG_TYPE(id->atap_config); 808 if (id->atap_config & ATAPI_CFG_REMOV) 809 periph->periph_flags |= PERIPH_REMOVABLE; 810 if (periph->periph_type == T_SEQUENTIAL) { 811 s = splbio(); 812 drvp->drive_flags |= ATA_DRIVE_ATAPIDSCW; 813 splx(s); 814 } 815 816 sa.sa_periph = periph; 817 sa.sa_inqbuf.type = ATAPI_CFG_TYPE(id->atap_config); 818 sa.sa_inqbuf.removable = id->atap_config & ATAPI_CFG_REMOV ? 
819 T_REMOV : T_FIXED; 820 strnvisx(model, sizeof(model), id->atap_model, 40, 821 VIS_TRIM|VIS_SAFE|VIS_OCTAL); 822 strnvisx(serial_number, sizeof(serial_number), id->atap_serial, 823 20, VIS_TRIM|VIS_SAFE|VIS_OCTAL); 824 strnvisx(firmware_revision, sizeof(firmware_revision), 825 id->atap_revision, 8, VIS_TRIM|VIS_SAFE|VIS_OCTAL); 826 sa.sa_inqbuf.vendor = model; 827 sa.sa_inqbuf.product = serial_number; 828 sa.sa_inqbuf.revision = firmware_revision; 829 830 /* 831 * Determine the operating mode capabilities of the device. 832 */ 833 if ((id->atap_config & ATAPI_CFG_CMD_MASK) == ATAPI_CFG_CMD_16) 834 periph->periph_cap |= PERIPH_CAP_CMD16; 835 /* XXX This is gross. */ 836 periph->periph_cap |= (id->atap_config & ATAPI_CFG_DRQ_MASK); 837 838 drvp->drv_softc = atapi_probe_device(sc, target, periph, &sa); 839 840 if (drvp->drv_softc) 841 ata_probe_caps(drvp); 842 else { 843 s = splbio(); 844 drvp->drive_type = ATA_DRIVET_NONE; 845 splx(s); 846 } 847 } else { 848 DPRINTF(DEBUG_PROBE, ("%s:%d: mvsata_atapi_probe_device:" 849 " ATAPI_IDENTIFY_DEVICE failed for drive %d: error\n", 850 device_xname(atac->atac_dev), chp->ch_channel, target)); 851 s = splbio(); 852 drvp->drive_type = ATA_DRIVET_NONE; 853 splx(s); 854 } 855 } 856 857 /* 858 * Kill off all pending xfers for a periph. 859 * 860 * Must be called at splbio(). 861 */ 862 static void 863 mvsata_atapi_kill_pending(struct scsipi_periph *periph) 864 { 865 struct atac_softc *atac = 866 device_private(periph->periph_channel->chan_adapter->adapt_dev); 867 struct ata_channel *chp = 868 atac->atac_channels[periph->periph_channel->chan_channel]; 869 870 ata_kill_pending(&chp->ch_drive[periph->periph_target]); 871 } 872 #endif /* NATAPIBUS > 0 */ 873 #endif /* MVSATA_WITHOUTDMA */ 874 875 876 /* 877 * mvsata_setup_channel() 878 * Setup EDMA registers and prepare/purge DMA resources. 879 * We assuming already stopped the EDMA. 
880 */ 881 static void 882 mvsata_setup_channel(struct ata_channel *chp) 883 { 884 #ifndef MVSATA_WITHOUTDMA 885 struct mvsata_port *mvport = (struct mvsata_port *)chp; 886 struct ata_drive_datas *drvp; 887 int drive, s; 888 uint32_t edma_mode = nodma; 889 int i; 890 const int crqb_size = sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN; 891 const int crpb_size = sizeof(struct crpb) * MVSATA_EDMAQ_LEN; 892 const int eprd_buf_size = MVSATA_EPRD_MAX_SIZE * MVSATA_EDMAQ_LEN; 893 894 DPRINTF(DEBUG_FUNCS, ("%s:%d: mvsata_setup_channel: ", 895 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel)); 896 897 for (drive = 0; drive < chp->ch_ndrives; drive++) { 898 drvp = &chp->ch_drive[drive]; 899 900 /* If no drive, skip */ 901 if (drvp->drive_type == ATA_DRIVET_NONE) 902 continue; 903 904 if (drvp->drive_flags & ATA_DRIVE_UDMA) { 905 /* use Ultra/DMA */ 906 s = splbio(); 907 drvp->drive_flags &= ~ATA_DRIVE_DMA; 908 splx(s); 909 } 910 911 if (drvp->drive_flags & (ATA_DRIVE_UDMA | ATA_DRIVE_DMA)) { 912 if (drvp->drive_flags & ATA_DRIVE_NCQ) 913 edma_mode = ncq; 914 else if (drvp->drive_type == ATA_DRIVET_ATA) 915 edma_mode = dma; 916 } 917 } 918 919 DPRINTF(DEBUG_FUNCS, 920 ("EDMA %sactive mode\n", (edma_mode == nodma) ? 
"not " : "")); 921 922 if (edma_mode == nodma) { 923 no_edma: 924 if (mvport->port_crqb != NULL) 925 mvsata_edma_resource_purge(mvport, mvport->port_dmat, 926 mvport->port_crqb_dmamap, mvport->port_crqb); 927 if (mvport->port_crpb != NULL) 928 mvsata_edma_resource_purge(mvport, mvport->port_dmat, 929 mvport->port_crpb_dmamap, mvport->port_crpb); 930 if (mvport->port_eprd != NULL) 931 mvsata_edma_resource_purge(mvport, mvport->port_dmat, 932 mvport->port_eprd_dmamap, mvport->port_eprd); 933 934 return; 935 } 936 937 if (mvport->port_crqb == NULL) 938 mvport->port_crqb = mvsata_edma_resource_prepare(mvport, 939 mvport->port_dmat, &mvport->port_crqb_dmamap, crqb_size, 1); 940 if (mvport->port_crpb == NULL) 941 mvport->port_crpb = mvsata_edma_resource_prepare(mvport, 942 mvport->port_dmat, &mvport->port_crpb_dmamap, crpb_size, 0); 943 if (mvport->port_eprd == NULL) { 944 mvport->port_eprd = mvsata_edma_resource_prepare(mvport, 945 mvport->port_dmat, &mvport->port_eprd_dmamap, eprd_buf_size, 946 1); 947 for (i = 0; i < MVSATA_EDMAQ_LEN; i++) { 948 mvport->port_reqtbl[i].eprd_offset = 949 i * MVSATA_EPRD_MAX_SIZE; 950 mvport->port_reqtbl[i].eprd = mvport->port_eprd + 951 i * MVSATA_EPRD_MAX_SIZE / sizeof(struct eprd); 952 } 953 } 954 955 if (mvport->port_crqb == NULL || mvport->port_crpb == NULL || 956 mvport->port_eprd == NULL) { 957 aprint_error_dev(MVSATA_DEV2(mvport), 958 "channel %d: can't use EDMA\n", chp->ch_channel); 959 s = splbio(); 960 for (drive = 0; drive < chp->ch_ndrives; drive++) { 961 drvp = &chp->ch_drive[drive]; 962 963 /* If no drive, skip */ 964 if (drvp->drive_type == ATA_DRIVET_NONE) 965 continue; 966 967 drvp->drive_flags &= ~(ATA_DRIVE_UDMA | ATA_DRIVE_DMA); 968 } 969 splx(s); 970 goto no_edma; 971 } 972 973 mvsata_edma_config(mvport, edma_mode); 974 mvsata_edma_reset_qptr(mvport); 975 mvsata_edma_enable(mvport); 976 #endif 977 } 978 979 #ifndef MVSATA_WITHOUTDMA 980 static const struct ata_xfer_ops mvsata_bio_xfer_ops = { 981 .c_start = 
 mvsata_bio_start,
	.c_intr = mvsata_bio_intr,
	.c_poll = mvsata_bio_poll,
	.c_abort = mvsata_bio_done,
	.c_kill_xfer = mvsata_bio_kill_xfer,
};

/*
 * mvsata_bio:
 *	Entry point for a block I/O request.  Fill in the xfer from the
 *	ata_bio request (selecting polling and/or DMA as appropriate) and
 *	hand it to ata_exec_xfer().  Returns ATACMD_COMPLETE if the
 *	request finished synchronously, else ATACMD_QUEUED.
 */
static int
mvsata_bio(struct ata_drive_datas *drvp, struct ata_xfer *xfer)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_bio *ata_bio = &xfer->c_bio;

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS,
	    ("%s:%d: mvsata_bio: drive=%d, blkno=%" PRId64
	    ", bcount=%ld\n", device_xname(atac->atac_dev), chp->ch_channel,
	    drvp->drive, ata_bio->blkno, ata_bio->bcount));

	/* Controllers without interrupts must always poll. */
	if (atac->atac_cap & ATAC_CAP_NOIRQ)
		ata_bio->flags |= ATA_POLL;
	if (ata_bio->flags & ATA_POLL)
		xfer->c_flags |= C_POLL;
	/* Use DMA when the drive supports it, except for single-sector ops. */
	if ((drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) &&
	    (ata_bio->flags & ATA_SINGLE) == 0)
		xfer->c_flags |= C_DMA;
	xfer->c_drive = drvp->drive;
	xfer->c_databuf = ata_bio->databuf;
	xfer->c_bcount = ata_bio->bcount;
	xfer->ops = &mvsata_bio_xfer_ops;
	ata_exec_xfer(chp, xfer);
	return (ata_bio->flags & ATA_ITSDONE) ? ATACMD_COMPLETE : ATACMD_QUEUED;
}

/*
 * mvsata_bio_start:
 *	Start (or continue) a block I/O transfer on the channel.  DMA
 *	requests are enqueued to the EDMA engine; PIO requests disable
 *	EDMA, issue the read/write command through the shadow registers
 *	and, for writes, push the first sector by programmed I/O.
 *	Returns one of the ATASTART_* dispositions.  Called with the
 *	channel lock held.
 */
static int
mvsata_bio_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct ata_bio *ata_bio = &xfer->c_bio;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
	u_int16_t cyl;
	u_int8_t head, sect, cmd = 0;
	int nblks, error, tfd;

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, ("%s:%d: mvsata_bio_start: drive=%d\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));

	ata_channel_lock_owned(chp);

	if (xfer->c_flags & C_DMA)
		if (drvp->n_xfers <= NXFER)
			drvp->n_xfers++;

	/*
	 *
	 * When starting a multi-sector transfer, or doing single-sector
	 * transfers...
	 */
	if (xfer->c_skip == 0 || (ata_bio->flags & ATA_SINGLE) != 0) {
		if (ata_bio->flags & ATA_SINGLE)
			nblks = 1;
		else
			nblks = xfer->c_bcount / drvp->lp->d_secsize;
		/* Check for bad sectors and adjust transfer, if necessary. */
		if ((drvp->lp->d_flags & D_BADSECT) != 0) {
			long blkdiff;
			int i;

			for (i = 0; (blkdiff = drvp->badsect[i]) != -1;
			    i++) {
				blkdiff -= ata_bio->blkno;
				if (blkdiff < 0)
					continue;
				if (blkdiff == 0)
					/* Replace current block of transfer. */
					ata_bio->blkno =
					    drvp->lp->d_secperunit -
					    drvp->lp->d_nsectors - i - 1;
				if (blkdiff < nblks) {
					/* Bad block inside transfer. */
					ata_bio->flags |= ATA_SINGLE;
					nblks = 1;
				}
				break;
			}
			/* Transfer is okay now. */
		}
		if (xfer->c_flags & C_DMA) {
			enum mvsata_edmamode dmamode;

			ata_bio->nblks = nblks;
			ata_bio->nbytes = xfer->c_bcount;

			/* switch to appropriate dma mode if necessary */
			dmamode = (xfer->c_flags & C_NCQ) ? ncq : dma;
			if (mvport->port_edmamode_curr != dmamode)
				mvsata_edma_config(mvport, dmamode);

			if (xfer->c_flags & C_POLL)
				sc->sc_enable_intr(mvport, 0 /*off*/);
			error = mvsata_edma_enqueue(mvport, xfer);
			if (error) {
				if (error == EINVAL) {
					/*
					 * We can't do DMA on this transfer
					 * for some reason.  Fall back to
					 * PIO.
					 */
					xfer->c_flags &= ~C_DMA;
					error = 0;
					goto do_pio;
				}
				if (error == EBUSY) {
					aprint_error_dev(atac->atac_dev,
					    "channel %d: EDMA Queue full\n",
					    chp->ch_channel);
					/*
					 * XXX: Perhaps, after it waits for
					 * a while, it is necessary to call
					 * bio_start again.
					 */
				}
				ata_bio->error = ERR_DMA;
				ata_bio->r_error = 0;
				return ATASTART_ABORT;
			}
			chp->ch_flags |= ATACH_DMA_WAIT;
			/* start timeout machinery */
			if ((xfer->c_flags & C_POLL) == 0)
				callout_reset(&chp->c_timo_callout,
				    mstohz(ATA_DELAY), ata_timeout, chp);
			/* wait for irq */
			goto intr;
		} /* else not DMA */
do_pio:
		/* Compute the device address in LBA48, LBA28 or CHS form. */
		if (ata_bio->flags & ATA_LBA48) {
			sect = 0;
			cyl = 0;
			head = 0;
		} else if (ata_bio->flags & ATA_LBA) {
			sect = (ata_bio->blkno >> 0) & 0xff;
			cyl = (ata_bio->blkno >> 8) & 0xffff;
			head = (ata_bio->blkno >> 24) & 0x0f;
			head |= WDSD_LBA;
		} else {
			int blkno = ata_bio->blkno;
			sect = blkno % drvp->lp->d_nsectors;
			sect++;	/* Sectors begin with 1, not 0. */
			blkno /= drvp->lp->d_nsectors;
			head = blkno % drvp->lp->d_ntracks;
			blkno /= drvp->lp->d_ntracks;
			cyl = blkno;
			head |= WDSD_CHS;
		}
		ata_bio->nblks = uimin(nblks, drvp->multi);
		ata_bio->nbytes = ata_bio->nblks * drvp->lp->d_secsize;
		KASSERT(nblks == 1 || (ata_bio->flags & ATA_SINGLE) == 0);
		if (ata_bio->nblks > 1)
			cmd = (ata_bio->flags & ATA_READ) ?
			    WDCC_READMULTI : WDCC_WRITEMULTI;
		else
			cmd = (ata_bio->flags & ATA_READ) ?
			    WDCC_READ : WDCC_WRITE;

		/* EDMA disable, if enabled this channel. */
		KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
		if (mvport->port_edmamode_curr != nodma)
			mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);

		mvsata_pmp_select(mvport, xfer->c_drive);

		/* Do control operations specially. */
		if (__predict_false(drvp->state < READY)) {
			/*
			 * Actually, we want to be careful not to mess with
			 * the control state if the device is currently busy,
			 * but we can assume that we never get to this point
			 * if that's the case.
			 */
			/*
			 * If it's not a polled command, we need the kernel
			 * thread
			 */
			if ((xfer->c_flags & C_POLL) == 0 &&
			    (chp->ch_flags & ATACH_TH_RUN) == 0) {
				return ATASTART_TH;
			}
			if (mvsata_bio_ready(mvport, ata_bio, xfer->c_drive,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0) != 0) {
				return ATASTART_ABORT;
			}
		}

		/* Initiate command! */
		MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
		switch(wdc_wait_for_ready(chp, ATA_DELAY, wait_flags, &tfd)) {
		case WDCWAIT_OK:
			break;
		case WDCWAIT_TOUT:
			goto timeout;
		case WDCWAIT_THR:
			return ATASTART_TH;
		}
		if (ata_bio->flags & ATA_LBA48)
			wdccommandext(chp, 0, atacmd_to48(cmd),
			    ata_bio->blkno, nblks, 0, WDSD_LBA);
		else
			wdccommand(chp, 0, cmd, cyl,
			    head, sect, nblks,
			    (drvp->lp->d_type == DKTYPE_ST506) ?
			    drvp->lp->d_precompcyl / 4 : 0);

		/* start timeout machinery */
		if ((xfer->c_flags & C_POLL) == 0)
			callout_reset(&chp->c_timo_callout,
			    mstohz(ATA_DELAY), wdctimeout, chp);
	} else if (ata_bio->nblks > 1) {
		/* The number of blocks in the last stretch may be smaller. */
		nblks = xfer->c_bcount / drvp->lp->d_secsize;
		if (ata_bio->nblks > nblks) {
			ata_bio->nblks = nblks;
			ata_bio->nbytes = xfer->c_bcount;
		}
	}
	/* If this was a write and not using DMA, push the data. */
	if ((ata_bio->flags & ATA_READ) == 0) {
		/*
		 * we have to busy-wait here, we can't rely on running in
		 * thread context.
		 */
		if (wdc_wait_for_drq(chp, ATA_DELAY, AT_POLL, &tfd) != 0) {
			aprint_error_dev(atac->atac_dev,
			    "channel %d: drive %d timeout waiting for DRQ,"
			    " st=0x%02x, err=0x%02x\n",
			    chp->ch_channel, xfer->c_drive, ATACH_ST(tfd),
			    ATACH_ERR(tfd));
			ata_bio->error = TIMEOUT;
			return ATASTART_ABORT;
		}
		if (ATACH_ST(tfd) & WDCS_ERR) {
			ata_bio->error = ERROR;
			ata_bio->r_error = ATACH_ERR(tfd);
			mvsata_bio_done(chp, xfer);
			return ATASTART_ABORT;
		}

		wdc->dataout_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, ata_bio->nbytes);
	}

intr:
	KASSERTMSG(((xfer->c_flags & C_DMA) != 0)
	    == (mvport->port_edmamode_curr != nodma),
	    "DMA mode mismatch: flags %x vs edmamode %d != %d",
	    xfer->c_flags, mvport->port_edmamode_curr, nodma);

	/* Wait for IRQ (either real or polled) */
	if ((ata_bio->flags & ATA_POLL) != 0)
		return ATASTART_POLL;
	else
		return ATASTART_STARTED;

timeout:
	aprint_error_dev(atac->atac_dev,
	    "channel %d: drive %d not ready, st=0x%02x, err=0x%02x\n",
	    chp->ch_channel, xfer->c_drive, ATACH_ST(tfd), ATACH_ERR(tfd));
	ata_bio->error = TIMEOUT;
	return ATASTART_ABORT;
}

/*
 * mvsata_bio_poll:
 *	Poll for completion of a bio transfer started in polled mode.
 *	Waits out a pending EDMA transfer (re-enabling port interrupts
 *	afterwards), then invokes the interrupt handler by hand if the
 *	request is still not marked done.
 */
static void
mvsata_bio_poll(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));

	/* Wait for at last 400ns for status bit to be valid */
	delay(1);
	if (chp->ch_flags & ATACH_DMA_WAIT) {
		mvsata_edma_wait(mvport, xfer, ATA_DELAY);
		sc->sc_enable_intr(mvport, 1 /*on*/);
		chp->ch_flags &= ~ATACH_DMA_WAIT;
	}

	if ((xfer->c_bio.flags & ATA_ITSDONE) == 0) {
		KASSERT(xfer->c_flags & C_TIMEOU);
		mvsata_bio_intr(chp, xfer, 0);
	}
}

/*
 * mvsata_bio_intr:
 *	Interrupt (or polled/timeout) completion handler for a bio
 *	transfer.  For DMA completions, intr_arg carries the taskfile
 *	status; for PIO reads, data is pulled here.  Restarts the next
 *	chunk of a partial transfer or finishes the request.  Returns
 *	nonzero if the interrupt was ours.
 */
static int
mvsata_bio_intr(struct ata_channel *chp, struct ata_xfer *xfer,
    int intr_arg)
{
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct ata_bio *ata_bio = &xfer->c_bio;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	/* irq != 0 means a real interrupt, not a poll/timeout invocation. */
	int irq = ISSET(xfer->c_flags, (C_POLL|C_TIMEOU)) ? 0 : 1;
	int tfd = 0;

	if (ISSET(xfer->c_flags, C_DMA|C_RECOVERED) && irq) {
		/* Invoked via mvsata_edma_handle() or recovery */
		tfd = intr_arg;

		/* Derive the bio error from the passed-in taskfile status. */
		if (tfd > 0 && ata_bio->error == NOERROR) {
			if (ATACH_ST(tfd) & WDCS_ERR)
				ata_bio->error = ERROR;
			if (ATACH_ST(tfd) & WDCS_BSY)
				ata_bio->error = TIMEOUT;
			ata_bio->r_error = ATACH_ERR(tfd);
		}
	}

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, ("%s:%d: %s: drive=%d\n",
	    device_xname(atac->atac_dev), chp->ch_channel, __func__,
	    xfer->c_drive));

	/* Cleanup EDMA if invoked from wdctimeout()/ata_timeout() */
	if (ISSET(xfer->c_flags, C_TIMEOU) && ISSET(xfer->c_flags, C_DMA)
	    && !ISSET(xfer->c_flags, C_POLL)) {
		mvsata_edma_rqq_remove((struct mvsata_port *)chp, xfer);
	}

	ata_channel_lock(chp);

	chp->ch_flags &= ~(ATACH_DMA_WAIT);

	/*
	 * If we missed an interrupt transfer, reset and restart.
	 * Don't try to continue transfer, we may have missed cycles.
	 */
	if (xfer->c_flags & C_TIMEOU) {
		ata_bio->error = TIMEOUT;
		ata_channel_unlock(chp);
		mvsata_bio_done(chp, xfer);
		return 1;
	}

	/* Is it not a transfer, but a control operation? */
	if (!(xfer->c_flags & C_DMA) && drvp->state < READY) {
		aprint_error_dev(atac->atac_dev,
		    "channel %d: drive %d bad state %d in %s\n",
		    chp->ch_channel, xfer->c_drive, drvp->state, __func__);
		panic("%s: bad state", __func__);
	}

	/* Ack interrupt done by wdc_wait_for_unbusy */
	if (!(xfer->c_flags & C_DMA) &&
	    (wdc_wait_for_unbusy(chp, (irq == 0) ? ATA_DELAY : 0, AT_POLL, &tfd)
	    == WDCWAIT_TOUT)) {
		if (irq && (xfer->c_flags & C_TIMEOU) == 0) {
			ata_channel_unlock(chp);
			return 0;	/* IRQ was not for us */
		}
		aprint_error_dev(atac->atac_dev,
		    "channel %d: drive %d timeout, c_bcount=%d, c_skip%d\n",
		    chp->ch_channel, xfer->c_drive, xfer->c_bcount,
		    xfer->c_skip);
		ata_bio->error = TIMEOUT;
		ata_channel_unlock(chp);
		mvsata_bio_done(chp, xfer);
		return 1;
	}

	if (xfer->c_flags & C_DMA) {
		if (ata_bio->error == NOERROR)
			goto end;
		if (ata_bio->error == ERR_DMA) {
			/* Downgrade the drive's DMA mode before retrying. */
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			ata_channel_unlock(chp);
			goto err;
		}
	}

	/* if we had an error, end */
	if (ata_bio->error != NOERROR) {
		ata_channel_unlock(chp);
err:
		mvsata_bio_done(chp, xfer);
		return 1;
	}

	/* If this was a read and not using DMA, fetch the data. */
	if ((ata_bio->flags & ATA_READ) != 0) {
		if ((ATACH_ST(tfd) & WDCS_DRQ) != WDCS_DRQ) {
			aprint_error_dev(atac->atac_dev,
			    "channel %d: drive %d read intr before drq\n",
			    chp->ch_channel, xfer->c_drive);
			ata_bio->error = TIMEOUT;
			ata_channel_unlock(chp);
			mvsata_bio_done(chp, xfer);
			return 1;
		}
		wdc->datain_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, ata_bio->nbytes);
	}

end:
	/* Account for the chunk just transferred. */
	ata_bio->blkno += ata_bio->nblks;
	ata_bio->blkdone += ata_bio->nblks;
	xfer->c_skip += ata_bio->nbytes;
	xfer->c_bcount -= ata_bio->nbytes;

	/* See if this transfer is complete. */
	if (xfer->c_bcount > 0) {
		if ((ata_bio->flags & ATA_POLL) == 0) {
			/* Start the next operation */
			ata_xfer_start(xfer);
		} else {
			/* Let mvsata_bio_start do the loop */
		}
		ata_channel_unlock(chp);
	} else { /* Done with this transfer */
		ata_bio->error = NOERROR;
		ata_channel_unlock(chp);
		mvsata_bio_done(chp, xfer);
	}
	return 1;
}

/*
 * mvsata_bio_kill_xfer:
 *	Abort a queued/active bio transfer for the given reason
 *	(device gone, channel reset, requeue) and notify the owner
 *	via its drv_done callback.
 */
static void
mvsata_bio_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_bio *ata_bio = &xfer->c_bio;
	int drive = xfer->c_drive;
	bool deactivate = true;

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS,
	    ("%s:%d: mvsata_bio_kill_xfer: drive=%d\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));

	/* EDMA restart, if enabled */
	if (!(xfer->c_flags & C_DMA) && mvport->port_edmamode_curr != nodma) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}

	ata_bio->flags |= ATA_ITSDONE;
	switch (reason) {
	case KILL_GONE_INACTIVE:
		deactivate = false;
		/* FALLTHROUGH */
	case KILL_GONE:
		ata_bio->error = ERR_NODEV;
		break;
	case KILL_RESET:
		ata_bio->error = ERR_RESET;
		break;
	case KILL_REQUEUE:
		ata_bio->error = REQUEUE;
		break;
	default:
		aprint_error_dev(atac->atac_dev,
		    "mvsata_bio_kill_xfer: unknown reason %d\n", reason);
		panic("mvsata_bio_kill_xfer");
	}
	ata_bio->r_error = WDCE_ABRT;

	if (deactivate)
		ata_deactivate_xfer(chp, xfer);

	(*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc, xfer);
}

/*
 * mvsata_bio_done:
 *	Normal completion path for a bio transfer: restart EDMA if it
 *	was bypassed for PIO, report the residual count, release the
 *	xfer and call the owner's drv_done callback.
 */
static void
mvsata_bio_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct ata_bio *ata_bio = &xfer->c_bio;
	int drive =
 xfer->c_drive;
	bool iserror = (ata_bio->error != NOERROR);

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS,
	    ("%s:%d: mvsata_bio_done: drive=%d, flags=0x%x\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive,
	    (u_int)xfer->c_flags));

	/* EDMA restart, if enabled */
	if (!(xfer->c_flags & C_DMA) && mvport->port_edmamode_curr != nodma) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}

	if (ata_waitdrain_xfer_check(chp, xfer))
		return;

	/* feed back residual bcount to our caller */
	ata_bio->bcount = xfer->c_bcount;

	/* mark controller inactive and free xfer */
	ata_deactivate_xfer(chp, xfer);

	ata_bio->flags |= ATA_ITSDONE;
	(*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc, xfer);
	/* Kick the queue for the next request unless we are in error. */
	if (!iserror)
		atastart(chp);
}

/*
 * mvsata_bio_ready:
 *	Bring a drive to the READY state before its first transfer:
 *	recalibrate, set PIO/DMA/UDMA transfer modes, program the
 *	geometry (for non-LBA drives) and the multi-sector count.
 *	All commands are issued polled with interrupts disabled.
 *	Returns 0 on success, -1 on timeout or drive error (ata_bio's
 *	error fields are set accordingly).  Called with the channel
 *	lock held.
 */
static int
mvsata_bio_ready(struct mvsata_port *mvport, struct ata_bio *ata_bio, int drive,
    int flags)
{
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_drive_datas *drvp = &chp->ch_drive[drive];
	const char *errstring;
	int tfd;

	flags |= AT_POLL;	/* XXX */

	ata_channel_lock_owned(chp);

	/*
	 * disable interrupts, all commands here should be quick
	 * enough to be able to poll, and we don't go here that often
	 */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	DELAY(10);
	errstring = "wait";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags, &tfd))
		goto ctrltimeout;
	wdccommandshort(chp, 0, WDCC_RECAL);
	/* Wait for at least 400ns for status bit to be valid */
	DELAY(1);
	errstring = "recal";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags, &tfd))
		goto ctrltimeout;
	if (ATACH_ST(tfd) & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
	/* Don't try to set modes if controller can't be adjusted */
	if (atac->atac_set_modes == NULL)
		goto geometry;
	/* Also don't try if the drive didn't report its mode */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0)
		goto geometry;
	/* SET FEATURES: 0x08 | n selects PIO mode n. */
	wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
	    0x08 | drvp->PIO_mode, WDSF_SET_MODE);
	errstring = "piomode-bio";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags, &tfd))
		goto ctrltimeout;
	if (ATACH_ST(tfd) & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
	/* 0x40 | n selects UDMA mode n, 0x20 | n selects MW DMA mode n. */
	if (drvp->drive_flags & ATA_DRIVE_UDMA)
		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
		    0x40 | drvp->UDMA_mode, WDSF_SET_MODE);
	else if (drvp->drive_flags & ATA_DRIVE_DMA)
		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
		    0x20 | drvp->DMA_mode, WDSF_SET_MODE);
	else
		goto geometry;
	errstring = "dmamode-bio";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags, &tfd))
		goto ctrltimeout;
	if (ATACH_ST(tfd) & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
geometry:
	if (ata_bio->flags & ATA_LBA)
		goto multimode;
	wdccommand(chp, 0, WDCC_IDP, drvp->lp->d_ncylinders,
	    drvp->lp->d_ntracks - 1, 0, drvp->lp->d_nsectors,
	    (drvp->lp->d_type == DKTYPE_ST506) ?
	    drvp->lp->d_precompcyl / 4 : 0);
	errstring = "geometry";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags, &tfd))
		goto ctrltimeout;
	if (ATACH_ST(tfd) & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
multimode:
	if (drvp->multi == 1)
		goto ready;
	wdccommand(chp, 0, WDCC_SETMULTI, 0, 0, 0, drvp->multi, 0);
	errstring = "setmulti";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags, &tfd))
		goto ctrltimeout;
	if (ATACH_ST(tfd) & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
ready:
	drvp->state = READY;
	/*
	 * The drive is usable now
	 */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	delay(10);	/* some drives need a little delay here */
	return 0;

ctrltimeout:
	aprint_error_dev(atac->atac_dev, "channel %d: drive %d %s timed out\n",
	    chp->ch_channel, drive, errstring);
	ata_bio->error = TIMEOUT;
	goto ctrldone;
ctrlerror:
	aprint_error_dev(atac->atac_dev, "channel %d: drive %d %s ",
	    chp->ch_channel, drive, errstring);
	if (ATACH_ST(tfd) & WDCS_DWF) {
		aprint_error("drive fault\n");
		ata_bio->error = ERR_DF;
	} else {
		ata_bio->r_error = ATACH_ERR(tfd);
		ata_bio->error = ERROR;
		aprint_error("error (%x)\n", ata_bio->r_error);
	}
ctrldone:
	drvp->state = 0;
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	return -1;
}

/* Dispatch table for non-bio ATA commands (wdc_exec_command path). */
static const struct ata_xfer_ops mvsata_wdc_cmd_xfer_ops = {
	.c_start = mvsata_wdc_cmd_start,
	.c_intr = mvsata_wdc_cmd_intr,
	.c_poll = mvsata_wdc_cmd_poll,
	.c_abort = mvsata_wdc_cmd_done,
	.c_kill_xfer = mvsata_wdc_cmd_kill_xfer,
};

/*
 * mvsata_exec_command:
 *	Entry point for a raw ATA command.  Fill in the xfer from the
 *	ata_command, queue it, and optionally wait for completion.
 *	Returns ATACMD_COMPLETE or ATACMD_QUEUED.
 */
static int
mvsata_exec_command(struct ata_drive_datas *drvp, struct ata_xfer *xfer)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct ata_command *ata_c = &xfer->c_ata_c;
	int rv, s;

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS,
	    ("%s:%d:
 mvsata_exec_command: drive=%d, bcount=%d,"
	    " r_lba=0x%012"PRIx64", r_count=0x%04x, r_features=0x%04x,"
	    " r_device=0x%02x, r_command=0x%02x\n",
	    device_xname(MVSATA_DEV2((struct mvsata_port *)chp)),
	    chp->ch_channel,
	    drvp->drive, ata_c->bcount, ata_c->r_lba, ata_c->r_count,
	    ata_c->r_features, ata_c->r_device, ata_c->r_command));

	if (ata_c->flags & AT_POLL)
		xfer->c_flags |= C_POLL;
	if (ata_c->flags & AT_WAIT)
		xfer->c_flags |= C_WAIT;
	xfer->c_drive = drvp->drive;
	xfer->c_databuf = ata_c->data;
	xfer->c_bcount = ata_c->bcount;
	xfer->ops = &mvsata_wdc_cmd_xfer_ops;
	s = splbio();
	ata_exec_xfer(chp, xfer);
#ifdef DIAGNOSTIC
	if ((ata_c->flags & AT_POLL) != 0 &&
	    (ata_c->flags & AT_DONE) == 0)
		panic("mvsata_exec_command: polled command not done");
#endif
	if (ata_c->flags & AT_DONE)
		rv = ATACMD_COMPLETE;
	else {
		if (ata_c->flags & AT_WAIT) {
			/* Sleep until the command completes. */
			ata_wait_cmd(chp, xfer);
			rv = ATACMD_COMPLETE;
		} else
			rv = ATACMD_QUEUED;
	}
	splx(s);
	return rv;
}

/*
 * mvsata_wdc_cmd_start:
 *	Issue a raw ATA command through the shadow registers.  EDMA is
 *	disabled first since these commands use the legacy taskfile
 *	interface.  Returns an ATASTART_* disposition.  Called with the
 *	channel lock held.
 */
static int
mvsata_wdc_cmd_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	int drive = xfer->c_drive;
	int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
	struct ata_command *ata_c = &xfer->c_ata_c;
	int tfd;

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS,
	    ("%s:%d: mvsata_cmd_start: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, drive));

	ata_channel_lock_owned(chp);

	/* First, EDMA disable, if enabled this channel. */
	KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
	if (mvport->port_edmamode_curr != nodma)
		mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);

	mvsata_pmp_select(mvport, drive);

	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	switch(wdcwait(chp, ata_c->r_st_bmask | WDCS_DRQ,
	    ata_c->r_st_bmask, ata_c->timeout, wait_flags, &tfd)) {
	case WDCWAIT_OK:
		break;
	case WDCWAIT_TOUT:
		ata_c->flags |= AT_TIMEOU;
		return ATASTART_ABORT;
	case WDCWAIT_THR:
		return ATASTART_TH;
	}
	if (ata_c->flags & AT_POLL)
		/* polled command, disable interrupts */
		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
	if ((ata_c->flags & AT_LBA48) != 0) {
		wdccommandext(chp, 0, ata_c->r_command,
		    ata_c->r_lba, ata_c->r_count, ata_c->r_features,
		    ata_c->r_device & ~0x10);
	} else {
		wdccommand(chp, 0, ata_c->r_command,
		    (ata_c->r_lba >> 8) & 0xffff,
		    (((ata_c->flags & AT_LBA) != 0) ? WDSD_LBA : 0) |
		    ((ata_c->r_lba >> 24) & 0x0f),
		    ata_c->r_lba & 0xff,
		    ata_c->r_count & 0xff,
		    ata_c->r_features & 0xff);
	}

	if ((ata_c->flags & AT_POLL) == 0) {
		callout_reset(&chp->c_timo_callout, ata_c->timeout / 1000 * hz,
		    wdctimeout, chp);
		return ATASTART_STARTED;
	}

	return ATASTART_POLL;
}

/*
 * mvsata_wdc_cmd_poll:
 *	Polled completion for a raw ATA command; delegates to the
 *	interrupt handler.
 */
static void
mvsata_wdc_cmd_poll(struct ata_channel *chp, struct ata_xfer *xfer)
{
	/*
	 * Polled command.  Wait for drive ready or drq.  Done in intr().
	 * Wait for at last 400ns for status bit to be valid.
	 */
	delay(10);	/* 400ns delay */
	mvsata_wdc_cmd_intr(chp, xfer, 0);
}

/*
 * mvsata_wdc_cmd_intr:
 *	Interrupt/polled completion handler for a raw ATA command.
 *	Waits for the expected status, performs PIO data-in/out as
 *	requested, records error/fault bits, then finishes the command
 *	via mvsata_wdc_cmd_done().  Returns nonzero if the interrupt
 *	was ours.
 */
static int
mvsata_wdc_cmd_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct ata_command *ata_c = &xfer->c_ata_c;
	int bcount = ata_c->bcount;
	char *data = ata_c->data;
	int wflags;
	int drive_flags;
	int tfd;

	ata_channel_lock(chp);

	if (ata_c->r_command == WDCC_IDENTIFY ||
	    ata_c->r_command == ATAPI_IDENTIFY_DEVICE)
		/*
		 * The IDENTIFY data has been designed as an array of
		 * u_int16_t, so we can byteswap it on the fly.
		 * Historically it's what we have always done so keeping it
		 * here ensure binary backward compatibility.
		 */
		drive_flags = ATA_DRIVE_NOSTREAM |
		    chp->ch_drive[xfer->c_drive].drive_flags;
	else
		/*
		 * Other data structure are opaque and should be transferred
		 * as is.
		 */
		drive_flags = chp->ch_drive[xfer->c_drive].drive_flags;

	if ((ata_c->flags & (AT_WAIT | AT_POLL)) == (AT_WAIT | AT_POLL))
		/* both wait and poll, we can kpause here */
		wflags = AT_WAIT | AT_POLL;
	else
		wflags = AT_POLL;

again:
	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, ("%s:%d: %s: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel,
	    __func__, xfer->c_drive));

	/*
	 * after a ATAPI_SOFT_RESET, the device will have released the bus.
	 * Reselect again, it doesn't hurt for others commands, and the time
	 * penalty for the extra register write is acceptable,
	 * wdc_exec_command() isn't called often (mostly for autoconfig)
	 */
	if ((xfer->c_flags & C_ATAPI) != 0) {
		MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	}
	if ((ata_c->flags & AT_XFDONE) != 0) {
		/*
		 * We have completed a data xfer.  The drive should now be
		 * in its initial state
		 */
		if (wdcwait(chp, ata_c->r_st_bmask | WDCS_DRQ,
		    ata_c->r_st_bmask, (irq == 0) ? ata_c->timeout : 0,
		    wflags, &tfd) == WDCWAIT_TOUT) {
			if (irq && (xfer->c_flags & C_TIMEOU) == 0) {
				ata_channel_unlock(chp);
				return 0;	/* IRQ was not for us */
			}
			ata_c->flags |= AT_TIMEOU;
		}
		goto out;
	}
	if (wdcwait(chp, ata_c->r_st_pmask, ata_c->r_st_pmask,
	    (irq == 0) ? ata_c->timeout : 0, wflags, &tfd) == WDCWAIT_TOUT) {
		if (irq && (xfer->c_flags & C_TIMEOU) == 0) {
			ata_channel_unlock(chp);
			return 0;	/* IRQ was not for us */
		}
		ata_c->flags |= AT_TIMEOU;
		goto out;
	}
	delay(20);	/* XXXXX: Delay more times. */
	if (ata_c->flags & AT_READ) {
		if ((ATACH_ST(tfd) & WDCS_DRQ) == 0) {
			ata_c->flags |= AT_TIMEOU;
			goto out;
		}
		wdc->datain_pio(chp, drive_flags, data, bcount);
		/* at this point the drive should be in its initial state */
		ata_c->flags |= AT_XFDONE;
		/*
		 * XXX checking the status register again here cause some
		 * hardware to timeout.
		 */
	} else if (ata_c->flags & AT_WRITE) {
		if ((ATACH_ST(tfd) & WDCS_DRQ) == 0) {
			ata_c->flags |= AT_TIMEOU;
			goto out;
		}
		wdc->dataout_pio(chp, drive_flags, data, bcount);
		ata_c->flags |= AT_XFDONE;
		if ((ata_c->flags & AT_POLL) == 0) {
			/* Completion will arrive as an interrupt. */
			callout_reset(&chp->c_timo_callout,
			    mstohz(ata_c->timeout), wdctimeout, chp);
			ata_channel_unlock(chp);
			return 1;
		} else
			goto again;
	}
out:
	if (ATACH_ST(tfd) & WDCS_DWF)
		ata_c->flags |= AT_DF;
	if (ATACH_ST(tfd) & WDCS_ERR) {
		ata_c->flags |= AT_ERROR;
		ata_c->r_error = ATACH_ERR(tfd);
	}
	ata_channel_unlock(chp);
	mvsata_wdc_cmd_done(chp, xfer);

	if ((ATACH_ST(tfd) & WDCS_ERR) == 0)
		atastart(chp);

	return 1;
}

/*
 * mvsata_wdc_cmd_kill_xfer:
 *	Abort a queued/active raw ATA command for the given reason.
 */
static void
mvsata_wdc_cmd_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer,
    int reason)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct ata_command *ata_c = &xfer->c_ata_c;
	bool deactivate = true;

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS,
	    ("%s:%d: mvsata_cmd_kill_xfer: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive));

	switch (reason) {
	case KILL_GONE_INACTIVE:
		deactivate = false;
		/* FALLTHROUGH */
	case KILL_GONE:
		ata_c->flags |= AT_GONE;
		break;
	case KILL_RESET:
		ata_c->flags |= AT_RESET;
		break;
	case KILL_REQUEUE:
		panic("%s: not supposed to be requeued\n", __func__);
		break;
	default:
		aprint_error_dev(MVSATA_DEV2(mvport),
		    "mvsata_cmd_kill_xfer: unknown reason %d\n", reason);
		panic("mvsata_cmd_kill_xfer");
	}

	mvsata_wdc_cmd_done_end(chp, xfer);

	if (deactivate)
		ata_deactivate_xfer(chp, xfer);
}

/*
 * mvsata_wdc_cmd_done:
 *	Completion path for a raw ATA command: read back the result
 *	taskfile registers if requested (AT_READREG), re-enable
 *	interrupts after a polled command, and release the xfer.
 */
static void
mvsata_wdc_cmd_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
struct atac_softc *atac = chp->ch_atac; 1880 struct ata_command *ata_c = &xfer->c_ata_c; 1881 1882 DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, 1883 ("%s:%d: mvsata_cmd_done: drive=%d, flags=0x%x\n", 1884 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive, 1885 ata_c->flags)); 1886 1887 if (ata_waitdrain_xfer_check(chp, xfer)) 1888 return; 1889 1890 if ((ata_c->flags & AT_READREG) != 0 && 1891 device_is_active(atac->atac_dev) && 1892 (ata_c->flags & (AT_ERROR | AT_DF)) == 0) { 1893 ata_c->r_status = MVSATA_WDC_READ_1(mvport, SRB_CS); 1894 ata_c->r_error = MVSATA_WDC_READ_1(mvport, SRB_FE); 1895 ata_c->r_count = MVSATA_WDC_READ_1(mvport, SRB_SC); 1896 ata_c->r_lba = 1897 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 0; 1898 ata_c->r_lba |= 1899 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 8; 1900 ata_c->r_lba |= 1901 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 16; 1902 ata_c->r_device = MVSATA_WDC_READ_1(mvport, SRB_H); 1903 if ((ata_c->flags & AT_LBA48) != 0) { 1904 if ((ata_c->flags & AT_POLL) != 0) { 1905 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, 1906 WDCTL_HOB|WDCTL_4BIT|WDCTL_IDS); 1907 } else { 1908 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, 1909 WDCTL_HOB|WDCTL_4BIT); 1910 } 1911 ata_c->r_count |= 1912 MVSATA_WDC_READ_1(mvport, SRB_SC) << 8; 1913 ata_c->r_lba |= 1914 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 24; 1915 ata_c->r_lba |= 1916 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 32; 1917 ata_c->r_lba |= 1918 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 40; 1919 if ((ata_c->flags & AT_POLL) != 0) { 1920 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, 1921 WDCTL_4BIT|WDCTL_IDS); 1922 } else { 1923 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, 1924 WDCTL_4BIT); 1925 } 1926 } else { 1927 ata_c->r_lba |= 1928 (uint64_t)(ata_c->r_device & 0x0f) << 24; 1929 } 1930 } 1931 1932 if (ata_c->flags & AT_POLL) { 1933 /* enable interrupts */ 1934 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT); 1935 delay(10); /* some drives need a little delay here */ 1936 } 1937 1938 
mvsata_wdc_cmd_done_end(chp, xfer); 1939 1940 ata_deactivate_xfer(chp, xfer); 1941 } 1942 1943 static void 1944 mvsata_wdc_cmd_done_end(struct ata_channel *chp, struct ata_xfer *xfer) 1945 { 1946 struct mvsata_port *mvport = (struct mvsata_port *)chp; 1947 struct ata_command *ata_c = &xfer->c_ata_c; 1948 1949 /* EDMA restart, if enabled */ 1950 if (mvport->port_edmamode_curr != nodma) { 1951 mvsata_edma_reset_qptr(mvport); 1952 mvsata_edma_enable(mvport); 1953 } 1954 1955 ata_c->flags |= AT_DONE; 1956 } 1957 1958 #if NATAPIBUS > 0 1959 static const struct ata_xfer_ops mvsata_atapi_xfer_ops = { 1960 .c_start = mvsata_atapi_start, 1961 .c_intr = mvsata_atapi_intr, 1962 .c_poll = mvsata_atapi_poll, 1963 .c_abort = mvsata_atapi_reset, 1964 .c_kill_xfer = mvsata_atapi_kill_xfer, 1965 }; 1966 1967 static void 1968 mvsata_atapi_scsipi_request(struct scsipi_channel *chan, 1969 scsipi_adapter_req_t req, void *arg) 1970 { 1971 struct scsipi_adapter *adapt = chan->chan_adapter; 1972 struct scsipi_periph *periph; 1973 struct scsipi_xfer *sc_xfer; 1974 struct mvsata_softc *sc = device_private(adapt->adapt_dev); 1975 struct atac_softc *atac = &sc->sc_wdcdev.sc_atac; 1976 struct ata_channel *chp = atac->atac_channels[chan->chan_channel]; 1977 struct ata_xfer *xfer; 1978 int drive, s; 1979 1980 switch (req) { 1981 case ADAPTER_REQ_RUN_XFER: 1982 sc_xfer = arg; 1983 periph = sc_xfer->xs_periph; 1984 drive = periph->periph_target; 1985 1986 if (!device_is_active(atac->atac_dev)) { 1987 sc_xfer->error = XS_DRIVER_STUFFUP; 1988 scsipi_done(sc_xfer); 1989 return; 1990 } 1991 xfer = ata_get_xfer(chp, false); 1992 if (xfer == NULL) { 1993 sc_xfer->error = XS_RESOURCE_SHORTAGE; 1994 scsipi_done(sc_xfer); 1995 return; 1996 } 1997 1998 if (sc_xfer->xs_control & XS_CTL_POLL) 1999 xfer->c_flags |= C_POLL; 2000 xfer->c_drive = drive; 2001 xfer->c_flags |= C_ATAPI; 2002 xfer->c_databuf = sc_xfer->data; 2003 xfer->c_bcount = sc_xfer->datalen; 2004 xfer->ops = &mvsata_atapi_xfer_ops; 2005 
		xfer->c_scsipi = sc_xfer;
		xfer->c_atapi.c_dscpoll = 0;
		s = splbio();
		ata_exec_xfer(chp, xfer);
#ifdef DIAGNOSTIC
		if ((sc_xfer->xs_control & XS_CTL_POLL) != 0 &&
		    (sc_xfer->xs_status & XS_STS_DONE) == 0)
			panic("mvsata_atapi_scsipi_request:"
			    " polled command not done");
#endif
		splx(s);
		return;

	default:
		/* Not supported, nothing to do. */
		;
	}
}

/*
 * Start an ATAPI transfer.  If the drive is not yet initialized
 * (state < READY), program its PIO/UDMA/DMA transfer modes first via
 * SET_FEATURES (polled; may defer to the kernel thread), then issue
 * the ATAPI packet command.  Returns an ATASTART_* disposition.
 */
static int
mvsata_atapi_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_softc *sc = (struct mvsata_softc *)chp->ch_atac;
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = &sc->sc_wdcdev.sc_atac;
	struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	const int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
	const char *errstring;
	int tfd;

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS,
	    ("%s:%d:%d: mvsata_atapi_start: scsi flags 0x%x\n",
	    device_xname(chp->ch_atac->atac_dev), chp->ch_channel,
	    xfer->c_drive, sc_xfer->xs_control));

	ata_channel_lock_owned(chp);

	KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
	/* ATAPI commands go through the shadow registers, not EDMA. */
	if (mvport->port_edmamode_curr != nodma)
		mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);

	mvsata_pmp_select(mvport, xfer->c_drive);

	if ((xfer->c_flags & C_DMA) && (drvp->n_xfers <= NXFER))
		drvp->n_xfers++;

	/* Do control operations specially. */
	if (__predict_false(drvp->state < READY)) {
		/* If it's not a polled command, we need the kernel thread */
		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0 &&
		    (chp->ch_flags & ATACH_TH_RUN) == 0) {
			return ATASTART_TH;
		}
		/*
		 * disable interrupts, all commands here should be quick
		 * enough to be able to poll, and we don't go here that often
		 */
		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);

		MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
		/* Don't try to set mode if controller can't be adjusted */
		if (atac->atac_set_modes == NULL)
			goto ready;
		/* Also don't try if the drive didn't report its mode */
		if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0)
			goto ready;
		errstring = "unbusy";
		if (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags, &tfd))
			goto timeout;
		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
		    0x08 | drvp->PIO_mode, WDSF_SET_MODE);
		errstring = "piomode-atapi";
		if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags,
		    &tfd))
			goto timeout;
		if (ATACH_ST(tfd) & WDCS_ERR) {
			if (ATACH_ERR(tfd) == WDCE_ABRT) {
				/*
				 * Some ATAPI drives reject PIO settings.
				 * Fall back to PIO mode 3 since that's the
				 * minimum for ATAPI.
				 */
				aprint_error_dev(atac->atac_dev,
				    "channel %d drive %d: PIO mode %d rejected,"
				    " falling back to PIO mode 3\n",
				    chp->ch_channel, xfer->c_drive,
				    drvp->PIO_mode);
				if (drvp->PIO_mode > 3)
					drvp->PIO_mode = 3;
			} else
				goto error;
		}
		/* Prefer UDMA over multiword DMA when both are reported. */
		if (drvp->drive_flags & ATA_DRIVE_UDMA)
			wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
			    0x40 | drvp->UDMA_mode, WDSF_SET_MODE);
		else
		if (drvp->drive_flags & ATA_DRIVE_DMA)
			wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
			    0x20 | drvp->DMA_mode, WDSF_SET_MODE);
		else
			goto ready;
		errstring = "dmamode-atapi";
		if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags,
		    &tfd))
			goto timeout;
		if (ATACH_ST(tfd) & WDCS_ERR) {
			if (ATACH_ERR(tfd) == WDCE_ABRT) {
				if (drvp->drive_flags & ATA_DRIVE_UDMA)
					goto error;
				else {
					/*
					 * The drive rejected our DMA setting.
					 * Fall back to mode 1.
					 */
					aprint_error_dev(atac->atac_dev,
					    "channel %d drive %d:"
					    " DMA mode %d rejected,"
					    " falling back to DMA mode 0\n",
					    chp->ch_channel, xfer->c_drive,
					    drvp->DMA_mode);
					if (drvp->DMA_mode > 0)
						drvp->DMA_mode = 0;
				}
			} else
				goto error;
		}
ready:
		drvp->state = READY;
		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
		delay(10);	/* some drives need a little delay here */
	}
	/* start timeout machinery */
	if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
		callout_reset(&chp->c_timo_callout, mstohz(sc_xfer->timeout),
		    wdctimeout, chp);

	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	if (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags, &tfd) != 0) {
		aprint_error_dev(atac->atac_dev, "not ready, st = %02x\n",
		    ATACH_ST(tfd));
		sc_xfer->error = XS_TIMEOUT;
		return ATASTART_ABORT;
	}

	/*
	 * Even with WDCS_ERR, the device should accept a command packet
	 * Limit length to what can be stuffed into the cylinder register
	 * (16 bits).  Some CD-ROMs seem to interpret '0' as 65536,
	 * but not all devices do that and it's not obvious from the
	 * ATAPI spec that that behaviour should be expected.  If more
	 * data is necessary, multiple data transfer phases will be done.
	 */

	wdccommand(chp, 0, ATAPI_PKT_CMD,
	    xfer->c_bcount <= 0xffff ? xfer->c_bcount : 0xffff, 0, 0, 0,
	    (xfer->c_flags & C_DMA) ? ATAPI_PKT_CMD_FTRE_DMA : 0);

	/*
	 * If there is no interrupt for CMD input, busy-wait for it (done in
	 * the interrupt routine.  Poll routine will exit early in this case.
	 */
	if ((sc_xfer->xs_periph->periph_cap & ATAPI_CFG_DRQ_MASK) !=
	    ATAPI_CFG_IRQ_DRQ || (sc_xfer->xs_control & XS_CTL_POLL))
		return ATASTART_POLL;
	else
		return ATASTART_STARTED;

timeout:
	aprint_error_dev(atac->atac_dev, "channel %d drive %d: %s timed out\n",
	    chp->ch_channel, xfer->c_drive, errstring);
	sc_xfer->error = XS_TIMEOUT;
	/* Re-enable interrupts before aborting. */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	delay(10);	/* some drives need a little delay here */
	return ATASTART_ABORT;

error:
	aprint_error_dev(atac->atac_dev,
	    "channel %d drive %d: %s error (0x%x)\n",
	    chp->ch_channel, xfer->c_drive, errstring, ATACH_ERR(tfd));
	sc_xfer->error = XS_SHORTSENSE;
	sc_xfer->sense.atapi_sense = ATACH_ERR(tfd);
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	delay(10);	/* some drives need a little delay here */
	return ATASTART_ABORT;
}

static void
mvsata_atapi_poll(struct ata_channel *chp, struct ata_xfer *xfer)
{
	/*
	 * If there is no interrupt for CMD input, busy-wait for it (done in
	 * the interrupt routine.  If it is a polled command, call the interrupt
	 * routine until command is done.
2199 */ 2200 const bool poll = ((xfer->c_scsipi->xs_control & XS_CTL_POLL) != 0); 2201 2202 /* Wait for at last 400ns for status bit to be valid */ 2203 DELAY(1); 2204 mvsata_atapi_intr(chp, xfer, 0); 2205 2206 if (!poll) 2207 return; 2208 2209 if (chp->ch_flags & ATACH_DMA_WAIT) { 2210 wdc_dmawait(chp, xfer, xfer->c_scsipi->timeout); 2211 chp->ch_flags &= ~ATACH_DMA_WAIT; 2212 } 2213 2214 while ((xfer->c_scsipi->xs_status & XS_STS_DONE) == 0) { 2215 /* Wait for at last 400ns for status bit to be valid */ 2216 DELAY(1); 2217 mvsata_atapi_intr(chp, xfer, 0); 2218 } 2219 } 2220 2221 static int 2222 mvsata_atapi_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq) 2223 { 2224 struct mvsata_port *mvport = (struct mvsata_port *)chp; 2225 struct atac_softc *atac = chp->ch_atac; 2226 struct wdc_softc *wdc = CHAN_TO_WDC(chp); 2227 struct scsipi_xfer *sc_xfer = xfer->c_scsipi; 2228 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive]; 2229 int len, phase, ire, error, retries=0, i; 2230 int tfd; 2231 void *cmd; 2232 2233 ata_channel_lock(chp); 2234 2235 DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, 2236 ("%s:%d:%d: mvsata_atapi_intr\n", 2237 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive)); 2238 2239 /* Is it not a transfer, but a control operation? */ 2240 if (drvp->state < READY) { 2241 aprint_error_dev(atac->atac_dev, 2242 "channel %d drive %d: bad state %d\n", 2243 chp->ch_channel, xfer->c_drive, drvp->state); 2244 panic("mvsata_atapi_intr: bad state"); 2245 } 2246 /* 2247 * If we missed an interrupt in a PIO transfer, reset and restart. 2248 * Don't try to continue transfer, we may have missed cycles. 2249 */ 2250 if ((xfer->c_flags & (C_TIMEOU | C_DMA)) == C_TIMEOU) { 2251 ata_channel_unlock(chp); 2252 sc_xfer->error = XS_TIMEOUT; 2253 mvsata_atapi_reset(chp, xfer); 2254 return 1; 2255 } 2256 2257 /* Ack interrupt done in wdc_wait_for_unbusy */ 2258 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM); 2259 if (wdc_wait_for_unbusy(chp, 2260 (irq == 0) ? 
sc_xfer->timeout : 0, AT_POLL, &tfd) == WDCWAIT_TOUT) { 2261 if (irq && (xfer->c_flags & C_TIMEOU) == 0) { 2262 ata_channel_unlock(chp); 2263 return 0; /* IRQ was not for us */ 2264 } 2265 aprint_error_dev(atac->atac_dev, 2266 "channel %d: device timeout, c_bcount=%d, c_skip=%d\n", 2267 chp->ch_channel, xfer->c_bcount, xfer->c_skip); 2268 if (xfer->c_flags & C_DMA) 2269 ata_dmaerr(drvp, 2270 (xfer->c_flags & C_POLL) ? AT_POLL : 0); 2271 sc_xfer->error = XS_TIMEOUT; 2272 ata_channel_unlock(chp); 2273 mvsata_atapi_reset(chp, xfer); 2274 return 1; 2275 } 2276 2277 /* 2278 * If we missed an IRQ and were using DMA, flag it as a DMA error 2279 * and reset device. 2280 */ 2281 if ((xfer->c_flags & C_TIMEOU) && (xfer->c_flags & C_DMA)) { 2282 ata_dmaerr(drvp, (xfer->c_flags & C_POLL) ? AT_POLL : 0); 2283 sc_xfer->error = XS_RESET; 2284 ata_channel_unlock(chp); 2285 mvsata_atapi_reset(chp, xfer); 2286 return (1); 2287 } 2288 /* 2289 * if the request sense command was aborted, report the short sense 2290 * previously recorded, else continue normal processing 2291 */ 2292 2293 again: 2294 len = MVSATA_WDC_READ_1(mvport, SRB_LBAM) + 2295 256 * MVSATA_WDC_READ_1(mvport, SRB_LBAH); 2296 ire = MVSATA_WDC_READ_1(mvport, SRB_SC); 2297 phase = (ire & (WDCI_CMD | WDCI_IN)) | (ATACH_ST(tfd) & WDCS_DRQ); 2298 DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, ( 2299 "mvsata_atapi_intr: c_bcount %d len %d st 0x%x err 0x%x ire 0x%x :", 2300 xfer->c_bcount, len, ATACH_ST(tfd), ATACH_ERR(tfd), ire)); 2301 2302 switch (phase) { 2303 case PHASE_CMDOUT: 2304 cmd = sc_xfer->cmd; 2305 DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, ("PHASE_CMDOUT\n")); 2306 /* Init the DMA channel if necessary */ 2307 if (xfer->c_flags & C_DMA) { 2308 error = mvsata_bdma_init(mvport, xfer); 2309 if (error) { 2310 if (error == EINVAL) { 2311 /* 2312 * We can't do DMA on this transfer 2313 * for some reason. Fall back to PIO. 
2314 */ 2315 xfer->c_flags &= ~C_DMA; 2316 error = 0; 2317 } else { 2318 sc_xfer->error = XS_DRIVER_STUFFUP; 2319 break; 2320 } 2321 } 2322 } 2323 2324 /* send packet command */ 2325 /* Commands are 12 or 16 bytes long. It's 32-bit aligned */ 2326 wdc->dataout_pio(chp, drvp->drive_flags, cmd, sc_xfer->cmdlen); 2327 2328 /* Start the DMA channel if necessary */ 2329 if (xfer->c_flags & C_DMA) { 2330 mvsata_bdma_start(mvport); 2331 chp->ch_flags |= ATACH_DMA_WAIT; 2332 } 2333 ata_channel_unlock(chp); 2334 return 1; 2335 2336 case PHASE_DATAOUT: 2337 /* write data */ 2338 DPRINTF(DEBUG_XFERS, ("PHASE_DATAOUT\n")); 2339 if ((sc_xfer->xs_control & XS_CTL_DATA_OUT) == 0 || 2340 (xfer->c_flags & C_DMA) != 0) { 2341 aprint_error_dev(atac->atac_dev, 2342 "channel %d drive %d: bad data phase DATAOUT\n", 2343 chp->ch_channel, xfer->c_drive); 2344 if (xfer->c_flags & C_DMA) 2345 ata_dmaerr(drvp, 2346 (xfer->c_flags & C_POLL) ? AT_POLL : 0); 2347 sc_xfer->error = XS_TIMEOUT; 2348 ata_channel_unlock(chp); 2349 mvsata_atapi_reset(chp, xfer); 2350 return 1; 2351 } 2352 xfer->c_atapi.c_lenoff = len - xfer->c_bcount; 2353 if (xfer->c_bcount < len) { 2354 aprint_error_dev(atac->atac_dev, "channel %d drive %d:" 2355 " warning: write only %d of %d requested bytes\n", 2356 chp->ch_channel, xfer->c_drive, xfer->c_bcount, 2357 len); 2358 len = xfer->c_bcount; 2359 } 2360 2361 wdc->dataout_pio(chp, drvp->drive_flags, 2362 (char *)xfer->c_databuf + xfer->c_skip, len); 2363 2364 for (i = xfer->c_atapi.c_lenoff; i > 0; i -= 2) 2365 MVSATA_WDC_WRITE_2(mvport, SRB_PIOD, 0); 2366 2367 xfer->c_skip += len; 2368 xfer->c_bcount -= len; 2369 ata_channel_unlock(chp); 2370 return 1; 2371 2372 case PHASE_DATAIN: 2373 /* Read data */ 2374 DPRINTF(DEBUG_XFERS, ("PHASE_DATAIN\n")); 2375 if ((sc_xfer->xs_control & XS_CTL_DATA_IN) == 0 || 2376 (xfer->c_flags & C_DMA) != 0) { 2377 aprint_error_dev(atac->atac_dev, 2378 "channel %d drive %d: bad data phase DATAIN\n", 2379 chp->ch_channel, xfer->c_drive); 2380 
if (xfer->c_flags & C_DMA) 2381 ata_dmaerr(drvp, 2382 (xfer->c_flags & C_POLL) ? AT_POLL : 0); 2383 ata_channel_unlock(chp); 2384 sc_xfer->error = XS_TIMEOUT; 2385 mvsata_atapi_reset(chp, xfer); 2386 return 1; 2387 } 2388 xfer->c_atapi.c_lenoff = len - xfer->c_bcount; 2389 if (xfer->c_bcount < len) { 2390 aprint_error_dev(atac->atac_dev, "channel %d drive %d:" 2391 " warning: reading only %d of %d bytes\n", 2392 chp->ch_channel, xfer->c_drive, xfer->c_bcount, 2393 len); 2394 len = xfer->c_bcount; 2395 } 2396 2397 wdc->datain_pio(chp, drvp->drive_flags, 2398 (char *)xfer->c_databuf + xfer->c_skip, len); 2399 2400 if (xfer->c_atapi.c_lenoff > 0) 2401 wdcbit_bucket(chp, len - xfer->c_bcount); 2402 2403 xfer->c_skip += len; 2404 xfer->c_bcount -= len; 2405 ata_channel_unlock(chp); 2406 return 1; 2407 2408 case PHASE_ABORTED: 2409 case PHASE_COMPLETED: 2410 DPRINTF(DEBUG_XFERS, ("PHASE_COMPLETED\n")); 2411 if (xfer->c_flags & C_DMA) 2412 xfer->c_bcount -= sc_xfer->datalen; 2413 sc_xfer->resid = xfer->c_bcount; 2414 /* this will unlock channel lock too */ 2415 mvsata_atapi_phase_complete(xfer, tfd); 2416 return 1; 2417 2418 default: 2419 if (++retries<500) { 2420 DELAY(100); 2421 tfd = ATACH_ERR_ST( 2422 MVSATA_WDC_READ_1(mvport, SRB_FE), 2423 MVSATA_WDC_READ_1(mvport, SRB_CS) 2424 ); 2425 goto again; 2426 } 2427 aprint_error_dev(atac->atac_dev, 2428 "channel %d drive %d: unknown phase 0x%x\n", 2429 chp->ch_channel, xfer->c_drive, phase); 2430 if (ATACH_ST(tfd) & WDCS_ERR) { 2431 sc_xfer->error = XS_SHORTSENSE; 2432 sc_xfer->sense.atapi_sense = ATACH_ERR(tfd); 2433 } else { 2434 if (xfer->c_flags & C_DMA) 2435 ata_dmaerr(drvp, 2436 (xfer->c_flags & C_POLL) ? 
AT_POLL : 0); 2437 sc_xfer->error = XS_RESET; 2438 ata_channel_unlock(chp); 2439 mvsata_atapi_reset(chp, xfer); 2440 return (1); 2441 } 2442 } 2443 DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, 2444 ("mvsata_atapi_intr: %s (end), error 0x%x " 2445 "sense 0x%x\n", __func__, 2446 sc_xfer->error, sc_xfer->sense.atapi_sense)); 2447 ata_channel_unlock(chp); 2448 mvsata_atapi_done(chp, xfer); 2449 return 1; 2450 } 2451 2452 static void 2453 mvsata_atapi_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, 2454 int reason) 2455 { 2456 struct mvsata_port *mvport = (struct mvsata_port *)chp; 2457 struct scsipi_xfer *sc_xfer = xfer->c_scsipi; 2458 bool deactivate = true; 2459 2460 /* remove this command from xfer queue */ 2461 switch (reason) { 2462 case KILL_GONE_INACTIVE: 2463 deactivate = false; 2464 /* FALLTHROUGH */ 2465 case KILL_GONE: 2466 sc_xfer->error = XS_DRIVER_STUFFUP; 2467 break; 2468 case KILL_RESET: 2469 sc_xfer->error = XS_RESET; 2470 break; 2471 case KILL_REQUEUE: 2472 sc_xfer->error = XS_REQUEUE; 2473 break; 2474 default: 2475 aprint_error_dev(MVSATA_DEV2(mvport), 2476 "mvsata_atapi_kill_xfer: unknown reason %d\n", reason); 2477 panic("mvsata_atapi_kill_xfer"); 2478 } 2479 2480 if (deactivate) 2481 ata_deactivate_xfer(chp, xfer); 2482 2483 ata_free_xfer(chp, xfer); 2484 scsipi_done(sc_xfer); 2485 } 2486 2487 static void 2488 mvsata_atapi_reset(struct ata_channel *chp, struct ata_xfer *xfer) 2489 { 2490 struct mvsata_port *mvport = (struct mvsata_port *)chp; 2491 struct atac_softc *atac = chp->ch_atac; 2492 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive]; 2493 struct scsipi_xfer *sc_xfer = xfer->c_scsipi; 2494 int tfd; 2495 2496 ata_channel_lock(chp); 2497 2498 mvsata_pmp_select(mvport, xfer->c_drive); 2499 2500 wdccommandshort(chp, 0, ATAPI_SOFT_RESET); 2501 drvp->state = 0; 2502 if (wdc_wait_for_unbusy(chp, WDC_RESET_WAIT, AT_POLL, &tfd) != 0) { 2503 printf("%s:%d:%d: reset failed\n", device_xname(atac->atac_dev), 2504 chp->ch_channel, 
		    xfer->c_drive);
		sc_xfer->error = XS_SELTIMEOUT;
	}

	ata_channel_unlock(chp);

	mvsata_atapi_done(chp, xfer);
	return;
}

/*
 * Finish the completion phase of an ATAPI transfer: wait for DSC if the
 * drive requires it (re-arming a callout rather than busy-waiting),
 * record short-sense/DMA errors, then complete the xfer.
 * Called with the channel lock held; always unlocks it before returning.
 */
static void
mvsata_atapi_phase_complete(struct ata_xfer *xfer, int tfd)
{
	struct ata_channel *chp = xfer->c_chp;
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	ata_channel_lock_owned(chp);

	/* wait for DSC if needed */
	if (drvp->drive_flags & ATA_DRIVE_ATAPIDSCW) {
		DPRINTF(DEBUG_XFERS,
		    ("%s:%d:%d: mvsata_atapi_phase_complete: polldsc %d\n",
		    device_xname(atac->atac_dev), chp->ch_channel,
		    xfer->c_drive, xfer->c_atapi.c_dscpoll));
		if (cold)
			panic("mvsata_atapi_phase_complete: cold");

		if (wdcwait(chp, WDCS_DSC, WDCS_DSC, 10, AT_POLL, &tfd) ==
		    WDCWAIT_TOUT) {
			/* 10ms not enough, try again in 1 tick */
			if (xfer->c_atapi.c_dscpoll++ >
			    mstohz(sc_xfer->timeout)) {
				/* Whole command timeout exhausted: reset. */
				aprint_error_dev(atac->atac_dev,
				    "channel %d: wait_for_dsc failed\n",
				    chp->ch_channel);
				ata_channel_unlock(chp);
				sc_xfer->error = XS_TIMEOUT;
				mvsata_atapi_reset(chp, xfer);
			} else {
				callout_reset(&chp->c_timo_callout, 1,
				    mvsata_atapi_polldsc, chp);
				ata_channel_unlock(chp);
			}
			return;
		}
	}

	/*
	 * Some drive occasionally set WDCS_ERR with
	 * "ATA illegal length indication" in the error
	 * register.  If we read some data the sense is valid
	 * anyway, so don't report the error.
	 */
	if (ATACH_ST(tfd) & WDCS_ERR &&
	    ((sc_xfer->xs_control & XS_CTL_REQSENSE) == 0 ||
	    sc_xfer->resid == sc_xfer->datalen)) {
		/* save the short sense */
		sc_xfer->error = XS_SHORTSENSE;
		sc_xfer->sense.atapi_sense = ATACH_ERR(tfd);
		if ((sc_xfer->xs_periph->periph_quirks & PQUIRK_NOSENSE) == 0) {
			/* ask scsipi to send a REQUEST_SENSE */
			sc_xfer->error = XS_BUSY;
			sc_xfer->status = SCSI_CHECK;
		} else
		if (wdc->dma_status & (WDC_DMAST_NOIRQ | WDC_DMAST_ERR)) {
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			sc_xfer->error = XS_RESET;
			ata_channel_unlock(chp);
			mvsata_atapi_reset(chp, xfer);
			return;
		}
	}
	if (xfer->c_bcount != 0) {
		DPRINTF(DEBUG_XFERS, ("%s:%d:%d: mvsata_atapi_intr:"
		    " bcount value is %d after io\n",
		    device_xname(atac->atac_dev), chp->ch_channel,
		    xfer->c_drive, xfer->c_bcount));
	}
#ifdef DIAGNOSTIC
	if (xfer->c_bcount < 0) {
		aprint_error_dev(atac->atac_dev,
		    "channel %d drive %d: mvsata_atapi_intr:"
		    " warning: bcount value is %d after io\n",
		    chp->ch_channel, xfer->c_drive, xfer->c_bcount);
	}
#endif

	DPRINTF(DEBUG_XFERS,
	    ("%s:%d:%d: mvsata_atapi_phase_complete:"
	    " mvsata_atapi_done(), error 0x%x sense 0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    sc_xfer->error, sc_xfer->sense.atapi_sense));
	ata_channel_unlock(chp);
	mvsata_atapi_done(chp, xfer);
}

/*
 * Final completion of an ATAPI xfer: release the xfer, notify scsipi,
 * and restart the queue when the command succeeded.
 */
static void
mvsata_atapi_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
	bool iserror = (sc_xfer->error != XS_NOERROR);

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS,
	    ("%s:%d:%d: mvsata_atapi_done: flags 0x%x\n",
	    device_xname(chp->ch_atac->atac_dev), chp->ch_channel,
	    xfer->c_drive, (u_int)xfer->c_flags));

	if (ata_waitdrain_xfer_check(chp, xfer))
		return;

	/* mark controller inactive and free the command */
	ata_deactivate_xfer(chp, xfer);

	ata_free_xfer(chp, xfer);

	DPRINTF(DEBUG_FUNCS|DEBUG_XFERS,
	    ("%s:%d: mvsata_atapi_done: scsipi_done\n",
	    device_xname(chp->ch_atac->atac_dev), chp->ch_channel));
	scsipi_done(sc_xfer);
	DPRINTF(DEBUG_FUNCS,
	    ("%s:%d: atastart from wdc_atapi_done, flags 0x%x\n",
	    device_xname(chp->ch_atac->atac_dev), chp->ch_channel,
	    chp->ch_flags));
	if (!iserror)
		atastart(chp);
}

/*
 * Callout handler: retry the DSC wait for the currently active xfer.
 */
static void
mvsata_atapi_polldsc(void *arg)
{
	struct ata_channel *chp = arg;
	struct ata_xfer *xfer = ata_queue_get_active_xfer(chp);

	KASSERT(xfer != NULL);

	ata_channel_lock(chp);

	/* this will unlock channel lock too */
	mvsata_atapi_phase_complete(xfer, 0);
}
#endif	/* NATAPIBUS > 0 */


/*
 * XXXX: Shall we need lock for race condition in mvsata_edma_enqueue{,_gen2}(),
 * if supported queuing command by atabus?  The race condition will not happen
 * if this is called only to the thread of atabus.
2655 */ 2656 static int 2657 mvsata_edma_enqueue(struct mvsata_port *mvport, struct ata_xfer *xfer) 2658 { 2659 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 2660 struct ata_bio *ata_bio = &xfer->c_bio; 2661 void *databuf = (uint8_t *)xfer->c_databuf + xfer->c_skip; 2662 struct eprd *eprd; 2663 bus_addr_t crqb_base_addr; 2664 bus_dmamap_t data_dmamap; 2665 uint32_t reg; 2666 int erqqip, erqqop, next, rv, i; 2667 2668 DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, ("%s:%d:%d: mvsata_edma_enqueue:" 2669 " blkno=0x%" PRIx64 ", nbytes=%d, flags=0x%x\n", 2670 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc, 2671 mvport->port, ata_bio->blkno, ata_bio->nbytes, ata_bio->flags)); 2672 2673 reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQOP); 2674 erqqop = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT; 2675 reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP); 2676 erqqip = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT; 2677 next = erqqip; 2678 MVSATA_EDMAQ_INC(next); 2679 if (next == erqqop) { 2680 /* queue full */ 2681 return EBUSY; 2682 } 2683 DPRINTF(DEBUG_XFERS, 2684 (" erqqip=%d, quetag=%d\n", erqqip, xfer->c_slot)); 2685 2686 rv = mvsata_dma_bufload(mvport, xfer->c_slot, databuf, ata_bio->nbytes, 2687 ata_bio->flags); 2688 if (rv != 0) 2689 return rv; 2690 2691 /* setup EDMA Physical Region Descriptors (ePRD) Table Data */ 2692 data_dmamap = mvport->port_reqtbl[xfer->c_slot].data_dmamap; 2693 eprd = mvport->port_reqtbl[xfer->c_slot].eprd; 2694 for (i = 0; i < data_dmamap->dm_nsegs; i++) { 2695 bus_addr_t ds_addr = data_dmamap->dm_segs[i].ds_addr; 2696 bus_size_t ds_len = data_dmamap->dm_segs[i].ds_len; 2697 2698 eprd->prdbal = htole32(ds_addr & EPRD_PRDBAL_MASK); 2699 eprd->bytecount = htole32(EPRD_BYTECOUNT(ds_len)); 2700 eprd->eot = htole16(0); 2701 eprd->prdbah = htole32((ds_addr >> 16) >> 16); 2702 eprd++; 2703 } 2704 (eprd - 1)->eot |= htole16(EPRD_EOT); 2705 #ifdef MVSATA_DEBUG 2706 if (mvsata_debug >= 3) 2707 mvsata_print_eprd(mvport, 
xfer->c_slot); 2708 #endif 2709 bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap, 2710 mvport->port_reqtbl[xfer->c_slot].eprd_offset, MVSATA_EPRD_MAX_SIZE, 2711 BUS_DMASYNC_PREWRITE); 2712 2713 /* setup EDMA Command Request Block (CRQB) Data */ 2714 sc->sc_edma_setup_crqb(mvport, erqqip, xfer); 2715 #ifdef MVSATA_DEBUG 2716 if (mvsata_debug >= 3) 2717 mvsata_print_crqb(mvport, erqqip); 2718 #endif 2719 bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 2720 erqqip * sizeof(union mvsata_crqb), 2721 sizeof(union mvsata_crqb), BUS_DMASYNC_PREWRITE); 2722 2723 MVSATA_EDMAQ_INC(erqqip); 2724 2725 crqb_base_addr = mvport->port_crqb_dmamap->dm_segs[0].ds_addr & 2726 (EDMA_REQQP_ERQQBAP_MASK | EDMA_REQQP_ERQQBA_MASK); 2727 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, (crqb_base_addr >> 16) >> 16); 2728 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 2729 crqb_base_addr | (erqqip << EDMA_REQQP_ERQQP_SHIFT)); 2730 2731 return 0; 2732 } 2733 2734 static int 2735 mvsata_edma_handle(struct mvsata_port *mvport, struct ata_xfer *xfer1) 2736 { 2737 struct ata_channel *chp = &mvport->port_ata_channel; 2738 struct crpb *crpb; 2739 struct ata_bio *ata_bio; 2740 struct ata_xfer *xfer; 2741 uint32_t reg; 2742 int erqqop, erpqip, erpqop, prev_erpqop, quetag, handled = 0, n; 2743 int st, dmaerr; 2744 2745 /* First, Sync for Request Queue buffer */ 2746 reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQOP); 2747 erqqop = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT; 2748 if (mvport->port_prev_erqqop != erqqop) { 2749 const int s = sizeof(union mvsata_crqb); 2750 2751 if (mvport->port_prev_erqqop < erqqop) 2752 n = erqqop - mvport->port_prev_erqqop; 2753 else { 2754 if (erqqop > 0) 2755 bus_dmamap_sync(mvport->port_dmat, 2756 mvport->port_crqb_dmamap, 0, erqqop * s, 2757 BUS_DMASYNC_POSTWRITE); 2758 n = MVSATA_EDMAQ_LEN - mvport->port_prev_erqqop; 2759 } 2760 if (n > 0) 2761 bus_dmamap_sync(mvport->port_dmat, 2762 mvport->port_crqb_dmamap, 2763 mvport->port_prev_erqqop * s, n 
			    * s,
			    BUS_DMASYNC_POSTWRITE);
		mvport->port_prev_erqqop = erqqop;
	}

	/* Read response-queue in/out pointers. */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_RESQIP);
	erpqip = (reg & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT;
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_RESQOP);
	erpqop = (reg & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT;

	DPRINTF(DEBUG_XFERS,
	    ("%s:%d:%d: mvsata_edma_handle: erpqip=%d, erpqop=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
	    mvport->port, erpqip, erpqop));

	if (erpqop == erpqip)
		return 0;

	/* Sync the response-queue entries we are about to read. */
	if (erpqop < erpqip)
		n = erpqip - erpqop;
	else {
		if (erpqip > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crpb_dmamap,
			    0, erpqip * sizeof(struct crpb),
			    BUS_DMASYNC_POSTREAD);
		n = MVSATA_EDMAQ_LEN - erpqop;
	}
	if (n > 0)
		bus_dmamap_sync(mvport->port_dmat, mvport->port_crpb_dmamap,
		    erpqop * sizeof(struct crpb),
		    n * sizeof(struct crpb), BUS_DMASYNC_POSTREAD);

	uint32_t aslots = ata_queue_active(chp);

	prev_erpqop = erpqop;
	while (erpqop != erpqip) {
#ifdef MVSATA_DEBUG
		if (mvsata_debug >= 3)
			mvsata_print_crpb(mvport, erpqop);
#endif
		crpb = mvport->port_crpb + erpqop;
		MVSATA_EDMAQ_INC(erpqop);

		quetag = CRPB_CHOSTQUETAG(le16toh(crpb->id));

		if ((aslots & __BIT(quetag)) == 0) {
			/* not actually executing */
			continue;
		}

		xfer = ata_queue_hwslot_to_xfer(chp, quetag);

		bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap,
		    mvport->port_reqtbl[xfer->c_slot].eprd_offset,
		    MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_POSTWRITE);

		/* Extract device status and EDMA error from the CRPB. */
		st = CRPB_CDEVSTS(le16toh(crpb->rspflg));
		dmaerr = CRPB_CEDMASTS(le16toh(crpb->rspflg));

		ata_bio = &xfer->c_bio;
		ata_bio->error = NOERROR;
		if (dmaerr != 0)
			ata_bio->error = ERR_DMA;

		mvsata_dma_bufunload(mvport, quetag, ata_bio->flags);

		KASSERT(xfer->c_flags & C_DMA);
		mvsata_bio_intr(chp, xfer, ATACH_ERR_ST(0, st));

		if (xfer1 == NULL)
			handled++;
		else if (xfer == xfer1) {
			handled = 1;
			break;
		}
	}
	/* Hand the consumed entries back to the device (PREREAD). */
	if (prev_erpqop < erpqop)
		n = erpqop - prev_erpqop;
	else {
		if (erpqop > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crpb_dmamap, 0,
			    erpqop * sizeof(struct crpb), BUS_DMASYNC_PREREAD);
		n = MVSATA_EDMAQ_LEN - prev_erpqop;
	}
	if (n > 0)
		bus_dmamap_sync(mvport->port_dmat, mvport->port_crpb_dmamap,
		    prev_erpqop * sizeof(struct crpb),
		    n * sizeof(struct crpb), BUS_DMASYNC_PREREAD);

	/* Acknowledge consumption by advancing the out-pointer. */
	reg &= ~EDMA_RESQP_ERPQP_MASK;
	reg |= (erpqop << EDMA_RESQP_ERPQP_SHIFT);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, reg);

	return handled;
}

/*
 * Busy-wait for completion of the given xfer via mvsata_edma_handle().
 * Returns 0 on completion; on timeout, removes the xfer from the request
 * queue, flags it C_TIMEOU and returns 1.
 */
static int
mvsata_edma_wait(struct mvsata_port *mvport, struct ata_xfer *xfer, int timeout)
{
	int xtime;

	for (xtime = 0; xtime < timeout * 10; xtime++) {
		if (mvsata_edma_handle(mvport, xfer))
			return 0;
		DELAY(100);
	}

	DPRINTF(DEBUG_FUNCS, ("%s: timeout: %p\n", __func__, xfer));
	mvsata_edma_rqq_remove(mvport, xfer);
	xfer->c_flags |= C_TIMEOU;
	return 1;
}

/*
 * Remove an xfer from the EDMA request queue: hard-reset the port,
 * drain anything already completed, then rebuild the request queue from
 * the still-active slots while skipping the xfer being removed.
 */
static void
mvsata_edma_rqq_remove(struct mvsata_port *mvport, struct ata_xfer *xfer)
{
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	bus_addr_t crqb_base_addr;
	int erqqip, i;

	/* First, hardware reset, stop EDMA */
	mvsata_hreset_port(mvport);

	/* cleanup completed EDMA safely */
	mvsata_edma_handle(mvport, NULL);

	bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 0,
	    sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN, BUS_DMASYNC_PREWRITE);

	uint32_t aslots = ata_queue_active(chp);

	for (i = 0, erqqip = 0; i < MVSATA_EDMAQ_LEN; i++) {
		struct
ata_xfer *rqxfer; 2899 2900 if ((aslots & __BIT(i)) == 0) 2901 continue; 2902 2903 if (i == xfer->c_slot) { 2904 /* remove xfer from EDMA request queue */ 2905 bus_dmamap_sync(mvport->port_dmat, 2906 mvport->port_eprd_dmamap, 2907 mvport->port_reqtbl[i].eprd_offset, 2908 MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_POSTWRITE); 2909 mvsata_dma_bufunload(mvport, i, xfer->c_bio.flags); 2910 /* quetag freed by caller later */ 2911 continue; 2912 } 2913 2914 rqxfer = ata_queue_hwslot_to_xfer(chp, i); 2915 sc->sc_edma_setup_crqb(mvport, erqqip, rqxfer); 2916 erqqip++; 2917 } 2918 bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 0, 2919 sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN, 2920 BUS_DMASYNC_POSTWRITE); 2921 2922 mvsata_edma_config(mvport, mvport->port_edmamode_curr); 2923 mvsata_edma_reset_qptr(mvport); 2924 mvsata_edma_enable(mvport); 2925 2926 crqb_base_addr = mvport->port_crqb_dmamap->dm_segs[0].ds_addr & 2927 (EDMA_REQQP_ERQQBAP_MASK | EDMA_REQQP_ERQQBA_MASK); 2928 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, (crqb_base_addr >> 16) >> 16); 2929 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 2930 crqb_base_addr | (erqqip << EDMA_REQQP_ERQQP_SHIFT)); 2931 } 2932 2933 #if NATAPIBUS > 0 2934 static int 2935 mvsata_bdma_init(struct mvsata_port *mvport, struct ata_xfer *xfer) 2936 { 2937 struct scsipi_xfer *sc_xfer = xfer->c_scsipi; 2938 struct eprd *eprd; 2939 bus_dmamap_t data_dmamap; 2940 bus_addr_t eprd_addr; 2941 int i, rv; 2942 void *databuf = (uint8_t *)xfer->c_databuf + xfer->c_skip; 2943 2944 DPRINTF(DEBUG_FUNCS|DEBUG_XFERS, 2945 ("%s:%d:%d: mvsata_bdma_init: datalen=%d, xs_control=0x%x\n", 2946 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc, 2947 mvport->port, sc_xfer->datalen, sc_xfer->xs_control)); 2948 2949 rv = mvsata_dma_bufload(mvport, xfer->c_slot, databuf, 2950 sc_xfer->datalen, 2951 sc_xfer->xs_control & XS_CTL_DATA_IN ? 
ATA_READ : 0); 2952 if (rv != 0) 2953 return rv; 2954 2955 /* setup EDMA Physical Region Descriptors (ePRD) Table Data */ 2956 data_dmamap = mvport->port_reqtbl[xfer->c_slot].data_dmamap; 2957 eprd = mvport->port_reqtbl[xfer->c_slot].eprd; 2958 for (i = 0; i < data_dmamap->dm_nsegs; i++) { 2959 bus_addr_t ds_addr = data_dmamap->dm_segs[i].ds_addr; 2960 bus_size_t ds_len = data_dmamap->dm_segs[i].ds_len; 2961 2962 eprd->prdbal = htole32(ds_addr & EPRD_PRDBAL_MASK); 2963 eprd->bytecount = htole32(EPRD_BYTECOUNT(ds_len)); 2964 eprd->eot = htole16(0); 2965 eprd->prdbah = htole32((ds_addr >> 16) >> 16); 2966 eprd++; 2967 } 2968 (eprd - 1)->eot |= htole16(EPRD_EOT); 2969 #ifdef MVSATA_DEBUG 2970 if (mvsata_debug >= 3) 2971 mvsata_print_eprd(mvport, xfer->c_slot); 2972 #endif 2973 bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap, 2974 mvport->port_reqtbl[xfer->c_slot].eprd_offset, 2975 MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_PREWRITE); 2976 eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr + 2977 mvport->port_reqtbl[xfer->c_slot].eprd_offset; 2978 2979 MVSATA_EDMA_WRITE_4(mvport, DMA_DTLBA, eprd_addr & DMA_DTLBA_MASK); 2980 MVSATA_EDMA_WRITE_4(mvport, DMA_DTHBA, (eprd_addr >> 16) >> 16); 2981 2982 if (sc_xfer->xs_control & XS_CTL_DATA_IN) 2983 MVSATA_EDMA_WRITE_4(mvport, DMA_C, DMA_C_READ); 2984 else 2985 MVSATA_EDMA_WRITE_4(mvport, DMA_C, 0); 2986 2987 return 0; 2988 } 2989 2990 static void 2991 mvsata_bdma_start(struct mvsata_port *mvport) 2992 { 2993 2994 #ifdef MVSATA_DEBUG 2995 if (mvsata_debug >= 3) 2996 mvsata_print_eprd(mvport, 0); 2997 #endif 2998 2999 MVSATA_EDMA_WRITE_4(mvport, DMA_C, 3000 MVSATA_EDMA_READ_4(mvport, DMA_C) | DMA_C_START); 3001 } 3002 #endif 3003 #endif 3004 3005 3006 static int 3007 mvsata_port_init(struct mvsata_hc *mvhc, int port) 3008 { 3009 struct mvsata_softc *sc = mvhc->hc_sc; 3010 struct mvsata_port *mvport; 3011 struct ata_channel *chp; 3012 int channel, rv, i; 3013 const int crqbq_size = sizeof(union mvsata_crqb) * 
MVSATA_EDMAQ_LEN; 3014 const int crpbq_size = sizeof(struct crpb) * MVSATA_EDMAQ_LEN; 3015 const int eprd_buf_size = MVSATA_EPRD_MAX_SIZE * MVSATA_EDMAQ_LEN; 3016 3017 mvport = malloc(sizeof(struct mvsata_port), M_DEVBUF, 3018 M_ZERO | M_WAITOK); 3019 mvport->port = port; 3020 mvport->port_hc = mvhc; 3021 mvport->port_edmamode_negotiated = nodma; 3022 3023 rv = bus_space_subregion(mvhc->hc_iot, mvhc->hc_ioh, 3024 EDMA_REGISTERS_OFFSET + port * EDMA_REGISTERS_SIZE, 3025 EDMA_REGISTERS_SIZE, &mvport->port_ioh); 3026 if (rv != 0) { 3027 aprint_error("%s:%d: can't subregion EDMA %d registers\n", 3028 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 3029 goto fail0; 3030 } 3031 mvport->port_iot = mvhc->hc_iot; 3032 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, SATA_SS, 4, 3033 &mvport->port_sata_sstatus); 3034 if (rv != 0) { 3035 aprint_error("%s:%d:%d: couldn't subregion sstatus regs\n", 3036 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 3037 goto fail0; 3038 } 3039 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, SATA_SE, 4, 3040 &mvport->port_sata_serror); 3041 if (rv != 0) { 3042 aprint_error("%s:%d:%d: couldn't subregion serror regs\n", 3043 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 3044 goto fail0; 3045 } 3046 if (sc->sc_rev == gen1) 3047 rv = bus_space_subregion(mvhc->hc_iot, mvhc->hc_ioh, 3048 SATAHC_I_R02(port), 4, &mvport->port_sata_scontrol); 3049 else 3050 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, 3051 SATA_SC, 4, &mvport->port_sata_scontrol); 3052 if (rv != 0) { 3053 aprint_error("%s:%d:%d: couldn't subregion scontrol regs\n", 3054 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 3055 goto fail0; 3056 } 3057 mvport->port_dmat = sc->sc_dmat; 3058 mvhc->hc_ports[port] = mvport; 3059 3060 channel = mvhc->hc * sc->sc_port + port; 3061 chp = &mvport->port_ata_channel; 3062 chp->ch_channel = channel; 3063 chp->ch_atac = &sc->sc_wdcdev.sc_atac; 3064 chp->ch_queue = ata_queue_alloc(MVSATA_EDMAQ_LEN); 3065 
sc->sc_ata_channels[channel] = chp; 3066 3067 rv = mvsata_wdc_reg_init(mvport, sc->sc_wdcdev.regs + channel); 3068 if (rv != 0) 3069 goto fail0; 3070 3071 rv = bus_dmamap_create(mvport->port_dmat, crqbq_size, 1, crqbq_size, 0, 3072 BUS_DMA_NOWAIT, &mvport->port_crqb_dmamap); 3073 if (rv != 0) { 3074 aprint_error( 3075 "%s:%d:%d: EDMA CRQB map create failed: error=%d\n", 3076 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv); 3077 goto fail0; 3078 } 3079 rv = bus_dmamap_create(mvport->port_dmat, crpbq_size, 1, crpbq_size, 0, 3080 BUS_DMA_NOWAIT, &mvport->port_crpb_dmamap); 3081 if (rv != 0) { 3082 aprint_error( 3083 "%s:%d:%d: EDMA CRPB map create failed: error=%d\n", 3084 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv); 3085 goto fail1; 3086 } 3087 rv = bus_dmamap_create(mvport->port_dmat, eprd_buf_size, 1, 3088 eprd_buf_size, 0, BUS_DMA_NOWAIT, &mvport->port_eprd_dmamap); 3089 if (rv != 0) { 3090 aprint_error( 3091 "%s:%d:%d: EDMA ePRD buffer map create failed: error=%d\n", 3092 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv); 3093 goto fail2; 3094 } 3095 for (i = 0; i < MVSATA_EDMAQ_LEN; i++) { 3096 rv = bus_dmamap_create(mvport->port_dmat, MAXPHYS, 3097 MVSATA_MAX_SEGS, MAXPHYS, 0, BUS_DMA_NOWAIT, 3098 &mvport->port_reqtbl[i].data_dmamap); 3099 if (rv != 0) { 3100 aprint_error("%s:%d:%d:" 3101 " EDMA data map(%d) create failed: error=%d\n", 3102 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, i, 3103 rv); 3104 goto fail3; 3105 } 3106 } 3107 3108 return 0; 3109 3110 fail3: 3111 for (i--; i >= 0; i--) 3112 bus_dmamap_destroy(mvport->port_dmat, 3113 mvport->port_reqtbl[i].data_dmamap); 3114 bus_dmamap_destroy(mvport->port_dmat, mvport->port_eprd_dmamap); 3115 fail2: 3116 bus_dmamap_destroy(mvport->port_dmat, mvport->port_crpb_dmamap); 3117 fail1: 3118 bus_dmamap_destroy(mvport->port_dmat, mvport->port_crqb_dmamap); 3119 fail0: 3120 return rv; 3121 } 3122 3123 static int 3124 mvsata_wdc_reg_init(struct mvsata_port *mvport, struct wdc_regs *wdr) 3125 { 3126 int hc, 
	    port, rv, i;

	hc = mvport->port_hc->hc;
	port = mvport->port;

	/* Create subregion for Shadow Registers Map */
	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
	    SHADOW_REG_BLOCK_OFFSET, SHADOW_REG_BLOCK_SIZE, &wdr->cmd_baseioh);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion shadow block regs\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}
	wdr->cmd_iot = mvport->port_iot;

	/*
	 * Once create subregion for each command register; registers are
	 * spaced 4 bytes apart in the shadow block.
	 */
	for (i = 0; i < WDC_NREG; i++) {
		rv = bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
		    i * 4, sizeof(uint32_t), &wdr->cmd_iohs[i]);
		if (rv != 0) {
			aprint_error("%s:%d:%d: couldn't subregion cmd regs\n",
			    device_xname(MVSATA_DEV2(mvport)), hc, port);
			return rv;
		}
	}
	/*
	 * Create subregion for Alternate Status register (the slot right
	 * after the WDC_NREG command registers, hence i == WDC_NREG here).
	 */
	rv = bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
	    i * 4, sizeof(uint32_t), &wdr->ctl_ioh);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion cmd regs\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}
	wdr->ctl_iot = mvport->port_iot;

	wdc_init_shadow_regs(wdr);

	/* SStatus/SControl/SError live consecutively starting at SATA_SS. */
	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
	    SATA_SS, sizeof(uint32_t) * 3, &wdr->sata_baseioh);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion SATA regs\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}
	wdr->sata_iot = mvport->port_iot;
	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
	    SATA_SC, sizeof(uint32_t), &wdr->sata_control);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion SControl\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}
	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
	    SATA_SS, sizeof(uint32_t), &wdr->sata_status);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion SStatus\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}
	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
	    SATA_SE, sizeof(uint32_t), &wdr->sata_error);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion SError\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}

	return 0;
}


#ifndef MVSATA_WITHOUTDMA
/*
 * Allocate, map and load a DMA-safe memory region of `size' bytes for
 * one of the EDMA rings.  On success the kernel virtual address is
 * returned and *dmamap is loaded; on failure NULL is returned and all
 * partially acquired resources are released (goto-cleanup chain).
 */
static void *
mvsata_edma_resource_prepare(struct mvsata_port *mvport, bus_dma_tag_t dmat,
    bus_dmamap_t *dmamap, size_t size, int write)
{
	bus_dma_segment_t seg;
	int nseg, rv;
	void *kva;

	rv = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, &seg, 1, &nseg,
	    BUS_DMA_NOWAIT);
	if (rv != 0) {
		aprint_error("%s:%d:%d: DMA memory alloc failed: error=%d\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port, rv);
		goto fail;
	}

	rv = bus_dmamem_map(dmat, &seg, nseg, size, &kva, BUS_DMA_NOWAIT);
	if (rv != 0) {
		aprint_error("%s:%d:%d: DMA memory map failed: error=%d\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port, rv);
		goto free;
	}

	rv = bus_dmamap_load(dmat, *dmamap, kva, size, NULL,
	    BUS_DMA_NOWAIT | (write ? BUS_DMA_WRITE : BUS_DMA_READ));
	if (rv != 0) {
		aprint_error("%s:%d:%d: DMA map load failed: error=%d\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port, rv);
		goto unmap;
	}

	/* Device-written rings are pre-armed for reading right away. */
	if (!write)
		bus_dmamap_sync(dmat, *dmamap, 0, size, BUS_DMASYNC_PREREAD);

	return kva;

unmap:
	bus_dmamem_unmap(dmat, kva, size);
free:
	bus_dmamem_free(dmat, &seg, nseg);
fail:
	return NULL;
}

/*
 * Undo mvsata_edma_resource_prepare(): unload, unmap and free the DMA
 * memory backing `dmamap'.
 */
/* ARGSUSED */
static void
mvsata_edma_resource_purge(struct mvsata_port *mvport, bus_dma_tag_t dmat,
    bus_dmamap_t dmamap, void *kva)
{

	bus_dmamap_unload(dmat, dmamap);
	bus_dmamem_unmap(dmat, kva, dmamap->dm_mapsize);
	bus_dmamem_free(dmat, dmamap->dm_segs, dmamap->dm_nsegs);
}

/*
 * Load the data buffer of request slot `index' into its DMA map and
 * sync it for the transfer direction given by ATA_READ in `flags'.
 * Returns 0 or the bus_dmamap_load() errno.
 */
static int
mvsata_dma_bufload(struct mvsata_port *mvport, int index, void *databuf,
    size_t datalen, int flags)
{
	int rv, lop, sop;
	bus_dmamap_t data_dmamap = mvport->port_reqtbl[index].data_dmamap;

	lop = (flags & ATA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE;
	sop = (flags & ATA_READ) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;

	rv = bus_dmamap_load(mvport->port_dmat, data_dmamap, databuf, datalen,
	    NULL, BUS_DMA_NOWAIT | lop);
	if (rv) {
		aprint_error("%s:%d:%d: buffer load failed: error=%d\n",
		    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
		    mvport->port, rv);
		return rv;
	}
	bus_dmamap_sync(mvport->port_dmat, data_dmamap, 0,
	    data_dmamap->dm_mapsize, sop);

	return 0;
}

/* Post-sync and unload the data buffer of request slot `index'. */
static inline void
mvsata_dma_bufunload(struct mvsata_port *mvport, int index, int flags)
{
	bus_dmamap_t data_dmamap = mvport->port_reqtbl[index].data_dmamap;

	bus_dmamap_sync(mvport->port_dmat, data_dmamap, 0,
	    data_dmamap->dm_mapsize,
	    (flags & ATA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mvport->port_dmat, data_dmamap);
}
#endif

/*
 * Hard-reset the port: pulse the EDMA ATA-reset bit, then re-apply the
 * generation-specific PHY fixups.
 */
static void
mvsata_hreset_port(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EATARST);

	delay(25);		/* allow reset propagation */

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, 0);

	mvport->_fix_phy_param._fix_phy(mvport);

	/* GenI needs extra settling time after reset. */
	if (sc->sc_gen == gen1)
		delay(1000);
}

/*
 * Full port reset: disable EDMA, hard-reset, then reinitialize the
 * EDMA configuration, interrupt and queue-pointer registers to a clean
 * state.
 */
static void
mvsata_reset_port(struct mvsata_port *mvport)
{
	device_t parent = device_parent(MVSATA_DEV2(mvport));

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EDSEDMA);

	mvsata_hreset_port(mvport);

	/* PCI and SoC attachments need different EDMA_CFG baselines. */
	if (device_is_a(parent, "pci"))
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG,
		    EDMA_CFG_RESERVED | EDMA_CFG_ERDBSZ);
	else		/* SoC */
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG,
		    EDMA_CFG_RESERVED | EDMA_CFG_RESERVED2);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_T, 0);
	MVSATA_EDMA_WRITE_4(mvport, SATA_SEIM, 0x019c0000);
	MVSATA_EDMA_WRITE_4(mvport, SATA_SE, ~0);
	MVSATA_EDMA_WRITE_4(mvport, SATA_FISIC, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEM, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQOP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_TC, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IORT, 0xbc);
}

/* Reset a host controller: clear its coalescing and interrupt state. */
static void
mvsata_reset_hc(struct mvsata_hc *mvhc)
{
#if 0
	uint32_t val;
#endif

	MVSATA_HC_WRITE_4(mvhc, SATAHC_ICT, 0);
	MVSATA_HC_WRITE_4(mvhc, SATAHC_ITT, 0);
	MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, 0);

#if 0	/* XXXX needs? */
	MVSATA_HC_WRITE_4(mvhc, 0x01c, 0);

	/*
	 * Keep the SS during power on and the reference clock bits (reset
	 * sample)
	 */
	val = MVSATA_HC_READ_4(mvhc, 0x020);
	val &= 0x1c1c1c1c;
	val |= 0x03030303;
	MVSATA_HC_READ_4(mvhc, 0x020, 0);
#endif
}

/*
 * ATA soft reset through the shadow device-control register.  Waits up
 * to WDC_RESET_WAIT ms for BSY to deassert and returns the 32-bit
 * device signature (sc/lbal/lbam/lbah), or ~0 on timeout.
 */
static uint32_t
mvsata_softreset(struct mvsata_port *mvport, int flags)
{
	struct ata_channel *chp = &mvport->port_ata_channel;
	uint32_t sig0 = ~0;
	int timeout;
	uint8_t st0;

	ata_channel_lock_owned(chp);

	/* Assert then deassert SRST, interrupts kept disabled. */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_RST | WDCTL_IDS | WDCTL_4BIT);
	delay(10);
	(void) MVSATA_WDC_READ_1(mvport, SRB_FE);
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_IDS | WDCTL_4BIT);
	delay(10);

	/* wait for BSY to deassert */
	for (timeout = 0; timeout < WDC_RESET_WAIT / 10; timeout++) {
		st0 = MVSATA_WDC_READ_1(mvport, SRB_CS);

		if ((st0 & WDCS_BSY) == 0) {
			sig0 = MVSATA_WDC_READ_1(mvport, SRB_SC) << 0;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 8;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 16;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 24;
			goto out;
		}
		ata_delay(chp, 10, "atarst", flags);
	}

	aprint_error("%s:%d:%d: %s: timeout\n",
	    device_xname(MVSATA_DEV2(mvport)),
	    mvport->port_hc->hc, mvport->port, __func__);

out:
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	return sig0;
}

#ifndef MVSATA_WITHOUTDMA
/*
 * Reset the EDMA request/response queue pointers; the response queue
 * base is reloaded from the CRPB DMA map.
 */
static void
mvsata_edma_reset_qptr(struct mvsata_port *mvport)
{
	const bus_addr_t crpb_addr =
	    mvport->port_crpb_dmamap->dm_segs[0].ds_addr;
	const uint32_t crpb_addr_mask =
	    EDMA_RESQP_ERPQBAP_MASK | EDMA_RESQP_ERPQBA_MASK;

	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQOP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQBAH, (crpb_addr >> 16) >> 16);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, (crpb_addr & crpb_addr_mask));
}

/* Enable the EDMA engine on this port. */
static inline void
mvsata_edma_enable(struct mvsata_port *mvport)
{

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EENEDMA);
}

/*
 * Disable the EDMA engine, waiting up to `timeout' ms for the
 * self-negating disable bit to take effect.
 */
static void
mvsata_edma_disable(struct mvsata_port *mvport, int timeout, int wflags)
{
	struct ata_channel *chp = &mvport->port_ata_channel;
	uint32_t command;
	int t;

	ata_channel_lock_owned(chp);

	/* The disable bit (eDsEDMA) is self negated. */
	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EDSEDMA);

	/* Convert ms to ticks, rounding up to at least one tick. */
	timeout = mstohz(timeout + hztoms(1) - 1);

	for (t = 0; ; ++t) {
		command = MVSATA_EDMA_READ_4(mvport, EDMA_CMD);
		if (!(command & EDMA_CMD_EENEDMA))
			return;
		if (t >= timeout)
			break;
		ata_delay(chp, hztoms(1), "mvsata_edma2", wflags);
	}

	aprint_error("%s:%d:%d: unable to disable EDMA\n",
	    device_xname(MVSATA_DEV2(mvport)),
	    mvport->port_hc->hc, mvport->port);
}

/*
 * Set EDMA registers according to mode.
 * ex. NCQ/TCQ(queued)/non queued.
 */
static void
mvsata_edma_config(struct mvsata_port *mvport, enum mvsata_edmamode mode)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	uint32_t reg;

	reg = MVSATA_EDMA_READ_4(mvport, EDMA_CFG);
	reg |= EDMA_CFG_RESERVED;

	if (mode == ncq) {
		/* NCQ is a GenII+ feature. */
		if (sc->sc_gen == gen1) {
			aprint_error_dev(MVSATA_DEV2(mvport),
			    "GenI not support NCQ\n");
			return;
		} else if (sc->sc_gen == gen2)
			reg |= EDMA_CFG_EDEVERR;
		reg |= EDMA_CFG_ESATANATVCMDQUE;
	} else if (mode == queued) {
		reg &= ~EDMA_CFG_ESATANATVCMDQUE;
		reg |= EDMA_CFG_EQUE;
	} else
		reg &= ~(EDMA_CFG_ESATANATVCMDQUE | EDMA_CFG_EQUE);

	/* Generation-specific burst/buffer configuration. */
	if (sc->sc_gen == gen1)
		reg |= EDMA_CFG_ERDBSZ;
	else if (sc->sc_gen == gen2)
		reg |= (EDMA_CFG_ERDBSZEXT | EDMA_CFG_EWRBUFFERLEN);
	else if (sc->sc_gen == gen2e) {
		device_t parent = device_parent(MVSATA_DEV(sc));

		reg |= (EDMA_CFG_EMASKRXPM | EDMA_CFG_EHOSTQUEUECACHEEN);
		reg &= ~(EDMA_CFG_EEDMAFBS | EDMA_CFG_EEDMAQUELEN);

		if (device_is_a(parent, "pci"))
			reg |= (
#if NATAPIBUS > 0
			    EDMA_CFG_EEARLYCOMPLETIONEN |
#endif
			    EDMA_CFG_ECUTTHROUGHEN |
			    EDMA_CFG_EWRBUFFERLEN |
			    EDMA_CFG_ERDBSZEXT);
	}
	MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG, reg);

	/* Interrupt-error mask: base set, plus link errors on GenII+. */
	reg = (
	    EDMA_IE_EIORDYERR |
	    EDMA_IE_ETRANSINT |
	    EDMA_IE_EDEVCON |
	    EDMA_IE_EDEVDIS);
	if (sc->sc_gen != gen1)
		reg |= (
		    EDMA_IE_TRANSPROTERR |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKTXERR_FISTXABORTED) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_ESELFDIS);

	if (mode == ncq)
		reg |= EDMA_IE_EDEVERR;
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEM, reg);
	/* Device-error halt is only wanted outside NCQ mode. */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_HC);
	reg &= ~EDMA_IE_EDEVERR;
	if (mode != ncq)
		reg |= EDMA_IE_EDEVERR;
	MVSATA_EDMA_WRITE_4(mvport, EDMA_HC, reg);
	if (sc->sc_gen == gen2e) {
		/*
		 * Clear FISWait4HostRdyEn[0] and [2].
		 * [0]: Device to Host FIS with <ERR> or <DF> bit set to 1.
		 * [2]: SDB FIS is received with <ERR> bit set to 1.
		 */
		reg = MVSATA_EDMA_READ_4(mvport, SATA_FISC);
		reg &= ~(SATA_FISC_FISWAIT4HOSTRDYEN_B0 |
		    SATA_FISC_FISWAIT4HOSTRDYEN_B2);
		MVSATA_EDMA_WRITE_4(mvport, SATA_FISC, reg);
	}

	mvport->port_edmamode_curr = mode;
}


/*
 * Generation dependent functions
 */

/*
 * Build the CRQB (request-queue entry) for `xfer' at ring index
 * `erqqip' in the GenI/GenII format: ePRD table address, control flags
 * and the shadow-register programming sequence for a DMA read/write.
 */
static void
mvsata_edma_setup_crqb(struct mvsata_port *mvport, int erqqip,
    struct ata_xfer *xfer)
{
	struct crqb *crqb;
	bus_addr_t eprd_addr;
	daddr_t blkno;
	uint32_t rw;
	uint8_t cmd, head;
	int i;
	struct ata_bio *ata_bio = &xfer->c_bio;

	eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr +
	    mvport->port_reqtbl[xfer->c_slot].eprd_offset;
	rw = (ata_bio->flags & ATA_READ) ? CRQB_CDIR_READ : CRQB_CDIR_WRITE;
	cmd = (ata_bio->flags & ATA_READ) ?
	    WDCC_READDMA : WDCC_WRITEDMA;
	if (ata_bio->flags & (ATA_LBA|ATA_LBA48)) {
		head = WDSD_LBA;
	} else {
		head = 0;
	}
	blkno = ata_bio->blkno;
	if (ata_bio->flags & ATA_LBA48)
		cmd = atacmd_to48(cmd);
	else {
		/* LBA28: top nibble of the block number goes in DEVICE. */
		head |= ((ata_bio->blkno >> 24) & 0xf);
		blkno &= 0xffffff;
	}
	crqb = &mvport->port_crqb->crqb + erqqip;
	crqb->cprdbl = htole32(eprd_addr & CRQB_CRQBL_EPRD_MASK);
	crqb->cprdbh = htole32((eprd_addr >> 16) >> 16);
	crqb->ctrlflg =
	    htole16(rw | CRQB_CHOSTQUETAG(xfer->c_slot) |
	    CRQB_CPMPORT(xfer->c_drive));
	i = 0;
	if (mvport->port_edmamode_curr == dma) {
		/* Plain DMA: sector count goes in SECTORCOUNT. */
		if (ata_bio->flags & ATA_LBA48)
			crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
			    CRQB_ATACOMMAND_SECTORCOUNT, ata_bio->nblks >> 8));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_SECTORCOUNT, ata_bio->nblks));
	} else { /* ncq/queued */

		/*
		 * XXXX: Oops, ata command is not correct. And, atabus layer
		 * has not been supported yet now.
		 * Queued DMA read/write.
		 * read/write FPDMAQueued.
		 */

		/* Queued: count in FEATURES, tag in SECTORCOUNT bits 7:3. */
		if (ata_bio->flags & ATA_LBA48)
			crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
			    CRQB_ATACOMMAND_FEATURES, ata_bio->nblks >> 8));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_FEATURES, ata_bio->nblks));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_SECTORCOUNT, xfer->c_slot << 3));
	}
	if (ata_bio->flags & ATA_LBA48) {
		/* High-order LBA bytes first (previous-register values). */
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBALOW, blkno >> 24));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBAMID, blkno >> 32));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBAHIGH, blkno >> 40));
	}
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBALOW, blkno));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBAMID, blkno >> 8));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBAHIGH, blkno >> 16));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_DEVICE, head));
	/* COMMAND must be last; it triggers command issue. */
	crqb->atacommand[i++] = htole16(
	    CRQB_ATACOMMAND(CRQB_ATACOMMAND_COMMAND, cmd) |
	    CRQB_ATACOMMAND_LAST);
}
#endif

/* Read the saved pre-emphasis/amplitude bits from the GenI PHY mode reg. */
static uint32_t
mvsata_read_preamps_gen1(struct mvsata_port *mvport)
{
	struct mvsata_hc *hc = mvport->port_hc;
	uint32_t reg;

	reg = MVSATA_HC_READ_4(hc, SATAHC_I_PHYMODE(mvport->port));
	/*
	 * [12:11] : pre
	 * [7:5] : amps
	 */
	return reg & 0x000018e0;
}

/*
 * Apply GenI (88SX50xx) PHY errata workarounds and restore the saved
 * pre-emphasis/amplitude values.  The APM/squelch fixes apply only to
 * the chip revisions determined below.
 */
static void
mvsata_fix_phy_gen1(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct mvsata_hc *mvhc = mvport->port_hc;
	uint32_t reg;
	int port = mvport->port, fix_apm_sq = 0;

	if (sc->sc_model == PCI_PRODUCT_MARVELL_88SX5080) {
		if (sc->sc_rev == 0x01)
			fix_apm_sq = 1;
	} else {
		if (sc->sc_rev == 0x00)
			fix_apm_sq = 1;
	}

3669 if (fix_apm_sq) { 3670 /* 3671 * Disable auto-power management 3672 * 88SX50xx FEr SATA#12 3673 */ 3674 reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_LTMODE(port)); 3675 reg |= (1 << 19); 3676 MVSATA_HC_WRITE_4(mvhc, SATAHC_I_LTMODE(port), reg); 3677 3678 /* 3679 * Fix squelch threshold 3680 * 88SX50xx FEr SATA#9 3681 */ 3682 reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_PHYCONTROL(port)); 3683 reg &= ~0x3; 3684 reg |= 0x1; 3685 MVSATA_HC_WRITE_4(mvhc, SATAHC_I_PHYCONTROL(port), reg); 3686 } 3687 3688 /* Revert values of pre-emphasis and signal amps to the saved ones */ 3689 reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_PHYMODE(port)); 3690 reg &= ~0x000018e0; /* pre and amps mask */ 3691 reg |= mvport->_fix_phy_param.pre_amps; 3692 MVSATA_HC_WRITE_4(mvhc, SATAHC_I_PHYMODE(port), reg); 3693 } 3694 3695 static void 3696 mvsata_devconn_gen1(struct mvsata_port *mvport) 3697 { 3698 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 3699 3700 /* Fix for 88SX50xx FEr SATA#2 */ 3701 mvport->_fix_phy_param._fix_phy(mvport); 3702 3703 /* If disk is connected, then enable the activity LED */ 3704 if (sc->sc_rev == 0x03) { 3705 /* XXXXX */ 3706 } 3707 } 3708 3709 static uint32_t 3710 mvsata_read_preamps_gen2(struct mvsata_port *mvport) 3711 { 3712 uint32_t reg; 3713 3714 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2); 3715 /* 3716 * [10:8] : amps 3717 * [7:5] : pre 3718 */ 3719 return reg & 0x000007e0; 3720 } 3721 3722 static void 3723 mvsata_fix_phy_gen2(struct mvsata_port *mvport) 3724 { 3725 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 3726 uint32_t reg; 3727 3728 if ((sc->sc_gen == gen2 && sc->sc_rev == 0x07) || 3729 sc->sc_gen == gen2e) { 3730 /* 3731 * Fix for 3732 * 88SX60X1 FEr SATA #23 3733 * 88SX6042/88SX7042 FEr SATA #23 3734 * 88F5182 FEr #SATA-S13 3735 * 88F5082 FEr #SATA-S13 3736 */ 3737 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2); 3738 reg &= ~(1 << 16); 3739 reg |= (1 << 31); 3740 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg); 3741 3742 delay(200); 
3743 3744 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2); 3745 reg &= ~((1 << 16) | (1 << 31)); 3746 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg); 3747 3748 delay(200); 3749 } 3750 3751 /* Fix values in PHY Mode 3 Register.*/ 3752 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM3); 3753 reg &= ~0x7F900000; 3754 reg |= 0x2A800000; 3755 /* Implement Guidline 88F5182, 88F5082, 88F6082 (GL# SATA-S11) */ 3756 if (sc->sc_model == PCI_PRODUCT_MARVELL_88F5082 || 3757 sc->sc_model == PCI_PRODUCT_MARVELL_88F5182 || 3758 sc->sc_model == PCI_PRODUCT_MARVELL_88F6082) 3759 reg &= ~0x0000001c; 3760 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM3, reg); 3761 3762 /* 3763 * Fix values in PHY Mode 4 Register. 3764 * 88SX60x1 FEr SATA#10 3765 * 88F5182 GL #SATA-S10 3766 * 88F5082 GL #SATA-S10 3767 */ 3768 if ((sc->sc_gen == gen2 && sc->sc_rev == 0x07) || 3769 sc->sc_gen == gen2e) { 3770 uint32_t tmp = 0; 3771 3772 /* 88SX60x1 FEr SATA #13 */ 3773 if (sc->sc_gen == 2 && sc->sc_rev == 0x07) 3774 tmp = MVSATA_EDMA_READ_4(mvport, SATA_PHYM3); 3775 3776 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM4); 3777 reg |= (1 << 0); 3778 reg &= ~(1 << 1); 3779 /* PHY Mode 4 Register of Gen IIE has some restriction */ 3780 if (sc->sc_gen == gen2e) { 3781 reg &= ~0x5de3fffc; 3782 reg |= (1 << 2); 3783 } 3784 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM4, reg); 3785 3786 /* 88SX60x1 FEr SATA #13 */ 3787 if (sc->sc_gen == 2 && sc->sc_rev == 0x07) 3788 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM3, tmp); 3789 } 3790 3791 /* Revert values of pre-emphasis and signal amps to the saved ones */ 3792 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2); 3793 reg &= ~0x000007e0; /* pre and amps mask */ 3794 reg |= mvport->_fix_phy_param.pre_amps; 3795 reg &= ~(1 << 16); 3796 if (sc->sc_gen == gen2e) { 3797 /* 3798 * according to mvSata 3.6.1, some IIE values are fixed. 3799 * some reserved fields must be written with fixed values. 
3800 */ 3801 reg &= ~0xC30FF01F; 3802 reg |= 0x0000900F; 3803 } 3804 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg); 3805 } 3806 3807 #ifndef MVSATA_WITHOUTDMA 3808 static void 3809 mvsata_edma_setup_crqb_gen2e(struct mvsata_port *mvport, int erqqip, 3810 struct ata_xfer *xfer) 3811 { 3812 struct crqb_gen2e *crqb; 3813 bus_addr_t eprd_addr; 3814 uint32_t ctrlflg, rw; 3815 uint8_t fis[RHD_FISLEN]; 3816 3817 eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr + 3818 mvport->port_reqtbl[xfer->c_slot].eprd_offset; 3819 rw = (xfer->c_bio.flags & ATA_READ) ? CRQB_CDIR_READ : CRQB_CDIR_WRITE; 3820 ctrlflg = (rw | CRQB_CDEVICEQUETAG(xfer->c_slot) | 3821 CRQB_CPMPORT(xfer->c_drive) | 3822 CRQB_CPRDMODE_EPRD | CRQB_CHOSTQUETAG_GEN2(xfer->c_slot)); 3823 3824 crqb = &mvport->port_crqb->crqb_gen2e + erqqip; 3825 crqb->cprdbl = htole32(eprd_addr & CRQB_CRQBL_EPRD_MASK); 3826 crqb->cprdbh = htole32((eprd_addr >> 16) >> 16); 3827 crqb->ctrlflg = htole32(ctrlflg); 3828 3829 satafis_rhd_construct_bio(xfer, fis); 3830 3831 crqb->atacommand[0] = 0; 3832 crqb->atacommand[1] = 0; 3833 /* copy over the ATA command part of the fis */ 3834 memcpy(&crqb->atacommand[2], &fis[rhd_command], 3835 MIN(sizeof(crqb->atacommand) - 2, RHD_FISLEN - rhd_command)); 3836 } 3837 3838 #ifdef MVSATA_DEBUG 3839 #define MVSATA_DEBUG_PRINT(type, size, n, p) \ 3840 do { \ 3841 int _i; \ 3842 u_char *_p = (p); \ 3843 \ 3844 printf(#type "(%d)", (n)); \ 3845 for (_i = 0; _i < (size); _i++, _p++) { \ 3846 if (_i % 16 == 0) \ 3847 printf("\n "); \ 3848 printf(" %02x", *_p); \ 3849 } \ 3850 printf("\n"); \ 3851 } while (0 /* CONSTCOND */) 3852 3853 static void 3854 mvsata_print_crqb(struct mvsata_port *mvport, int n) 3855 { 3856 3857 MVSATA_DEBUG_PRINT(crqb, sizeof(union mvsata_crqb), 3858 n, (u_char *)(mvport->port_crqb + n)); 3859 } 3860 3861 static void 3862 mvsata_print_crpb(struct mvsata_port *mvport, int n) 3863 { 3864 3865 MVSATA_DEBUG_PRINT(crpb, sizeof(struct crpb), 3866 n, (u_char *)(mvport->port_crpb 
+ n)); 3867 } 3868 3869 static void 3870 mvsata_print_eprd(struct mvsata_port *mvport, int n) 3871 { 3872 struct eprd *eprd; 3873 int i = 0; 3874 3875 eprd = mvport->port_reqtbl[n].eprd; 3876 while (1 /*CONSTCOND*/) { 3877 MVSATA_DEBUG_PRINT(eprd, sizeof(struct eprd), 3878 i, (u_char *)eprd); 3879 if (eprd->eot & EPRD_EOT) 3880 break; 3881 eprd++; 3882 i++; 3883 } 3884 } 3885 #endif 3886 #endif 3887