/*	$NetBSD: mvsata.c,v 1.30 2013/04/03 17:15:07 bouyer Exp $	*/
/*
 * Copyright (c) 2008 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mvsata.c,v 1.30 2013/04/03 17:15:07 bouyer Exp $");

#include "opt_mvsata.h"

/* ATAPI implementation not finished. */
//#include "atapibus.h"

#include <sys/param.h>
#if NATAPIBUS > 0
#include <sys/buf.h>
#endif
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <machine/vmparam.h>

#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>
#include <dev/ata/satapmpreg.h>
#include <dev/ata/satareg.h>
#include <dev/ata/satavar.h>

#if NATAPIBUS > 0
#include <dev/scsipi/scsi_all.h>	/* for SCSI status */
#endif

#include <dev/pci/pcidevs.h>

#include <dev/ic/mvsatareg.h>
#include <dev/ic/mvsatavar.h>


/* Shorthand for the device_t of the controller / of a port's channel. */
#define MVSATA_DEV(sc)		((sc)->sc_wdcdev.sc_atac.atac_dev)
#define MVSATA_DEV2(mvport)	((mvport)->port_ata_channel.ch_atac->atac_dev)

/*
 * Register access helpers: per-host-controller (SATAHC) registers, per-port
 * EDMA registers, and the ATA shadow register block within a port's window.
 */
#define MVSATA_HC_READ_4(hc, reg) \
	bus_space_read_4((hc)->hc_iot, (hc)->hc_ioh, (reg))
#define MVSATA_HC_WRITE_4(hc, reg, val) \
	bus_space_write_4((hc)->hc_iot, (hc)->hc_ioh, (reg), (val))
#define MVSATA_EDMA_READ_4(mvport, reg) \
	bus_space_read_4((mvport)->port_iot, (mvport)->port_ioh, (reg))
#define MVSATA_EDMA_WRITE_4(mvport, reg, val) \
	bus_space_write_4((mvport)->port_iot, (mvport)->port_ioh, (reg), (val))
#define MVSATA_WDC_READ_2(mvport, reg) \
	bus_space_read_2((mvport)->port_iot, (mvport)->port_ioh, \
	    SHADOW_REG_BLOCK_OFFSET + (reg))
#define MVSATA_WDC_READ_1(mvport, reg) \
	bus_space_read_1((mvport)->port_iot, (mvport)->port_ioh, \
	    SHADOW_REG_BLOCK_OFFSET + (reg))
#define MVSATA_WDC_WRITE_2(mvport, reg, val) \
	bus_space_write_2((mvport)->port_iot, (mvport)->port_ioh, \
	    SHADOW_REG_BLOCK_OFFSET + (reg), (val))
#define MVSATA_WDC_WRITE_1(mvport, reg, val) \
	bus_space_write_1((mvport)->port_iot, (mvport)->port_ioh, \
	    SHADOW_REG_BLOCK_OFFSET + (reg), (val))

#ifdef MVSATA_DEBUG
#define DPRINTF(x)	if (mvsata_debug) printf x
#define DPRINTFN(n,x)	if (mvsata_debug >= (n)) printf x
int	mvsata_debug = 2;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define ATA_DELAY		10000	/* 10s for a drive I/O */
#define ATAPI_DELAY		10	/* 10 ms, this is used only before
					   sending a cmd */
#define ATAPI_MODE_DELAY	1000	/* 1s, timeout for SET_FEATURE cmds */

/* Worst-case ePRD table size for one request (one entry per page of MAXPHYS). */
#define MVSATA_EPRD_MAX_SIZE	(sizeof(struct eprd) * (MAXPHYS / PAGE_SIZE))


#ifndef MVSATA_WITHOUTDMA
static int mvsata_bio(struct ata_drive_datas *, struct ata_bio *);
static void mvsata_reset_drive(struct ata_drive_datas *, int, uint32_t *);
static void mvsata_reset_channel(struct ata_channel *, int);
static int mvsata_exec_command(struct ata_drive_datas *, struct ata_command *);
static int mvsata_addref(struct ata_drive_datas *);
static void mvsata_delref(struct ata_drive_datas *);
static void mvsata_killpending(struct ata_drive_datas *);

#if NATAPIBUS > 0
static void mvsata_atapibus_attach(struct atabus_softc *);
static void mvsata_atapi_scsipi_request(struct scsipi_channel *,
					scsipi_adapter_req_t, void *);
static void mvsata_atapi_minphys(struct buf *);
static void mvsata_atapi_probe_device(struct atapibus_softc *, int);
static void mvsata_atapi_kill_pending(struct scsipi_periph *);
#endif
#endif

static void mvsata_setup_channel(struct ata_channel *);

#ifndef MVSATA_WITHOUTDMA
static void mvsata_bio_start(struct ata_channel *, struct ata_xfer *);
static int mvsata_bio_intr(struct ata_channel *, struct ata_xfer *, int);
static void mvsata_bio_kill_xfer(struct ata_channel *, struct ata_xfer *, int);
static void mvsata_bio_done(struct ata_channel *, struct ata_xfer *);
static int mvsata_bio_ready(struct mvsata_port *, struct ata_bio *, int,
			    int);
static void mvsata_wdc_cmd_start(struct ata_channel *, struct ata_xfer *);
static int mvsata_wdc_cmd_intr(struct ata_channel *, struct ata_xfer *, int);
static void mvsata_wdc_cmd_kill_xfer(struct ata_channel *, struct ata_xfer *,
				     int);
static void mvsata_wdc_cmd_done(struct ata_channel *, struct ata_xfer *);
static void mvsata_wdc_cmd_done_end(struct ata_channel *, struct ata_xfer *);
#if NATAPIBUS > 0
static void mvsata_atapi_start(struct ata_channel *, struct ata_xfer *);
static int mvsata_atapi_intr(struct ata_channel *, struct ata_xfer *, int);
static void mvsata_atapi_kill_xfer(struct ata_channel *, struct ata_xfer *,
				   int);
static void mvsata_atapi_reset(struct ata_channel *, struct ata_xfer *);
static void mvsata_atapi_phase_complete(struct ata_xfer *);
static void mvsata_atapi_done(struct ata_channel *, struct ata_xfer *);
static void mvsata_atapi_polldsc(void *);
#endif

static int mvsata_edma_enqueue(struct mvsata_port *, struct ata_bio *, void *);
static int mvsata_edma_handle(struct mvsata_port *, struct ata_xfer *);
static int mvsata_edma_wait(struct mvsata_port *, struct ata_xfer *, int);
static void mvsata_edma_timeout(void *);
static void mvsata_edma_rqq_remove(struct mvsata_port *, struct ata_xfer *);
#if NATAPIBUS > 0
static int mvsata_bdma_init(struct mvsata_port *, struct scsipi_xfer *, void *);
static void mvsata_bdma_start(struct mvsata_port *);
#endif
#endif

static int mvsata_port_init(struct mvsata_hc *, int);
static int mvsata_wdc_reg_init(struct mvsata_port *, struct wdc_regs *);
#ifndef MVSATA_WITHOUTDMA
static inline void mvsata_quetag_init(struct mvsata_port *);
static inline int mvsata_quetag_get(struct mvsata_port *);
static inline void mvsata_quetag_put(struct mvsata_port *, int);
static void *mvsata_edma_resource_prepare(struct mvsata_port *, bus_dma_tag_t,
					  bus_dmamap_t *, size_t, int);
static void mvsata_edma_resource_purge(struct mvsata_port *, bus_dma_tag_t,
				       bus_dmamap_t, void *);
static int mvsata_dma_bufload(struct mvsata_port *, int, void *, size_t, int);
static inline void mvsata_dma_bufunload(struct mvsata_port *, int, int);
#endif

static void mvsata_hreset_port(struct mvsata_port *);
static void mvsata_reset_port(struct mvsata_port *);
static void mvsata_reset_hc(struct mvsata_hc *);
static uint32_t mvsata_softreset(struct mvsata_port *, int);
#ifndef MVSATA_WITHOUTDMA
static void mvsata_edma_reset_qptr(struct mvsata_port *);
static inline void mvsata_edma_enable(struct mvsata_port *);
static int mvsata_edma_disable(struct mvsata_port *, int, int);
static void mvsata_edma_config(struct mvsata_port *, int);

static void mvsata_edma_setup_crqb(struct mvsata_port *, int, int,
				   struct ata_bio *);
#endif
static uint32_t mvsata_read_preamps_gen1(struct mvsata_port *);
static void mvsata_fix_phy_gen1(struct mvsata_port *);
static void mvsata_devconn_gen1(struct mvsata_port *);

static uint32_t mvsata_read_preamps_gen2(struct mvsata_port *);
static void mvsata_fix_phy_gen2(struct mvsata_port *);
#ifndef MVSATA_WITHOUTDMA
static void mvsata_edma_setup_crqb_gen2e(struct mvsata_port *, int, int,
					 struct ata_bio *);

#ifdef MVSATA_DEBUG
static void mvsata_print_crqb(struct mvsata_port *, int);
static void mvsata_print_crpb(struct mvsata_port *, int);
static void mvsata_print_eprd(struct mvsata_port *, int);
#endif

static void mvsata_probe_drive(struct ata_channel *);

/* ATA bus entry points handed to the MI ata(4) layer. */
struct ata_bustype mvsata_ata_bustype = {
	SCSIPI_BUSTYPE_ATA,
	mvsata_bio,
	mvsata_reset_drive,
	mvsata_reset_channel,
	mvsata_exec_command,
	ata_get_params,
	mvsata_addref,
	mvsata_delref,
	mvsata_killpending
};

#if NATAPIBUS > 0
/* scsipi bus glue for ATAPI devices behind this controller. */
static const struct scsipi_bustype mvsata_atapi_bustype = {
	SCSIPI_BUSTYPE_ATAPI,
	atapi_scsipi_cmd,
	atapi_interpret_sense,
	atapi_print_addr,
	mvsata_atapi_kill_pending,
	NULL,
};
#endif /* NATAPIBUS */
#endif

/*
 * Select the port-multiplier target port addressed by subsequent register
 * accesses, by programming the low four bits of the SATA interface control
 * register.  EDMA must be disabled while doing this (checked under
 * DIAGNOSTIC/MVSATA_DEBUG).
 */
static void
mvsata_pmp_select(struct mvsata_port *mvport, int pmpport)
{
	uint32_t ifctl;

	KASSERT(pmpport < PMP_MAX_DRIVES);
#if defined(DIAGNOSTIC) || defined(MVSATA_DEBUG)
	if ((MVSATA_EDMA_READ_4(mvport, EDMA_CMD) & EDMA_CMD_EENEDMA) != 0) {
		panic("EDMA enabled");
	}
#endif

	ifctl = MVSATA_EDMA_READ_4(mvport, SATA_SATAICTL);
	ifctl &= ~0xf;
	ifctl |= pmpport;
	MVSATA_EDMA_WRITE_4(mvport, SATA_SATAICTL, ifctl);
}

/*
 * Common attachment for all controller generations: pick the per-generation
 * phy-fixup/preamp-read/CRQB-setup hooks, fill in the wdc/atac capabilities,
 * initialize every host controller and port, run the optional caller-supplied
 * reset hooks, and finally attach the wdc channels.
 *
 * Returns 0 on success or ENOMEM if the wdc register array cannot be
 * allocated.  Note that for gen1 hardware the mvsata_sreset hook is forcibly
 * disabled and preamp reading is mandatory.
 */
int
mvsata_attach(struct mvsata_softc *sc, struct mvsata_product *product,
	      int (*mvsata_sreset)(struct mvsata_softc *),
	      int (*mvsata_misc_reset)(struct mvsata_softc *),
	      int read_pre_amps)
{
	struct mvsata_hc *mvhc;
	struct mvsata_port *mvport;
	uint32_t (*read_preamps)(struct mvsata_port *) = NULL;
	void (*_fix_phy)(struct mvsata_port *) = NULL;
#ifndef MVSATA_WITHOUTDMA
	void (*edma_setup_crqb)
	    (struct mvsata_port *, int, int, struct ata_bio *) = NULL;
#endif
	int hc, port, channel;

	aprint_normal_dev(MVSATA_DEV(sc), "Gen%s, %dhc, %dport/hc\n",
	    (product->generation == gen1) ? "I" :
	    ((product->generation == gen2) ? "II" : "IIe"),
	    product->hc, product->port);


	switch (product->generation) {
	case gen1:
		mvsata_sreset = NULL;
		read_pre_amps = 1;	/* MUST */
		read_preamps = mvsata_read_preamps_gen1;
		_fix_phy = mvsata_fix_phy_gen1;
#ifndef MVSATA_WITHOUTDMA
		edma_setup_crqb = mvsata_edma_setup_crqb;
#endif
		break;

	case gen2:
		read_preamps = mvsata_read_preamps_gen2;
		_fix_phy = mvsata_fix_phy_gen2;
#ifndef MVSATA_WITHOUTDMA
		edma_setup_crqb = mvsata_edma_setup_crqb;
#endif
		break;

	case gen2e:
		read_preamps = mvsata_read_preamps_gen2;
		_fix_phy = mvsata_fix_phy_gen2;
#ifndef MVSATA_WITHOUTDMA
		edma_setup_crqb = mvsata_edma_setup_crqb_gen2e;
#endif
		break;
	}

	sc->sc_gen = product->generation;
	sc->sc_hc = product->hc;
	sc->sc_port = product->port;
	sc->sc_flags = product->flags;

#ifdef MVSATA_WITHOUTDMA
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
#else
	sc->sc_edma_setup_crqb = edma_setup_crqb;
	sc->sc_wdcdev.sc_atac.atac_cap |=
	    (ATAC_CAP_DATA16 | ATAC_CAP_DMA | ATAC_CAP_UDMA);
#endif
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
#ifdef MVSATA_WITHOUTDMA
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
#else
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
#endif
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_ata_channels;
	sc->sc_wdcdev.sc_atac.atac_nchannels = sc->sc_hc * sc->sc_port;
#ifndef MVSATA_WITHOUTDMA
	sc->sc_wdcdev.sc_atac.atac_bustype_ata = &mvsata_ata_bustype;
#if NATAPIBUS > 0
	sc->sc_wdcdev.sc_atac.atac_atapibus_attach = mvsata_atapibus_attach;
#endif
#endif
	sc->sc_wdcdev.wdc_maxdrives = 1;	/* SATA is always 1 drive */
	sc->sc_wdcdev.sc_atac.atac_probe = mvsata_probe_drive;
	sc->sc_wdcdev.sc_atac.atac_set_modes = mvsata_setup_channel;

	sc->sc_wdc_regs =
	    malloc(sizeof(struct wdc_regs) * product->hc * product->port,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_wdc_regs == NULL) {
		aprint_error_dev(MVSATA_DEV(sc),
		    "can't allocate wdc regs memory\n");
		return ENOMEM;
	}
	sc->sc_wdcdev.regs = sc->sc_wdc_regs;

	for (hc = 0; hc < sc->sc_hc; hc++) {
		mvhc = &sc->sc_hcs[hc];
		mvhc->hc = hc;
		mvhc->hc_sc = sc;
		mvhc->hc_iot = sc->sc_iot;
		if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
		    hc * SATAHC_REGISTER_SIZE, SATAHC_REGISTER_SIZE,
		    &mvhc->hc_ioh)) {
			aprint_error_dev(MVSATA_DEV(sc),
			    "can't subregion SATAHC %d registers\n", hc);
			continue;
		}

		for (port = 0; port < sc->sc_port; port++)
			if (mvsata_port_init(mvhc, port) == 0) {
				int pre_amps;

				mvport = mvhc->hc_ports[port];
				pre_amps = read_pre_amps ?
				    read_preamps(mvport) : 0x00000720;
				mvport->_fix_phy_param.pre_amps = pre_amps;
				mvport->_fix_phy_param._fix_phy = _fix_phy;

				/* Without a system reset hook, reset ports
				 * individually here. */
				if (!mvsata_sreset)
					mvsata_reset_port(mvport);
			}

		if (!mvsata_sreset)
			mvsata_reset_hc(mvhc);
	}
	if (mvsata_sreset)
		mvsata_sreset(sc);

	if (mvsata_misc_reset)
		mvsata_misc_reset(sc);

	/* After a system reset the phy parameters must be re-applied. */
	for (hc = 0; hc < sc->sc_hc; hc++)
		for (port = 0; port < sc->sc_port; port++) {
			mvport = sc->sc_hcs[hc].hc_ports[port];
			if (mvport == NULL)
				continue;
			if (mvsata_sreset)
				mvport->_fix_phy_param._fix_phy(mvport);
		}
	for (channel = 0; channel < sc->sc_hc * sc->sc_port; channel++)
		wdcattach(sc->sc_ata_channels[channel]);

	return 0;
}

/*
 * Interrupt handler for one host controller.  Reads the SATAHC interrupt
 * cause register, dispatches EDMA-done and device interrupts per port, and
 * acks each handled bit by writing its complement back to SATAHC_IC.
 * Returns nonzero if any interrupt was handled.
 */
int
mvsata_intr(struct mvsata_hc *mvhc)
{
	struct mvsata_softc *sc = mvhc->hc_sc;
	struct mvsata_port *mvport;
	uint32_t cause;
	int port, handled = 0;

	cause = MVSATA_HC_READ_4(mvhc, SATAHC_IC);

	DPRINTFN(3, ("%s:%d: mvsata_intr: cause=0x%08x\n",
	    device_xname(MVSATA_DEV(sc)), mvhc->hc, cause));

	if (cause & SATAHC_IC_SAINTCOAL)
		MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, ~SATAHC_IC_SAINTCOAL);
	cause &= ~SATAHC_IC_SAINTCOAL;
	for (port = 0; port < sc->sc_port; port++) {
		mvport = mvhc->hc_ports[port];

		if (cause & SATAHC_IC_DONE(port)) {
#ifndef MVSATA_WITHOUTDMA
			handled = mvsata_edma_handle(mvport, NULL);
#endif
			MVSATA_HC_WRITE_4(mvhc, SATAHC_IC,
			    ~SATAHC_IC_DONE(port));
		}

		if (cause & SATAHC_IC_SADEVINTERRUPT(port)) {
			wdcintr(&mvport->port_ata_channel);
			MVSATA_HC_WRITE_4(mvhc, SATAHC_IC,
			    ~SATAHC_IC_SADEVINTERRUPT(port));
			handled = 1;
		}
	}

	return handled;
}

/*
 * Handle a port error interrupt: ack SATA_SE/SATA_FISIC before the
 * corresponding EDMA_IEC bits, report disconnect/connect events, and restart
 * EDMA when the controller disabled it by itself.  Returns 1 if any unmasked
 * error cause was present, 0 otherwise.
 */
int
mvsata_error(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	uint32_t cause;

	cause = MVSATA_EDMA_READ_4(mvport, EDMA_IEC);
	/*
	 * We must ack SATA_SE and SATA_FISIC before acking coresponding bits
	 * in EDMA_IEC.
	 */
	if (cause & EDMA_IE_SERRINT) {
		MVSATA_EDMA_WRITE_4(mvport, SATA_SE,
		    MVSATA_EDMA_READ_4(mvport, SATA_SEIM));
	}
	if (cause & EDMA_IE_ETRANSINT) {
		MVSATA_EDMA_WRITE_4(mvport, SATA_FISIC,
		    ~MVSATA_EDMA_READ_4(mvport, SATA_FISIM));
	}
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, ~cause);

	DPRINTFN(3, ("%s:%d:%d:"
	    " mvsata_error: cause=0x%08x, mask=0x%08x, status=0x%08x\n",
	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
	    mvport->port, cause, MVSATA_EDMA_READ_4(mvport, EDMA_IEM),
	    MVSATA_EDMA_READ_4(mvport, EDMA_S)));

	/* Only causes that are not masked out are acted upon. */
	cause &= MVSATA_EDMA_READ_4(mvport, EDMA_IEM);
	if (!cause)
		return 0;

	if (cause & EDMA_IE_EDEVDIS) {
		aprint_normal("%s:%d:%d: device disconnect\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port);
	}
	if (cause & EDMA_IE_EDEVCON) {
		if (sc->sc_gen == gen1)
			mvsata_devconn_gen1(mvport);

		DPRINTFN(3, ("    device connected\n"));
	}
#ifndef MVSATA_WITHOUTDMA
	if ((sc->sc_gen == gen1 && cause & EDMA_IE_ETRANSINT) ||
	    (sc->sc_gen != gen1 && cause & EDMA_IE_ESELFDIS)) {
		switch (mvport->port_edmamode) {
		case dma:
		case queued:
		case ncq:
			/* Re-arm the queue and restart EDMA. */
			mvsata_edma_reset_qptr(mvport);
			mvsata_edma_enable(mvport);
			if (cause & EDMA_IE_EDEVERR)
				break;

			/* FALLTHROUGH */

		case nodma:
		default:
			aprint_error(
			    "%s:%d:%d: EDMA self disable happen 0x%x\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port, cause);
			break;
		}
	}
#endif
	if (cause & EDMA_IE_ETRANSINT) {
		/* hot plug the Port Multiplier */
		aprint_normal("%s:%d:%d: detect Port Multiplier?\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port);
	}

	return 1;
}


/*
 * ATA callback entry points
 */

/*
 * Reset the SATA interface and, if a device is present, soft-reset it and
 * interpret its signature to classify the drive.
 */
static void
mvsata_probe_drive(struct ata_channel *chp)
{
	struct mvsata_port * const mvport = (struct mvsata_port *)chp;
	uint32_t sstat, sig;

	sstat = sata_reset_interface(chp, mvport->port_iot,
	    mvport->port_sata_scontrol, mvport->port_sata_sstatus, AT_WAIT);
	switch (sstat) {
	case SStatus_DET_DEV:
		mvsata_pmp_select(mvport, PMP_PORT_CTL);
		sig = mvsata_softreset(mvport, AT_WAIT);
		sata_interpret_sig(chp, 0, sig);
		break;
	default:
		break;
	}
}

#ifndef MVSATA_WITHOUTDMA
/*
 * Queue a block I/O transfer.  Builds an ata_xfer (DMA unless the drive or
 * request forbids it) and hands it to ata_exec_xfer().  Returns
 * ATACMD_COMPLETE, ATACMD_QUEUED, or ATACMD_TRY_AGAIN if no xfer could be
 * allocated.
 */
static int
mvsata_bio(struct ata_drive_datas *drvp, struct ata_bio *ata_bio)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_xfer *xfer;

	DPRINTFN(1, ("%s:%d: mvsata_bio: drive=%d, blkno=%" PRId64
	    ", bcount=%ld\n", device_xname(atac->atac_dev), chp->ch_channel,
	    drvp->drive, ata_bio->blkno, ata_bio->bcount));

	xfer = ata_get_xfer(ATAXF_NOSLEEP);
	if (xfer == NULL)
		return ATACMD_TRY_AGAIN;
	if (atac->atac_cap & ATAC_CAP_NOIRQ)
		ata_bio->flags |= ATA_POLL;
	if (ata_bio->flags & ATA_POLL)
		xfer->c_flags |= C_POLL;
	if ((drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) &&
	    (ata_bio->flags & ATA_SINGLE) == 0)
		xfer->c_flags |= C_DMA;
	xfer->c_drive = drvp->drive;
	xfer->c_cmd = ata_bio;
	xfer->c_databuf = ata_bio->databuf;
	xfer->c_bcount = ata_bio->bcount;
	xfer->c_start = mvsata_bio_start;
	xfer->c_intr = mvsata_bio_intr;
	xfer->c_kill_xfer = mvsata_bio_kill_xfer;
	ata_exec_xfer(chp, xfer);
	return (ata_bio->flags & ATA_ITSDONE) ? ATACMD_COMPLETE : ATACMD_QUEUED;
}

/*
 * Reset one drive: temporarily disable EDMA if it is running, select the
 * drive's PMP port, issue a softreset (returning the signature through *sigp
 * if requested), then restore EDMA.
 */
static void
mvsata_reset_drive(struct ata_drive_datas *drvp, int flags, uint32_t *sigp)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	uint32_t edma_c;
	uint32_t sig;

	edma_c = MVSATA_EDMA_READ_4(mvport, EDMA_CMD);

	DPRINTF(("%s:%d: mvsata_reset_drive: drive=%d (EDMA %sactive)\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, drvp->drive,
	    (edma_c & EDMA_CMD_EENEDMA) ? "" : "not "));

	if (edma_c & EDMA_CMD_EENEDMA)
		mvsata_edma_disable(mvport, 10000, flags & AT_WAIT);

	mvsata_pmp_select(mvport, drvp->drive);

	sig = mvsata_softreset(mvport, flags & AT_WAIT);

	if (sigp)
		*sigp = sig;

	if (edma_c & EDMA_CMD_EENEDMA) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}
	return;
}

/*
 * Hard-reset the whole channel.  If the link only negotiates with errors
 * (DET_DEV_NE) on a GenII+ part, fall back to GenI speed and retry.  All
 * outstanding requests in the request table are killed with KILL_RESET, then
 * EDMA is reconfigured and re-enabled.
 */
static void
mvsata_reset_channel(struct ata_channel *chp, int flags)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct ata_xfer *xfer;
	uint32_t sstat, ctrl;
	int i;

	DPRINTF(("%s: mvsata_reset_channel: channel=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel));

	mvsata_hreset_port(mvport);
	sstat = sata_reset_interface(chp, mvport->port_iot,
	    mvport->port_sata_scontrol, mvport->port_sata_sstatus, flags);

	if (flags & AT_WAIT && sstat == SStatus_DET_DEV_NE &&
	    sc->sc_gen != gen1) {
		/* Downgrade to GenI */
		const uint32_t val = SControl_IPM_NONE | SControl_SPD_ANY |
		    SControl_DET_DISABLE;

		MVSATA_EDMA_WRITE_4(mvport, mvport->port_sata_scontrol, val);

		ctrl = MVSATA_EDMA_READ_4(mvport, SATA_SATAICFG);
		ctrl &= ~(1 << 17);	/* Disable GenII */
		MVSATA_EDMA_WRITE_4(mvport, SATA_SATAICFG, ctrl);

		mvsata_hreset_port(mvport);
		sata_reset_interface(chp, mvport->port_iot,
		    mvport->port_sata_scontrol, mvport->port_sata_sstatus,
		    flags);
	}

	for (i = 0; i < MVSATA_EDMAQ_LEN; i++) {
		xfer = mvport->port_reqtbl[i].xfer;
		if (xfer == NULL)
			continue;
		chp->ch_queue->active_xfer = xfer;
		xfer->c_kill_xfer(chp, xfer, KILL_RESET);
	}

	mvsata_edma_config(mvport, mvport->port_edmamode);
	mvsata_edma_reset_qptr(mvport);
	mvsata_edma_enable(mvport);
	return;
}


/*
 * Execute an ATA command (non-bio path).  Builds an ata_xfer for the wdc
 * command handlers; for AT_WAIT callers, sleeps until AT_DONE is set.
 * Returns ATACMD_COMPLETE, ATACMD_QUEUED, or ATACMD_TRY_AGAIN.
 */
static int
mvsata_exec_command(struct ata_drive_datas *drvp, struct ata_command *ata_c)
{
	struct ata_channel *chp = drvp->chnl_softc;
#ifdef MVSATA_DEBUG
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
#endif
	struct ata_xfer *xfer;
	int rv, s;

	DPRINTFN(1, ("%s:%d: mvsata_exec_command: drive=%d, bcount=%d,"
	    " r_lba=0x%012"PRIx64", r_count=0x%04x, r_features=0x%04x,"
	    " r_device=0x%02x, r_command=0x%02x\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel,
	    drvp->drive, ata_c->bcount, ata_c->r_lba, ata_c->r_count,
	    ata_c->r_features, ata_c->r_device, ata_c->r_command));

	xfer = ata_get_xfer(ata_c->flags & AT_WAIT ? ATAXF_CANSLEEP :
	    ATAXF_NOSLEEP);
	if (xfer == NULL)
		return ATACMD_TRY_AGAIN;
	if (ata_c->flags & AT_POLL)
		xfer->c_flags |= C_POLL;
	if (ata_c->flags & AT_WAIT)
		xfer->c_flags |= C_WAIT;
	xfer->c_drive = drvp->drive;
	xfer->c_databuf = ata_c->data;
	xfer->c_bcount = ata_c->bcount;
	xfer->c_cmd = ata_c;
	xfer->c_start = mvsata_wdc_cmd_start;
	xfer->c_intr = mvsata_wdc_cmd_intr;
	xfer->c_kill_xfer = mvsata_wdc_cmd_kill_xfer;
	s = splbio();
	ata_exec_xfer(chp, xfer);
#ifdef DIAGNOSTIC
	if ((ata_c->flags & AT_POLL) != 0 &&
	    (ata_c->flags & AT_DONE) == 0)
		panic("mvsata_exec_command: polled command not done");
#endif
	if (ata_c->flags & AT_DONE)
		rv = ATACMD_COMPLETE;
	else {
		if (ata_c->flags & AT_WAIT) {
			while ((ata_c->flags & AT_DONE) == 0)
				tsleep(ata_c, PRIBIO, "mvsatacmd", 0);
			rv = ATACMD_COMPLETE;
		} else
			rv = ATACMD_QUEUED;
	}
	splx(s);
	return rv;
}

/* Reference counting is not needed for this driver; always succeeds. */
static int
mvsata_addref(struct ata_drive_datas *drvp)
{

	return 0;
}

/* Counterpart of mvsata_addref(); nothing to release. */
static void
mvsata_delref(struct ata_drive_datas *drvp)
{

	return;
}

/* No driver-private pending queue to flush; the MI layer handles it. */
static void
mvsata_killpending(struct ata_drive_datas *drvp)
{

	return;
}

#if NATAPIBUS > 0
/*
 * Attach the ATAPI (scsipi) bus on this channel: fill in the scsipi adapter
 * and channel structures and config_found the atapibus child.
 */
static void
mvsata_atapibus_attach(struct atabus_softc *ata_sc)
{
	struct ata_channel *chp = ata_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	struct scsipi_channel *chan = &chp->ch_atapi_channel;

	/*
	 * Fill in the scsipi_adapter.
	 */
	adapt->adapt_dev = atac->atac_dev;
	adapt->adapt_nchannels = atac->atac_nchannels;
	adapt->adapt_request = mvsata_atapi_scsipi_request;
	adapt->adapt_minphys = mvsata_atapi_minphys;
	atac->atac_atapi_adapter.atapi_probe_device = mvsata_atapi_probe_device;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &mvsata_atapi_bustype;
	chan->chan_channel = chp->ch_channel;
	chan->chan_flags = SCSIPI_CHAN_OPENINGS;
	chan->chan_openings = 1;
	chan->chan_max_periph = 1;
	chan->chan_ntargets = 1;
	chan->chan_nluns = 1;

	chp->atapibus =
	    config_found_ia(ata_sc->sc_dev, "atapi", chan, atapiprint);
}

/*
 * scsipi adapter request entry point: converts an ADAPTER_REQ_RUN_XFER
 * request into an ata_xfer and queues it.  Other request types are not
 * supported.
 */
static void
mvsata_atapi_scsipi_request(struct scsipi_channel *chan,
			    scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct scsipi_periph *periph;
	struct scsipi_xfer *sc_xfer;
	struct mvsata_softc *sc = device_private(adapt->adapt_dev);
	struct atac_softc *atac = &sc->sc_wdcdev.sc_atac;
	struct ata_xfer *xfer;
	int channel = chan->chan_channel;
	int drive, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		sc_xfer = arg;
		periph = sc_xfer->xs_periph;
		drive = periph->periph_target;

		if (!device_is_active(atac->atac_dev)) {
			sc_xfer->error = XS_DRIVER_STUFFUP;
			scsipi_done(sc_xfer);
			return;
		}
		xfer = ata_get_xfer(ATAXF_NOSLEEP);
		if (xfer == NULL) {
			sc_xfer->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(sc_xfer);
			return;
		}

		if (sc_xfer->xs_control & XS_CTL_POLL)
			xfer->c_flags |= C_POLL;
		xfer->c_drive = drive;
		xfer->c_flags |= C_ATAPI;
		xfer->c_cmd = sc_xfer;
		xfer->c_databuf = sc_xfer->data;
		xfer->c_bcount = sc_xfer->datalen;
		xfer->c_start = mvsata_atapi_start;
		xfer->c_intr = mvsata_atapi_intr;
		xfer->c_kill_xfer = mvsata_atapi_kill_xfer;
		xfer->c_dscpoll = 0;
		s = splbio();
		ata_exec_xfer(atac->atac_channels[channel], xfer);
#ifdef DIAGNOSTIC
		if ((sc_xfer->xs_control & XS_CTL_POLL) != 0 &&
		    (sc_xfer->xs_status & XS_STS_DONE) == 0)
			panic("mvsata_atapi_scsipi_request:"
			    " polled command not done");
#endif
		splx(s);
		return;

	default:
		/* Not supported, nothing to do. */
		;
	}
}

/* Clamp a transfer to MAXPHYS before the generic minphys processing. */
static void
mvsata_atapi_minphys(struct buf *bp)
{

	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	minphys(bp);
}

/*
 * Probe one ATAPI target: IDENTIFY the device, allocate and fill a scsipi
 * periph, and hand it to atapi_probe_device().  On any failure the drive is
 * marked ATA_DRIVET_NONE.
 */
static void
mvsata_atapi_probe_device(struct atapibus_softc *sc, int target)
{
	struct scsipi_channel *chan = sc->sc_channel;
	struct scsipi_periph *periph;
	struct ataparams ids;
	struct ataparams *id = &ids;
	struct mvsata_softc *mvc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct atac_softc *atac = &mvc->sc_wdcdev.sc_atac;
	struct ata_channel *chp = atac->atac_channels[chan->chan_channel];
	struct ata_drive_datas *drvp = &chp->ch_drive[target];
	struct scsipibus_attach_args sa;
	char serial_number[21], model[41], firmware_revision[9];
	int s;

	/* skip if already attached */
	if (scsipi_lookup_periph(chan, target, 0) != NULL)
		return;

	/* if no ATAPI device detected at attach time, skip */
	if (drvp->drive_type != ATA_DRIVET_ATAPI) {
		DPRINTF(("%s:%d: mvsata_atapi_probe_device:"
		    " drive %d not present\n",
		    device_xname(atac->atac_dev), chp->ch_channel, target));
		return;
	}

	/* Some ATAPI devices need a bit more time after software reset. */
	delay(5000);
	if (ata_get_params(drvp, AT_WAIT, id) == 0) {
#ifdef ATAPI_DEBUG_PROBE
		log(LOG_DEBUG, "%s:%d: drive %d: cmdsz 0x%x drqtype 0x%x\n",
		    device_xname(atac->atac_dev), chp->ch_channel, target,
		    id->atap_config & ATAPI_CFG_CMD_MASK,
		    id->atap_config & ATAPI_CFG_DRQ_MASK);
#endif
		periph = scsipi_alloc_periph(M_NOWAIT);
		if (periph == NULL) {
			aprint_error_dev(atac->atac_dev,
			    "unable to allocate periph"
			    " for channel %d drive %d\n",
			    chp->ch_channel, target);
			return;
		}
		periph->periph_dev = NULL;
		periph->periph_channel = chan;
		periph->periph_switch = &atapi_probe_periphsw;
		periph->periph_target = target;
		periph->periph_lun = 0;
		periph->periph_quirks = PQUIRK_ONLYBIG;

#ifdef SCSIPI_DEBUG
		if (SCSIPI_DEBUG_TYPE == SCSIPI_BUSTYPE_ATAPI &&
		    SCSIPI_DEBUG_TARGET == target)
			periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
#endif
		periph->periph_type = ATAPI_CFG_TYPE(id->atap_config);
		if (id->atap_config & ATAPI_CFG_REMOV)
			periph->periph_flags |= PERIPH_REMOVABLE;
		if (periph->periph_type == T_SEQUENTIAL) {
			s = splbio();
			drvp->drive_flags |= ATA_DRIVE_ATAPIDSCW;
			splx(s);
		}

		sa.sa_periph = periph;
		sa.sa_inqbuf.type = ATAPI_CFG_TYPE(id->atap_config);
		sa.sa_inqbuf.removable = id->atap_config & ATAPI_CFG_REMOV ?
		    T_REMOV : T_FIXED;
		scsipi_strvis((u_char *)model, 40, id->atap_model, 40);
		scsipi_strvis((u_char *)serial_number, 20, id->atap_serial, 20);
		scsipi_strvis((u_char *)firmware_revision, 8, id->atap_revision,
		    8);
		sa.sa_inqbuf.vendor = model;
		sa.sa_inqbuf.product = serial_number;
		sa.sa_inqbuf.revision = firmware_revision;

		/*
		 * Determine the operating mode capabilities of the device.
		 */
		if ((id->atap_config & ATAPI_CFG_CMD_MASK) == ATAPI_CFG_CMD_16)
			periph->periph_cap |= PERIPH_CAP_CMD16;
		/* XXX This is gross. */
		periph->periph_cap |= (id->atap_config & ATAPI_CFG_DRQ_MASK);

		drvp->drv_softc = atapi_probe_device(sc, target, periph, &sa);

		if (drvp->drv_softc)
			ata_probe_caps(drvp);
		else {
			s = splbio();
			drvp->drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	} else {
		DPRINTF(("%s:%d: mvsata_atapi_probe_device:"
		    " ATAPI_IDENTIFY_DEVICE failed for drive %d: error 0x%x\n",
		    device_xname(atac->atac_dev), chp->ch_channel, target,
		    chp->ch_error));
		s = splbio();
		drvp->drive_type = ATA_DRIVET_NONE;
		splx(s);
	}
}

/*
 * Kill off all pending xfers for a periph.
 *
 * Must be called at splbio().
 */
static void
mvsata_atapi_kill_pending(struct scsipi_periph *periph)
{
	struct atac_softc *atac =
	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
	struct ata_channel *chp =
	    atac->atac_channels[periph->periph_channel->chan_channel];

	ata_kill_pending(&chp->ch_drive[periph->periph_target]);
}
#endif	/* NATAPIBUS > 0 */
#endif	/* MVSATA_WITHOUTDMA */


/*
 * mvsata_setup_channel()
 *	Setup EDMA registers and prepare/purge DMA resources.
 *	We assuming already stopped the EDMA.
 */
static void
mvsata_setup_channel(struct ata_channel *chp)
{
#if !defined(MVSATA_WITHOUTDMA) || defined(MVSATA_DEBUG)
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
#endif
	struct ata_drive_datas *drvp;
	uint32_t edma_mode;
	int drive, s;
#ifndef MVSATA_WITHOUTDMA
	int i;
	const int crqb_size = sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN;
	const int crpb_size = sizeof(struct crpb) * MVSATA_EDMAQ_LEN;
	const int eprd_buf_size = MVSATA_EPRD_MAX_SIZE * MVSATA_EDMAQ_LEN;
#endif

	DPRINTF(("%s:%d: mvsata_setup_channel: ",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel));

	/* EDMA is used only when at least one ATA drive does (U)DMA. */
	edma_mode = nodma;
	for (drive = 0; drive < chp->ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];

		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;

		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
		}

		if (drvp->drive_flags & (ATA_DRIVE_UDMA | ATA_DRIVE_DMA))
			if (drvp->drive_type == ATA_DRIVET_ATA)
				edma_mode = dma;
	}

	DPRINTF(("EDMA %sactive mode\n", (edma_mode == nodma) ? "not " : ""));

#ifndef MVSATA_WITHOUTDMA
	if (edma_mode == nodma) {
no_edma:
		/* Release any previously prepared EDMA DMA resources. */
		if (mvport->port_crqb != NULL)
			mvsata_edma_resource_purge(mvport, mvport->port_dmat,
			    mvport->port_crqb_dmamap, mvport->port_crqb);
		if (mvport->port_crpb != NULL)
			mvsata_edma_resource_purge(mvport, mvport->port_dmat,
			    mvport->port_crpb_dmamap, mvport->port_crpb);
		if (mvport->port_eprd != NULL)
			mvsata_edma_resource_purge(mvport, mvport->port_dmat,
			    mvport->port_eprd_dmamap, mvport->port_eprd);

		return;
	}

	/* Prepare CRQB/CRPB queues and the per-request ePRD tables. */
	if (mvport->port_crqb == NULL)
		mvport->port_crqb = mvsata_edma_resource_prepare(mvport,
		    mvport->port_dmat, &mvport->port_crqb_dmamap, crqb_size, 1);
	if (mvport->port_crpb == NULL)
		mvport->port_crpb = mvsata_edma_resource_prepare(mvport,
		    mvport->port_dmat, &mvport->port_crpb_dmamap, crpb_size, 0);
	if (mvport->port_eprd == NULL) {
		mvport->port_eprd = mvsata_edma_resource_prepare(mvport,
		    mvport->port_dmat, &mvport->port_eprd_dmamap, eprd_buf_size,
		    1);
		for (i = 0; i < MVSATA_EDMAQ_LEN; i++) {
			mvport->port_reqtbl[i].eprd_offset =
			    i * MVSATA_EPRD_MAX_SIZE;
			mvport->port_reqtbl[i].eprd = mvport->port_eprd +
			    i * MVSATA_EPRD_MAX_SIZE / sizeof(struct eprd);
		}
	}

	if (mvport->port_crqb == NULL || mvport->port_crpb == NULL ||
	    mvport->port_eprd == NULL) {
		/* Allocation failed: fall back to PIO for every drive. */
		aprint_error_dev(MVSATA_DEV2(mvport),
		    "channel %d: can't use EDMA\n", chp->ch_channel);
		s = splbio();
		for (drive = 0; drive < chp->ch_ndrives; drive++) {
			drvp = &chp->ch_drive[drive];

			/* If no drive, skip */
			if (drvp->drive_type == ATA_DRIVET_NONE)
				continue;

			drvp->drive_flags &= ~(ATA_DRIVE_UDMA | ATA_DRIVE_DMA);
		}
		splx(s);
		goto no_edma;
	}

	mvsata_edma_config(mvport, edma_mode);
	mvsata_edma_reset_qptr(mvport);
	mvsata_edma_enable(mvport);
#endif
}

#ifndef MVSATA_WITHOUTDMA
1052 static void 1053 mvsata_bio_start(struct ata_channel *chp, struct ata_xfer *xfer) 1054 { 1055 struct mvsata_port *mvport = (struct mvsata_port *)chp; 1056 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 1057 struct atac_softc *atac = chp->ch_atac; 1058 struct wdc_softc *wdc = CHAN_TO_WDC(chp); 1059 struct ata_bio *ata_bio = xfer->c_cmd; 1060 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive]; 1061 int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0; 1062 u_int16_t cyl; 1063 u_int8_t head, sect, cmd = 0; 1064 int nblks, error; 1065 1066 DPRINTFN(2, ("%s:%d: mvsata_bio_start: drive=%d\n", 1067 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive)); 1068 1069 if (xfer->c_flags & C_DMA) 1070 if (drvp->n_xfers <= NXFER) 1071 drvp->n_xfers++; 1072 1073 again: 1074 /* 1075 * 1076 * When starting a multi-sector transfer, or doing single-sector 1077 * transfers... 1078 */ 1079 if (xfer->c_skip == 0 || (ata_bio->flags & ATA_SINGLE) != 0) { 1080 if (ata_bio->flags & ATA_SINGLE) 1081 nblks = 1; 1082 else 1083 nblks = xfer->c_bcount / ata_bio->lp->d_secsize; 1084 /* Check for bad sectors and adjust transfer, if necessary. */ 1085 if ((ata_bio->lp->d_flags & D_BADSECT) != 0) { 1086 long blkdiff; 1087 int i; 1088 1089 for (i = 0; (blkdiff = ata_bio->badsect[i]) != -1; 1090 i++) { 1091 blkdiff -= ata_bio->blkno; 1092 if (blkdiff < 0) 1093 continue; 1094 if (blkdiff == 0) 1095 /* Replace current block of transfer. */ 1096 ata_bio->blkno = 1097 ata_bio->lp->d_secperunit - 1098 ata_bio->lp->d_nsectors - i - 1; 1099 if (blkdiff < nblks) { 1100 /* Bad block inside transfer. */ 1101 ata_bio->flags |= ATA_SINGLE; 1102 nblks = 1; 1103 } 1104 break; 1105 } 1106 /* Transfer is okay now. 
*/ 1107 } 1108 if (xfer->c_flags & C_DMA) { 1109 ata_bio->nblks = nblks; 1110 ata_bio->nbytes = xfer->c_bcount; 1111 1112 if (xfer->c_flags & C_POLL) 1113 sc->sc_enable_intr(mvport, 0 /*off*/); 1114 error = mvsata_edma_enqueue(mvport, ata_bio, 1115 (char *)xfer->c_databuf + xfer->c_skip); 1116 if (error) { 1117 if (error == EINVAL) { 1118 /* 1119 * We can't do DMA on this transfer 1120 * for some reason. Fall back to 1121 * PIO. 1122 */ 1123 xfer->c_flags &= ~C_DMA; 1124 error = 0; 1125 goto do_pio; 1126 } 1127 if (error == EBUSY) { 1128 aprint_error_dev(atac->atac_dev, 1129 "channel %d: EDMA Queue full\n", 1130 chp->ch_channel); 1131 /* 1132 * XXXX: Perhaps, after it waits for 1133 * a while, it is necessary to call 1134 * bio_start again. 1135 */ 1136 } 1137 ata_bio->error = ERR_DMA; 1138 ata_bio->r_error = 0; 1139 mvsata_bio_done(chp, xfer); 1140 return; 1141 } 1142 chp->ch_flags |= ATACH_DMA_WAIT; 1143 /* start timeout machinery */ 1144 if ((xfer->c_flags & C_POLL) == 0) 1145 callout_reset(&chp->ch_callout, 1146 ATA_DELAY / 1000 * hz, 1147 mvsata_edma_timeout, xfer); 1148 /* wait for irq */ 1149 goto intr; 1150 } /* else not DMA */ 1151 do_pio: 1152 if (ata_bio->flags & ATA_LBA48) { 1153 sect = 0; 1154 cyl = 0; 1155 head = 0; 1156 } else if (ata_bio->flags & ATA_LBA) { 1157 sect = (ata_bio->blkno >> 0) & 0xff; 1158 cyl = (ata_bio->blkno >> 8) & 0xffff; 1159 head = (ata_bio->blkno >> 24) & 0x0f; 1160 head |= WDSD_LBA; 1161 } else { 1162 int blkno = ata_bio->blkno; 1163 sect = blkno % ata_bio->lp->d_nsectors; 1164 sect++; /* Sectors begin with 1, not 0. 
*/ 1165 blkno /= ata_bio->lp->d_nsectors; 1166 head = blkno % ata_bio->lp->d_ntracks; 1167 blkno /= ata_bio->lp->d_ntracks; 1168 cyl = blkno; 1169 head |= WDSD_CHS; 1170 } 1171 ata_bio->nblks = min(nblks, ata_bio->multi); 1172 ata_bio->nbytes = ata_bio->nblks * ata_bio->lp->d_secsize; 1173 KASSERT(nblks == 1 || (ata_bio->flags & ATA_SINGLE) == 0); 1174 if (ata_bio->nblks > 1) 1175 cmd = (ata_bio->flags & ATA_READ) ? 1176 WDCC_READMULTI : WDCC_WRITEMULTI; 1177 else 1178 cmd = (ata_bio->flags & ATA_READ) ? 1179 WDCC_READ : WDCC_WRITE; 1180 1181 /* EDMA disable, if enabled this channel. */ 1182 if (mvport->port_edmamode != nodma) 1183 mvsata_edma_disable(mvport, 10 /* ms */, wait_flags); 1184 1185 mvsata_pmp_select(mvport, xfer->c_drive); 1186 1187 /* Do control operations specially. */ 1188 if (__predict_false(drvp->state < READY)) { 1189 /* 1190 * Actually, we want to be careful not to mess with 1191 * the control state if the device is currently busy, 1192 * but we can assume that we never get to this point 1193 * if that's the case. 1194 */ 1195 /* 1196 * If it's not a polled command, we need the kernel 1197 * thread 1198 */ 1199 if ((xfer->c_flags & C_POLL) == 0 && cpu_intr_p()) { 1200 chp->ch_queue->queue_freeze++; 1201 wakeup(&chp->ch_thread); 1202 return; 1203 } 1204 if (mvsata_bio_ready(mvport, ata_bio, xfer->c_drive, 1205 (xfer->c_flags & C_POLL) ? AT_POLL : 0) != 0) { 1206 mvsata_bio_done(chp, xfer); 1207 return; 1208 } 1209 } 1210 1211 /* Initiate command! */ 1212 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM); 1213 switch(wdc_wait_for_ready(chp, ATA_DELAY, wait_flags)) { 1214 case WDCWAIT_OK: 1215 break; 1216 case WDCWAIT_TOUT: 1217 goto timeout; 1218 case WDCWAIT_THR: 1219 return; 1220 } 1221 if (ata_bio->flags & ATA_LBA48) 1222 wdccommandext(chp, 0, atacmd_to48(cmd), 1223 ata_bio->blkno, nblks, 0, WDSD_LBA); 1224 else 1225 wdccommand(chp, 0, cmd, cyl, 1226 head, sect, nblks, 1227 (ata_bio->lp->d_type == DTYPE_ST506) ? 
1228 ata_bio->lp->d_precompcyl / 4 : 0); 1229 1230 /* start timeout machinery */ 1231 if ((xfer->c_flags & C_POLL) == 0) 1232 callout_reset(&chp->ch_callout, 1233 ATA_DELAY / 1000 * hz, wdctimeout, chp); 1234 } else if (ata_bio->nblks > 1) { 1235 /* The number of blocks in the last stretch may be smaller. */ 1236 nblks = xfer->c_bcount / ata_bio->lp->d_secsize; 1237 if (ata_bio->nblks > nblks) { 1238 ata_bio->nblks = nblks; 1239 ata_bio->nbytes = xfer->c_bcount; 1240 } 1241 } 1242 /* If this was a write and not using DMA, push the data. */ 1243 if ((ata_bio->flags & ATA_READ) == 0) { 1244 /* 1245 * we have to busy-wait here, we can't rely on running in 1246 * thread context. 1247 */ 1248 if (wdc_wait_for_drq(chp, ATA_DELAY, AT_POLL) != 0) { 1249 aprint_error_dev(atac->atac_dev, 1250 "channel %d: drive %d timeout waiting for DRQ," 1251 " st=0x%02x, err=0x%02x\n", 1252 chp->ch_channel, xfer->c_drive, chp->ch_status, 1253 chp->ch_error); 1254 ata_bio->error = TIMEOUT; 1255 mvsata_bio_done(chp, xfer); 1256 return; 1257 } 1258 if (chp->ch_status & WDCS_ERR) { 1259 ata_bio->error = ERROR; 1260 ata_bio->r_error = chp->ch_error; 1261 mvsata_bio_done(chp, xfer); 1262 return; 1263 } 1264 1265 wdc->dataout_pio(chp, drvp->drive_flags, 1266 (char *)xfer->c_databuf + xfer->c_skip, ata_bio->nbytes); 1267 } 1268 1269 intr: 1270 /* Wait for IRQ (either real or polled) */ 1271 if ((ata_bio->flags & ATA_POLL) == 0) { 1272 chp->ch_flags |= ATACH_IRQ_WAIT; 1273 } else { 1274 /* Wait for at last 400ns for status bit to be valid */ 1275 delay(1); 1276 if (chp->ch_flags & ATACH_DMA_WAIT) { 1277 mvsata_edma_wait(mvport, xfer, ATA_DELAY); 1278 sc->sc_enable_intr(mvport, 1 /*on*/); 1279 chp->ch_flags &= ~ATACH_DMA_WAIT; 1280 } 1281 mvsata_bio_intr(chp, xfer, 0); 1282 if ((ata_bio->flags & ATA_ITSDONE) == 0) 1283 goto again; 1284 } 1285 return; 1286 1287 timeout: 1288 aprint_error_dev(atac->atac_dev, 1289 "channel %d: drive %d not ready, st=0x%02x, err=0x%02x\n", 1290 chp->ch_channel, 
xfer->c_drive, chp->ch_status, chp->ch_error); 1291 ata_bio->error = TIMEOUT; 1292 mvsata_bio_done(chp, xfer); 1293 return; 1294 } 1295 1296 static int 1297 mvsata_bio_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq) 1298 { 1299 struct atac_softc *atac = chp->ch_atac; 1300 struct wdc_softc *wdc = CHAN_TO_WDC(chp); 1301 struct ata_bio *ata_bio = xfer->c_cmd; 1302 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive]; 1303 1304 DPRINTFN(2, ("%s:%d: mvsata_bio_intr: drive=%d\n", 1305 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive)); 1306 1307 chp->ch_flags &= ~(ATACH_IRQ_WAIT|ATACH_DMA_WAIT); 1308 1309 /* 1310 * If we missed an interrupt transfer, reset and restart. 1311 * Don't try to continue transfer, we may have missed cycles. 1312 */ 1313 if (xfer->c_flags & C_TIMEOU) { 1314 ata_bio->error = TIMEOUT; 1315 mvsata_bio_done(chp, xfer); 1316 return 1; 1317 } 1318 1319 /* Is it not a transfer, but a control operation? */ 1320 if (!(xfer->c_flags & C_DMA) && drvp->state < READY) { 1321 aprint_error_dev(atac->atac_dev, 1322 "channel %d: drive %d bad state %d in mvsata_bio_intr\n", 1323 chp->ch_channel, xfer->c_drive, drvp->state); 1324 panic("mvsata_bio_intr: bad state"); 1325 } 1326 1327 /* Ack interrupt done by wdc_wait_for_unbusy */ 1328 if (!(xfer->c_flags & C_DMA) && 1329 (wdc_wait_for_unbusy(chp, (irq == 0) ? ATA_DELAY : 0, AT_POLL) 1330 == WDCWAIT_TOUT)) { 1331 if (irq && (xfer->c_flags & C_TIMEOU) == 0) 1332 return 0; /* IRQ was not for us */ 1333 aprint_error_dev(atac->atac_dev, 1334 "channel %d: drive %d timeout, c_bcount=%d, c_skip%d\n", 1335 chp->ch_channel, xfer->c_drive, xfer->c_bcount, 1336 xfer->c_skip); 1337 ata_bio->error = TIMEOUT; 1338 mvsata_bio_done(chp, xfer); 1339 return 1; 1340 } 1341 1342 if (xfer->c_flags & C_DMA) { 1343 if (ata_bio->error == NOERROR) 1344 goto end; 1345 if (ata_bio->error == ERR_DMA) 1346 ata_dmaerr(drvp, 1347 (xfer->c_flags & C_POLL) ? 
			    AT_POLL : 0);
	}

	/* if we had an error, end */
	if (ata_bio->error != NOERROR) {
		mvsata_bio_done(chp, xfer);
		return 1;
	}

	/* If this was a read and not using DMA, fetch the data. */
	if ((ata_bio->flags & ATA_READ) != 0) {
		if ((chp->ch_status & WDCS_DRQ) != WDCS_DRQ) {
			aprint_error_dev(atac->atac_dev,
			    "channel %d: drive %d read intr before drq\n",
			    chp->ch_channel, xfer->c_drive);
			ata_bio->error = TIMEOUT;
			mvsata_bio_done(chp, xfer);
			return 1;
		}
		wdc->datain_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, ata_bio->nbytes);
	}

end:
	/* Advance the transfer bookkeeping past the chunk just moved. */
	ata_bio->blkno += ata_bio->nblks;
	ata_bio->blkdone += ata_bio->nblks;
	xfer->c_skip += ata_bio->nbytes;
	xfer->c_bcount -= ata_bio->nbytes;
	/* See if this transfer is complete. */
	if (xfer->c_bcount > 0) {
		if ((ata_bio->flags & ATA_POLL) == 0)
			/* Start the next operation */
			mvsata_bio_start(chp, xfer);
		else
			/* Let mvsata_bio_start do the loop */
			return 1;
	} else {	/* Done with this transfer */
		ata_bio->error = NOERROR;
		mvsata_bio_done(chp, xfer);
	}
	return 1;
}

/*
 * mvsata_bio_kill_xfer:
 *	Abort a queued/active bio transfer (device gone or channel reset)
 *	and report the appropriate error to the upper layer.
 */
static void
mvsata_bio_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_bio *ata_bio = xfer->c_cmd;
	int drive = xfer->c_drive;

	DPRINTFN(2, ("%s:%d: mvsata_bio_kill_xfer: drive=%d\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));

	/* EDMA restart, if enabled */
	if (!(xfer->c_flags & C_DMA) && mvport->port_edmamode != nodma) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}

	ata_free_xfer(chp, xfer);

	ata_bio->flags |= ATA_ITSDONE;
	switch (reason) {
	case KILL_GONE:
		ata_bio->error = ERR_NODEV;
		break;
	case KILL_RESET:
		ata_bio->error = ERR_RESET;
		break;
	default:
		aprint_error_dev(atac->atac_dev,
		    "mvsata_bio_kill_xfer: unknown reason %d\n", reason);
		panic("mvsata_bio_kill_xfer");
	}
	ata_bio->r_error = WDCE_ABRT;
	(*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc);
}

/*
 * mvsata_bio_done:
 *	Normal completion path for a bio transfer: stop the timeout,
 *	restart EDMA if it was bypassed for PIO, hand residuals back and
 *	kick the queue.
 */
static void
mvsata_bio_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct ata_bio *ata_bio = xfer->c_cmd;
	int drive = xfer->c_drive;

	DPRINTFN(2, ("%s:%d: mvsata_bio_done: drive=%d, flags=0x%x\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive,
	    (u_int)xfer->c_flags));

	callout_stop(&chp->ch_callout);

	/* EDMA restart, if enabled */
	if (!(xfer->c_flags & C_DMA) && mvport->port_edmamode != nodma) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}

	/* feed back residual bcount to our caller */
	ata_bio->bcount = xfer->c_bcount;

	/* mark controller inactive and free xfer */
	KASSERT(chp->ch_queue->active_xfer != NULL);
	chp->ch_queue->active_xfer = NULL;
	ata_free_xfer(chp, xfer);

	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		ata_bio->error = ERR_NODEV;
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		wakeup(&chp->ch_queue->active_xfer);
	}
	ata_bio->flags |= ATA_ITSDONE;
	(*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc);
	atastart(chp);
}

/*
 * mvsata_bio_ready:
 *	Bring a drive to READY state before its first transfer: recalibrate,
 *	program transfer modes (PIO/DMA/UDMA), geometry and multi count.
 *	Runs polled with interrupts disabled.  Returns 0 on success,
 *	-1 on failure (error already recorded in ata_bio).
 */
static int
mvsata_bio_ready(struct mvsata_port *mvport, struct ata_bio *ata_bio, int drive,
    int flags)
{
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_drive_datas *drvp = &chp->ch_drive[drive];
	const char *errstring;

	/*
	 * disable interrupts, all commands here should be quick
	 * enough to be able to poll, and we don't go here that often
	 */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	DELAY(10);
	errstring = "wait";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	wdccommandshort(chp, 0, WDCC_RECAL);
	/* Wait for at least 400ns for status bit to be valid */
	DELAY(1);
	errstring = "recal";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
	/* Don't try to set modes if controller can't be adjusted */
	if (atac->atac_set_modes == NULL)
		goto geometry;
	/* Also don't try if the drive didn't report its mode */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0)
		goto geometry;
	wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
	    0x08 | drvp->PIO_mode, WDSF_SET_MODE);
	errstring = "piomode";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
	if (drvp->drive_flags & ATA_DRIVE_UDMA)
		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
		    0x40 | drvp->UDMA_mode, WDSF_SET_MODE);
	else if (drvp->drive_flags & ATA_DRIVE_DMA)
		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
		    0x20 | drvp->DMA_mode, WDSF_SET_MODE);
	else
		goto geometry;
	errstring = "dmamode";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
geometry:
	if (ata_bio->flags & ATA_LBA)
		goto multimode;
	wdccommand(chp, 0, WDCC_IDP, ata_bio->lp->d_ncylinders,
	    ata_bio->lp->d_ntracks - 1, 0, ata_bio->lp->d_nsectors,
	    (ata_bio->lp->d_type == DTYPE_ST506) ?
	    ata_bio->lp->d_precompcyl / 4 : 0);
	errstring = "geometry";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
multimode:
	if (ata_bio->multi == 1)
		goto ready;
	wdccommand(chp, 0, WDCC_SETMULTI, 0, 0, 0, ata_bio->multi, 0);
	errstring = "setmulti";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
ready:
	drvp->state = READY;
	/*
	 * The drive is usable now
	 */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	delay(10);	/* some drives need a little delay here */
	return 0;

ctrltimeout:
	aprint_error_dev(atac->atac_dev, "channel %d: drive %d %s timed out\n",
	    chp->ch_channel, drive, errstring);
	ata_bio->error = TIMEOUT;
	goto ctrldone;
ctrlerror:
	aprint_error_dev(atac->atac_dev, "channel %d: drive %d %s ",
	    chp->ch_channel, drive, errstring);
	if (chp->ch_status & WDCS_DWF) {
		aprint_error("drive fault\n");
		ata_bio->error = ERR_DF;
	} else {
		aprint_error("error (%x)\n", chp->ch_error);
		ata_bio->r_error = chp->ch_error;
		ata_bio->error = ERROR;
	}
ctrldone:
	drvp->state = 0;
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	return -1;
}

/*
 * mvsata_wdc_cmd_start:
 *	Issue a raw ATA command (wdc_exec_command path) through the shadow
 *	registers.  EDMA must be off while the shadow block is used.
 */
static void
mvsata_wdc_cmd_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	int drive = xfer->c_drive;
	int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
	struct ata_command *ata_c = xfer->c_cmd;

	DPRINTFN(1, ("%s:%d: mvsata_cmd_start: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, drive));

	/* First, EDMA disable, if enabled this channel.
	 */
	if (mvport->port_edmamode != nodma)
		mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);

	mvsata_pmp_select(mvport, drive);

	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	switch(wdcwait(chp, ata_c->r_st_bmask | WDCS_DRQ,
	    ata_c->r_st_bmask, ata_c->timeout, wait_flags)) {
	case WDCWAIT_OK:
		break;
	case WDCWAIT_TOUT:
		ata_c->flags |= AT_TIMEOU;
		mvsata_wdc_cmd_done(chp, xfer);
		return;
	case WDCWAIT_THR:
		return;
	}
	if (ata_c->flags & AT_POLL)
		/* polled command, disable interrupts */
		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
	if ((ata_c->flags & AT_LBA48) != 0) {
		wdccommandext(chp, 0, ata_c->r_command,
		    ata_c->r_lba, ata_c->r_count, ata_c->r_features,
		    ata_c->r_device & ~0x10);
	} else {
		wdccommand(chp, 0, ata_c->r_command,
		    (ata_c->r_lba >> 8) & 0xffff,
		    (((ata_c->flags & AT_LBA) != 0) ? WDSD_LBA : 0) |
		    ((ata_c->r_lba >> 24) & 0x0f),
		    ata_c->r_lba & 0xff,
		    ata_c->r_count & 0xff,
		    ata_c->r_features & 0xff);
	}

	if ((ata_c->flags & AT_POLL) == 0) {
		chp->ch_flags |= ATACH_IRQ_WAIT;	/* wait for interrupt */
		callout_reset(&chp->ch_callout, ata_c->timeout / 1000 * hz,
		    wdctimeout, chp);
		return;
	}
	/*
	 * Polled command.  Wait for drive ready or drq.  Done in intr().
	 * Wait for at last 400ns for status bit to be valid.
	 */
	delay(10);	/* 400ns delay */
	mvsata_wdc_cmd_intr(chp, xfer, 0);
}

/*
 * mvsata_wdc_cmd_intr:
 *	Interrupt (or poll) handler for a raw ATA command; moves PIO data
 *	and completes the command.  Returns 0 when the IRQ was not for us.
 */
static int
mvsata_wdc_cmd_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct ata_command *ata_c = xfer->c_cmd;
	int bcount = ata_c->bcount;
	char *data = ata_c->data;
	int wflags;
	int drive_flags;

	if (ata_c->r_command == WDCC_IDENTIFY ||
	    ata_c->r_command == ATAPI_IDENTIFY_DEVICE)
		/*
		 * The IDENTIFY data has been designed as an array of
		 * u_int16_t, so we can byteswap it on the fly.
		 * Historically it's what we have always done so keeping it
		 * here ensure binary backward compatibility.
		 */
		drive_flags = ATA_DRIVE_NOSTREAM |
		    chp->ch_drive[xfer->c_drive].drive_flags;
	else
		/*
		 * Other data structure are opaque and should be transfered
		 * as is.
		 */
		drive_flags = chp->ch_drive[xfer->c_drive].drive_flags;

	if ((ata_c->flags & (AT_WAIT | AT_POLL)) == (AT_WAIT | AT_POLL))
		/* both wait and poll, we can tsleep here */
		wflags = AT_WAIT | AT_POLL;
	else
		wflags = AT_POLL;

again:
	DPRINTFN(1, ("%s:%d: mvsata_cmd_intr: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive));

	/*
	 * after a ATAPI_SOFT_RESET, the device will have released the bus.
	 * Reselect again, it doesn't hurt for others commands, and the time
	 * penalty for the extra register write is acceptable,
	 * wdc_exec_command() isn't called often (mostly for autoconfig)
	 */
	if ((xfer->c_flags & C_ATAPI) != 0) {
		MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	}
	if ((ata_c->flags & AT_XFDONE) != 0) {
		/*
		 * We have completed a data xfer.  The drive should now be
		 * in its initial state
		 */
		if (wdcwait(chp, ata_c->r_st_bmask | WDCS_DRQ,
		    ata_c->r_st_bmask, (irq == 0) ? ata_c->timeout : 0,
		    wflags) == WDCWAIT_TOUT) {
			if (irq && (xfer->c_flags & C_TIMEOU) == 0)
				return 0;	/* IRQ was not for us */
			ata_c->flags |= AT_TIMEOU;
		}
		goto out;
	}
	if (wdcwait(chp, ata_c->r_st_pmask, ata_c->r_st_pmask,
	    (irq == 0) ? ata_c->timeout : 0, wflags) == WDCWAIT_TOUT) {
		if (irq && (xfer->c_flags & C_TIMEOU) == 0)
			return 0;	/* IRQ was not for us */
		ata_c->flags |= AT_TIMEOU;
		goto out;
	}
	if (ata_c->flags & AT_READ) {
		if ((chp->ch_status & WDCS_DRQ) == 0) {
			ata_c->flags |= AT_TIMEOU;
			goto out;
		}
		wdc->datain_pio(chp, drive_flags, data, bcount);
		/* at this point the drive should be in its initial state */
		ata_c->flags |= AT_XFDONE;
		/*
		 * XXX checking the status register again here cause some
		 * hardware to timeout.
		 */
	} else if (ata_c->flags & AT_WRITE) {
		if ((chp->ch_status & WDCS_DRQ) == 0) {
			ata_c->flags |= AT_TIMEOU;
			goto out;
		}
		wdc->dataout_pio(chp, drive_flags, data, bcount);
		ata_c->flags |= AT_XFDONE;
		if ((ata_c->flags & AT_POLL) == 0) {
			chp->ch_flags |= ATACH_IRQ_WAIT;	/* wait for intr */
			callout_reset(&chp->ch_callout,
			    mstohz(ata_c->timeout), wdctimeout, chp);
			return 1;
		} else
			goto again;
	}
out:
	mvsata_wdc_cmd_done(chp, xfer);
	return 1;
}

/*
 * mvsata_wdc_cmd_kill_xfer:
 *	Abort a raw ATA command (device gone or channel reset).
 */
static void
mvsata_wdc_cmd_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer,
    int reason)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct ata_command *ata_c = xfer->c_cmd;

	DPRINTFN(1, ("%s:%d: mvsata_cmd_kill_xfer: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive));

	switch (reason) {
	case KILL_GONE:
		ata_c->flags |= AT_GONE;
		break;
	case KILL_RESET:
		ata_c->flags |= AT_RESET;
		break;
	default:
		aprint_error_dev(MVSATA_DEV2(mvport),
		    "mvsata_cmd_kill_xfer: unknown reason %d\n", reason);
		panic("mvsata_cmd_kill_xfer");
	}
	mvsata_wdc_cmd_done_end(chp, xfer);
}

/*
 * mvsata_wdc_cmd_done:
 *	Complete a raw ATA command: latch status/error, optionally read back
 *	the result registers (including the LBA48 high-order bytes via the
 *	HOB bit), then finish or kill the xfer.
 */
static void
mvsata_wdc_cmd_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_command *ata_c = xfer->c_cmd;

	DPRINTFN(1, ("%s:%d: mvsata_cmd_done: drive=%d, flags=0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    ata_c->flags));

	if (chp->ch_status & WDCS_DWF)
		ata_c->flags |= AT_DF;
	if (chp->ch_status & WDCS_ERR) {
		ata_c->flags |= AT_ERROR;
		ata_c->r_error = chp->ch_error;
	}
	if ((ata_c->flags & AT_READREG) != 0 &&
	    device_is_active(atac->atac_dev) &&
	    (ata_c->flags & (AT_ERROR | AT_DF))
== 0) { 1773 ata_c->r_status = MVSATA_WDC_READ_1(mvport, SRB_CS); 1774 ata_c->r_error = MVSATA_WDC_READ_1(mvport, SRB_FE); 1775 ata_c->r_count = MVSATA_WDC_READ_1(mvport, SRB_SC); 1776 ata_c->r_lba = 1777 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 0; 1778 ata_c->r_lba |= 1779 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 8; 1780 ata_c->r_lba |= 1781 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 16; 1782 ata_c->r_device = MVSATA_WDC_READ_1(mvport, SRB_H); 1783 if ((ata_c->flags & AT_LBA48) != 0) { 1784 if ((ata_c->flags & AT_POLL) != 0) { 1785 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, 1786 WDCTL_HOB|WDCTL_4BIT|WDCTL_IDS); 1787 } else { 1788 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, 1789 WDCTL_HOB|WDCTL_4BIT); 1790 } 1791 ata_c->r_count |= 1792 MVSATA_WDC_READ_1(mvport, SRB_SC) << 8; 1793 ata_c->r_lba |= 1794 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 24; 1795 ata_c->r_lba |= 1796 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 32; 1797 ata_c->r_lba |= 1798 (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 40; 1799 if ((ata_c->flags & AT_POLL) != 0) { 1800 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, 1801 WDCTL_4BIT|WDCTL_IDS); 1802 } else { 1803 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, 1804 WDCTL_4BIT); 1805 } 1806 } else { 1807 ata_c->r_lba |= 1808 (uint64_t)(ata_c->r_device & 0x0f) << 24; 1809 } 1810 } 1811 callout_stop(&chp->ch_callout); 1812 chp->ch_queue->active_xfer = NULL; 1813 if (ata_c->flags & AT_POLL) { 1814 /* enable interrupts */ 1815 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT); 1816 delay(10); /* some drives need a little delay here */ 1817 } 1818 if (chp->ch_drive[xfer->c_drive].drive_flags & ATA_DRIVE_WAITDRAIN) { 1819 mvsata_wdc_cmd_kill_xfer(chp, xfer, KILL_GONE); 1820 chp->ch_drive[xfer->c_drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN; 1821 wakeup(&chp->ch_queue->active_xfer); 1822 } else 1823 mvsata_wdc_cmd_done_end(chp, xfer); 1824 } 1825 1826 static void 1827 mvsata_wdc_cmd_done_end(struct ata_channel *chp, struct ata_xfer *xfer) 1828 { 1829 struct 
mvsata_port *mvport = (struct mvsata_port *)chp; 1830 struct ata_command *ata_c = xfer->c_cmd; 1831 1832 /* EDMA restart, if enabled */ 1833 if (mvport->port_edmamode != nodma) { 1834 mvsata_edma_reset_qptr(mvport); 1835 mvsata_edma_enable(mvport); 1836 } 1837 1838 ata_c->flags |= AT_DONE; 1839 ata_free_xfer(chp, xfer); 1840 if (ata_c->flags & AT_WAIT) 1841 wakeup(ata_c); 1842 else if (ata_c->callback) 1843 ata_c->callback(ata_c->callback_arg); 1844 atastart(chp); 1845 1846 return; 1847 } 1848 1849 #if NATAPIBUS > 0 1850 static void 1851 mvsata_atapi_start(struct ata_channel *chp, struct ata_xfer *xfer) 1852 { 1853 struct mvsata_softc *sc = (struct mvsata_softc *)chp->ch_atac; 1854 struct mvsata_port *mvport = (struct mvsata_port *)chp; 1855 struct atac_softc *atac = &sc->sc_wdcdev.sc_atac; 1856 struct scsipi_xfer *sc_xfer = xfer->c_cmd; 1857 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive]; 1858 const int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0; 1859 const char *errstring; 1860 1861 DPRINTFN(2, ("%s:%d:%d: mvsata_atapi_start: scsi flags 0x%x\n", 1862 device_xname(chp->ch_atac->atac_dev), chp->ch_channel, 1863 xfer->c_drive, sc_xfer->xs_control)); 1864 1865 if (mvport->port_edmamode != nodma) 1866 mvsata_edma_disable(mvport, 10 /* ms */, wait_flags); 1867 1868 mvsata_pmp_select(mvport, xfer->c_drive); 1869 1870 if ((xfer->c_flags & C_DMA) && (drvp->n_xfers <= NXFER)) 1871 drvp->n_xfers++; 1872 1873 /* Do control operations specially. 
*/ 1874 if (__predict_false(drvp->state < READY)) { 1875 /* If it's not a polled command, we need the kernel thread */ 1876 if ((sc_xfer->xs_control & XS_CTL_POLL) == 0 && cpu_intr_p()) { 1877 chp->ch_queue->queue_freeze++; 1878 wakeup(&chp->ch_thread); 1879 return; 1880 } 1881 /* 1882 * disable interrupts, all commands here should be quick 1883 * enough to be able to poll, and we don't go here that often 1884 */ 1885 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS); 1886 1887 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM); 1888 /* Don't try to set mode if controller can't be adjusted */ 1889 if (atac->atac_set_modes == NULL) 1890 goto ready; 1891 /* Also don't try if the drive didn't report its mode */ 1892 if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0) 1893 goto ready; 1894 errstring = "unbusy"; 1895 if (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags)) 1896 goto timeout; 1897 wdccommand(chp, 0, SET_FEATURES, 0, 0, 0, 1898 0x08 | drvp->PIO_mode, WDSF_SET_MODE); 1899 errstring = "piomode"; 1900 if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags)) 1901 goto timeout; 1902 if (chp->ch_status & WDCS_ERR) { 1903 if (chp->ch_error == WDCE_ABRT) { 1904 /* 1905 * Some ATAPI drives reject PIO settings. 1906 * Fall back to PIO mode 3 since that's the 1907 * minimum for ATAPI. 
1908 */ 1909 aprint_error_dev(atac->atac_dev, 1910 "channel %d drive %d: PIO mode %d rejected," 1911 " falling back to PIO mode 3\n", 1912 chp->ch_channel, xfer->c_drive, 1913 drvp->PIO_mode); 1914 if (drvp->PIO_mode > 3) 1915 drvp->PIO_mode = 3; 1916 } else 1917 goto error; 1918 } 1919 if (drvp->drive_flags & ATA_DRIVE_UDMA) 1920 wdccommand(chp, 0, SET_FEATURES, 0, 0, 0, 1921 0x40 | drvp->UDMA_mode, WDSF_SET_MODE); 1922 else 1923 if (drvp->drive_flags & ATA_DRIVE_DMA) 1924 wdccommand(chp, 0, SET_FEATURES, 0, 0, 0, 1925 0x20 | drvp->DMA_mode, WDSF_SET_MODE); 1926 else 1927 goto ready; 1928 errstring = "dmamode"; 1929 if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags)) 1930 goto timeout; 1931 if (chp->ch_status & WDCS_ERR) { 1932 if (chp->ch_error == WDCE_ABRT) { 1933 if (drvp->drive_flags & ATA_DRIVE_UDMA) 1934 goto error; 1935 else { 1936 /* 1937 * The drive rejected our DMA setting. 1938 * Fall back to mode 1. 1939 */ 1940 aprint_error_dev(atac->atac_dev, 1941 "channel %d drive %d:" 1942 " DMA mode %d rejected," 1943 " falling back to DMA mode 0\n", 1944 chp->ch_channel, xfer->c_drive, 1945 drvp->DMA_mode); 1946 if (drvp->DMA_mode > 0) 1947 drvp->DMA_mode = 0; 1948 } 1949 } else 1950 goto error; 1951 } 1952 ready: 1953 drvp->state = READY; 1954 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT); 1955 delay(10); /* some drives need a little delay here */ 1956 } 1957 /* start timeout machinery */ 1958 if ((sc_xfer->xs_control & XS_CTL_POLL) == 0) 1959 callout_reset(&chp->ch_callout, mstohz(sc_xfer->timeout), 1960 wdctimeout, chp); 1961 1962 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM); 1963 switch (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags) < 0) { 1964 case WDCWAIT_OK: 1965 break; 1966 case WDCWAIT_TOUT: 1967 aprint_error_dev(atac->atac_dev, "not ready, st = %02x\n", 1968 chp->ch_status); 1969 sc_xfer->error = XS_TIMEOUT; 1970 mvsata_atapi_reset(chp, xfer); 1971 return; 1972 case WDCWAIT_THR: 1973 return; 1974 } 1975 1976 /* 1977 * Even with WDCS_ERR, 
the device should accept a command packet 1978 * Limit length to what can be stuffed into the cylinder register 1979 * (16 bits). Some CD-ROMs seem to interpret '0' as 65536, 1980 * but not all devices do that and it's not obvious from the 1981 * ATAPI spec that that behaviour should be expected. If more 1982 * data is necessary, multiple data transfer phases will be done. 1983 */ 1984 1985 wdccommand(chp, 0, ATAPI_PKT_CMD, 1986 xfer->c_bcount <= 0xffff ? xfer->c_bcount : 0xffff, 0, 0, 0, 1987 (xfer->c_flags & C_DMA) ? ATAPI_PKT_CMD_FTRE_DMA : 0); 1988 1989 /* 1990 * If there is no interrupt for CMD input, busy-wait for it (done in 1991 * the interrupt routine. If it is a polled command, call the interrupt 1992 * routine until command is done. 1993 */ 1994 if ((sc_xfer->xs_periph->periph_cap & ATAPI_CFG_DRQ_MASK) != 1995 ATAPI_CFG_IRQ_DRQ || (sc_xfer->xs_control & XS_CTL_POLL)) { 1996 /* Wait for at last 400ns for status bit to be valid */ 1997 DELAY(1); 1998 mvsata_atapi_intr(chp, xfer, 0); 1999 } else 2000 chp->ch_flags |= ATACH_IRQ_WAIT; 2001 if (sc_xfer->xs_control & XS_CTL_POLL) { 2002 if (chp->ch_flags & ATACH_DMA_WAIT) { 2003 wdc_dmawait(chp, xfer, sc_xfer->timeout); 2004 chp->ch_flags &= ~ATACH_DMA_WAIT; 2005 } 2006 while ((sc_xfer->xs_status & XS_STS_DONE) == 0) { 2007 /* Wait for at last 400ns for status bit to be valid */ 2008 DELAY(1); 2009 mvsata_atapi_intr(chp, xfer, 0); 2010 } 2011 } 2012 return; 2013 2014 timeout: 2015 aprint_error_dev(atac->atac_dev, "channel %d drive %d: %s timed out\n", 2016 chp->ch_channel, xfer->c_drive, errstring); 2017 sc_xfer->error = XS_TIMEOUT; 2018 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT); 2019 delay(10); /* some drives need a little delay here */ 2020 mvsata_atapi_reset(chp, xfer); 2021 return; 2022 2023 error: 2024 aprint_error_dev(atac->atac_dev, 2025 "channel %d drive %d: %s error (0x%x)\n", 2026 chp->ch_channel, xfer->c_drive, errstring, chp->ch_error); 2027 sc_xfer->error = XS_SHORTSENSE; 2028 
sc_xfer->sense.atapi_sense = chp->ch_error; 2029 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT); 2030 delay(10); /* some drives need a little delay here */ 2031 mvsata_atapi_reset(chp, xfer); 2032 return; 2033 } 2034 2035 static int 2036 mvsata_atapi_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq) 2037 { 2038 struct mvsata_port *mvport = (struct mvsata_port *)chp; 2039 struct atac_softc *atac = chp->ch_atac; 2040 struct wdc_softc *wdc = CHAN_TO_WDC(chp); 2041 struct scsipi_xfer *sc_xfer = xfer->c_cmd; 2042 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive]; 2043 int len, phase, ire, error, retries=0, i; 2044 void *cmd; 2045 2046 DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_intr\n", 2047 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive)); 2048 2049 /* Is it not a transfer, but a control operation? */ 2050 if (drvp->state < READY) { 2051 aprint_error_dev(atac->atac_dev, 2052 "channel %d drive %d: bad state %d\n", 2053 chp->ch_channel, xfer->c_drive, drvp->state); 2054 panic("mvsata_atapi_intr: bad state"); 2055 } 2056 /* 2057 * If we missed an interrupt in a PIO transfer, reset and restart. 2058 * Don't try to continue transfer, we may have missed cycles. 2059 */ 2060 if ((xfer->c_flags & (C_TIMEOU | C_DMA)) == C_TIMEOU) { 2061 sc_xfer->error = XS_TIMEOUT; 2062 mvsata_atapi_reset(chp, xfer); 2063 return 1; 2064 } 2065 2066 /* Ack interrupt done in wdc_wait_for_unbusy */ 2067 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM); 2068 if (wdc_wait_for_unbusy(chp, 2069 (irq == 0) ? sc_xfer->timeout : 0, AT_POLL) == WDCWAIT_TOUT) { 2070 if (irq && (xfer->c_flags & C_TIMEOU) == 0) 2071 return 0; /* IRQ was not for us */ 2072 aprint_error_dev(atac->atac_dev, 2073 "channel %d: device timeout, c_bcount=%d, c_skip=%d\n", 2074 chp->ch_channel, xfer->c_bcount, xfer->c_skip); 2075 if (xfer->c_flags & C_DMA) 2076 ata_dmaerr(drvp, 2077 (xfer->c_flags & C_POLL) ? 
AT_POLL : 0); 2078 sc_xfer->error = XS_TIMEOUT; 2079 mvsata_atapi_reset(chp, xfer); 2080 return 1; 2081 } 2082 2083 /* 2084 * If we missed an IRQ and were using DMA, flag it as a DMA error 2085 * and reset device. 2086 */ 2087 if ((xfer->c_flags & C_TIMEOU) && (xfer->c_flags & C_DMA)) { 2088 ata_dmaerr(drvp, (xfer->c_flags & C_POLL) ? AT_POLL : 0); 2089 sc_xfer->error = XS_RESET; 2090 mvsata_atapi_reset(chp, xfer); 2091 return (1); 2092 } 2093 /* 2094 * if the request sense command was aborted, report the short sense 2095 * previously recorded, else continue normal processing 2096 */ 2097 2098 again: 2099 len = MVSATA_WDC_READ_1(mvport, SRB_LBAM) + 2100 256 * MVSATA_WDC_READ_1(mvport, SRB_LBAH); 2101 ire = MVSATA_WDC_READ_1(mvport, SRB_SC); 2102 phase = (ire & (WDCI_CMD | WDCI_IN)) | (chp->ch_status & WDCS_DRQ); 2103 DPRINTF(( 2104 "mvsata_atapi_intr: c_bcount %d len %d st 0x%x err 0x%x ire 0x%x :", 2105 xfer->c_bcount, len, chp->ch_status, chp->ch_error, ire)); 2106 2107 switch (phase) { 2108 case PHASE_CMDOUT: 2109 cmd = sc_xfer->cmd; 2110 DPRINTF(("PHASE_CMDOUT\n")); 2111 /* Init the DMA channel if necessary */ 2112 if (xfer->c_flags & C_DMA) { 2113 error = mvsata_bdma_init(mvport, sc_xfer, 2114 (char *)xfer->c_databuf + xfer->c_skip); 2115 if (error) { 2116 if (error == EINVAL) { 2117 /* 2118 * We can't do DMA on this transfer 2119 * for some reason. Fall back to PIO. 2120 */ 2121 xfer->c_flags &= ~C_DMA; 2122 error = 0; 2123 } else { 2124 sc_xfer->error = XS_DRIVER_STUFFUP; 2125 break; 2126 } 2127 } 2128 } 2129 2130 /* send packet command */ 2131 /* Commands are 12 or 16 bytes long. 
It's 32-bit aligned */ 2132 wdc->dataout_pio(chp, drvp->drive_flags, cmd, sc_xfer->cmdlen); 2133 2134 /* Start the DMA channel if necessary */ 2135 if (xfer->c_flags & C_DMA) { 2136 mvsata_bdma_start(mvport); 2137 chp->ch_flags |= ATACH_DMA_WAIT; 2138 } 2139 2140 if ((sc_xfer->xs_control & XS_CTL_POLL) == 0) 2141 chp->ch_flags |= ATACH_IRQ_WAIT; 2142 return 1; 2143 2144 case PHASE_DATAOUT: 2145 /* write data */ 2146 DPRINTF(("PHASE_DATAOUT\n")); 2147 if ((sc_xfer->xs_control & XS_CTL_DATA_OUT) == 0 || 2148 (xfer->c_flags & C_DMA) != 0) { 2149 aprint_error_dev(atac->atac_dev, 2150 "channel %d drive %d: bad data phase DATAOUT\n", 2151 chp->ch_channel, xfer->c_drive); 2152 if (xfer->c_flags & C_DMA) 2153 ata_dmaerr(drvp, 2154 (xfer->c_flags & C_POLL) ? AT_POLL : 0); 2155 sc_xfer->error = XS_TIMEOUT; 2156 mvsata_atapi_reset(chp, xfer); 2157 return 1; 2158 } 2159 xfer->c_lenoff = len - xfer->c_bcount; 2160 if (xfer->c_bcount < len) { 2161 aprint_error_dev(atac->atac_dev, "channel %d drive %d:" 2162 " warning: write only %d of %d requested bytes\n", 2163 chp->ch_channel, xfer->c_drive, xfer->c_bcount, 2164 len); 2165 len = xfer->c_bcount; 2166 } 2167 2168 wdc->dataout_pio(chp, drvp->drive_flags, 2169 (char *)xfer->c_databuf + xfer->c_skip, len); 2170 2171 for (i = xfer->c_lenoff; i > 0; i -= 2) 2172 MVSATA_WDC_WRITE_2(mvport, SRB_PIOD, 0); 2173 2174 xfer->c_skip += len; 2175 xfer->c_bcount -= len; 2176 if ((sc_xfer->xs_control & XS_CTL_POLL) == 0) 2177 chp->ch_flags |= ATACH_IRQ_WAIT; 2178 return 1; 2179 2180 case PHASE_DATAIN: 2181 /* Read data */ 2182 DPRINTF(("PHASE_DATAIN\n")); 2183 if ((sc_xfer->xs_control & XS_CTL_DATA_IN) == 0 || 2184 (xfer->c_flags & C_DMA) != 0) { 2185 aprint_error_dev(atac->atac_dev, 2186 "channel %d drive %d: bad data phase DATAIN\n", 2187 chp->ch_channel, xfer->c_drive); 2188 if (xfer->c_flags & C_DMA) 2189 ata_dmaerr(drvp, 2190 (xfer->c_flags & C_POLL) ? 
AT_POLL : 0); 2191 sc_xfer->error = XS_TIMEOUT; 2192 mvsata_atapi_reset(chp, xfer); 2193 return 1; 2194 } 2195 xfer->c_lenoff = len - xfer->c_bcount; 2196 if (xfer->c_bcount < len) { 2197 aprint_error_dev(atac->atac_dev, "channel %d drive %d:" 2198 " warning: reading only %d of %d bytes\n", 2199 chp->ch_channel, xfer->c_drive, xfer->c_bcount, 2200 len); 2201 len = xfer->c_bcount; 2202 } 2203 2204 wdc->datain_pio(chp, drvp->drive_flags, 2205 (char *)xfer->c_databuf + xfer->c_skip, len); 2206 2207 if (xfer->c_lenoff > 0) 2208 wdcbit_bucket(chp, len - xfer->c_bcount); 2209 2210 xfer->c_skip += len; 2211 xfer->c_bcount -= len; 2212 if ((sc_xfer->xs_control & XS_CTL_POLL) == 0) 2213 chp->ch_flags |= ATACH_IRQ_WAIT; 2214 return 1; 2215 2216 case PHASE_ABORTED: 2217 case PHASE_COMPLETED: 2218 DPRINTF(("PHASE_COMPLETED\n")); 2219 if (xfer->c_flags & C_DMA) 2220 xfer->c_bcount -= sc_xfer->datalen; 2221 sc_xfer->resid = xfer->c_bcount; 2222 mvsata_atapi_phase_complete(xfer); 2223 return 1; 2224 2225 default: 2226 if (++retries<500) { 2227 DELAY(100); 2228 chp->ch_status = MVSATA_WDC_READ_1(mvport, SRB_CS); 2229 chp->ch_error = MVSATA_WDC_READ_1(mvport, SRB_FE); 2230 goto again; 2231 } 2232 aprint_error_dev(atac->atac_dev, 2233 "channel %d drive %d: unknown phase 0x%x\n", 2234 chp->ch_channel, xfer->c_drive, phase); 2235 if (chp->ch_status & WDCS_ERR) { 2236 sc_xfer->error = XS_SHORTSENSE; 2237 sc_xfer->sense.atapi_sense = chp->ch_error; 2238 } else { 2239 if (xfer->c_flags & C_DMA) 2240 ata_dmaerr(drvp, 2241 (xfer->c_flags & C_POLL) ? 
AT_POLL : 0); 2242 sc_xfer->error = XS_RESET; 2243 mvsata_atapi_reset(chp, xfer); 2244 return (1); 2245 } 2246 } 2247 DPRINTF(("mvsata_atapi_intr: mvsata_atapi_done() (end), error 0x%x " 2248 "sense 0x%x\n", sc_xfer->error, sc_xfer->sense.atapi_sense)); 2249 mvsata_atapi_done(chp, xfer); 2250 return 1; 2251 } 2252 2253 static void 2254 mvsata_atapi_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, 2255 int reason) 2256 { 2257 struct mvsata_port *mvport = (struct mvsata_port *)chp; 2258 struct scsipi_xfer *sc_xfer = xfer->c_cmd; 2259 2260 /* remove this command from xfer queue */ 2261 switch (reason) { 2262 case KILL_GONE: 2263 sc_xfer->error = XS_DRIVER_STUFFUP; 2264 break; 2265 2266 case KILL_RESET: 2267 sc_xfer->error = XS_RESET; 2268 break; 2269 2270 default: 2271 aprint_error_dev(MVSATA_DEV2(mvport), 2272 "mvsata_atapi_kill_xfer: unknown reason %d\n", reason); 2273 panic("mvsata_atapi_kill_xfer"); 2274 } 2275 ata_free_xfer(chp, xfer); 2276 scsipi_done(sc_xfer); 2277 } 2278 2279 static void 2280 mvsata_atapi_reset(struct ata_channel *chp, struct ata_xfer *xfer) 2281 { 2282 struct atac_softc *atac = chp->ch_atac; 2283 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive]; 2284 struct scsipi_xfer *sc_xfer = xfer->c_cmd; 2285 2286 mvsata_pmp_select(mvport, xfer->c_drive); 2287 2288 wdccommandshort(chp, 0, ATAPI_SOFT_RESET); 2289 drvp->state = 0; 2290 if (wdc_wait_for_unbusy(chp, WDC_RESET_WAIT, AT_POLL) != 0) { 2291 printf("%s:%d:%d: reset failed\n", device_xname(atac->atac_dev), 2292 chp->ch_channel, xfer->c_drive); 2293 sc_xfer->error = XS_SELTIMEOUT; 2294 } 2295 mvsata_atapi_done(chp, xfer); 2296 return; 2297 } 2298 2299 static void 2300 mvsata_atapi_phase_complete(struct ata_xfer *xfer) 2301 { 2302 struct ata_channel *chp = xfer->c_chp; 2303 struct atac_softc *atac = chp->ch_atac; 2304 struct wdc_softc *wdc = CHAN_TO_WDC(chp); 2305 struct scsipi_xfer *sc_xfer = xfer->c_cmd; 2306 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive]; 2307 
	/* wait for DSC if needed */
	if (drvp->drive_flags & ATA_DRIVE_ATAPIDSCW) {
		DPRINTFN(1,
		    ("%s:%d:%d: mvsata_atapi_phase_complete: polldsc %d\n",
		    device_xname(atac->atac_dev), chp->ch_channel,
		    xfer->c_drive, xfer->c_dscpoll));
		/* DSC polling reschedules via callout; impossible before clocks run */
		if (cold)
			panic("mvsata_atapi_phase_complete: cold");

		if (wdcwait(chp, WDCS_DSC, WDCS_DSC, 10, AT_POLL) ==
		    WDCWAIT_TOUT) {
			/* 10ms not enough, try again in 1 tick */
			if (xfer->c_dscpoll++ > mstohz(sc_xfer->timeout)) {
				aprint_error_dev(atac->atac_dev,
				    "channel %d: wait_for_dsc failed\n",
				    chp->ch_channel);
				sc_xfer->error = XS_TIMEOUT;
				mvsata_atapi_reset(chp, xfer);
				return;
			} else
				callout_reset(&chp->ch_callout, 1,
				    mvsata_atapi_polldsc, xfer);
			return;
		}
	}

	/*
	 * Some drive occasionally set WDCS_ERR with
	 * "ATA illegal length indication" in the error
	 * register. If we read some data the sense is valid
	 * anyway, so don't report the error.
	 */
	if (chp->ch_status & WDCS_ERR &&
	    ((sc_xfer->xs_control & XS_CTL_REQSENSE) == 0 ||
	    sc_xfer->resid == sc_xfer->datalen)) {
		/* save the short sense */
		sc_xfer->error = XS_SHORTSENSE;
		sc_xfer->sense.atapi_sense = chp->ch_error;
		if ((sc_xfer->xs_periph->periph_quirks & PQUIRK_NOSENSE) == 0) {
			/* ask scsipi to send a REQUEST_SENSE */
			sc_xfer->error = XS_BUSY;
			sc_xfer->status = SCSI_CHECK;
		} else
		/* quirky device: on DMA trouble, count the error and reset */
		if (wdc->dma_status & (WDC_DMAST_NOIRQ | WDC_DMAST_ERR)) {
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			sc_xfer->error = XS_RESET;
			mvsata_atapi_reset(chp, xfer);
			return;
		}
	}
	/* non-zero bcount here is normal for short reads; just log it */
	if (xfer->c_bcount != 0)
		DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_intr:"
		    " bcount value is %d after io\n",
		    device_xname(atac->atac_dev), chp->ch_channel,
		    xfer->c_drive, xfer->c_bcount));
#ifdef DIAGNOSTIC
	if (xfer->c_bcount < 0)
		aprint_error_dev(atac->atac_dev,
		    "channel %d drive %d: mvsata_atapi_intr:"
		    " warning: bcount value is %d after io\n",
		    chp->ch_channel, xfer->c_drive, xfer->c_bcount);
#endif

	DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_phase_complete:"
	    " mvsata_atapi_done(), error 0x%x sense 0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    sc_xfer->error, sc_xfer->sense.atapi_sense));
	mvsata_atapi_done(chp, xfer);
}

/*
 * Final completion of an ATAPI xfer: stop the timeout callout, release the
 * channel and the xfer, wake any thread draining the queue, hand the
 * scsipi_xfer back to scsipi and restart the ATA queue.
 */
static void
mvsata_atapi_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	int drive = xfer->c_drive;

	DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_done: flags 0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    (u_int)xfer->c_flags));
	callout_stop(&chp->ch_callout);
	/* mark controller inactive and free the command */
	chp->ch_queue->active_xfer = NULL;
	ata_free_xfer(chp, xfer);

	/* if someone is waiting for the queue to drain, report and wake them */
	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		sc_xfer->error = XS_DRIVER_STUFFUP;
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		wakeup(&chp->ch_queue->active_xfer);
	}

	DPRINTFN(1, ("%s:%d: mvsata_atapi_done: scsipi_done\n",
	    device_xname(atac->atac_dev), chp->ch_channel));
	scsipi_done(sc_xfer);
	DPRINTFN(1, ("%s:%d: atastart from wdc_atapi_done, flags 0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, chp->ch_flags));
	atastart(chp);
}

/* callout handler: retry the DSC wait in mvsata_atapi_phase_complete() */
static void
mvsata_atapi_polldsc(void *arg)
{

	mvsata_atapi_phase_complete(arg);
}
#endif	/* NATAPIBUS > 0 */


/*
 * XXXX: Shall we need lock for race condition in mvsata_edma_enqueue{,_gen2}(),
 * if supported queuing command by atabus? The race condition will not happen
 * if this is called only to the thread of atabus.
 */
/*
 * Queue one ata_bio on the EDMA request queue: allocate a host queue tag,
 * load the data buffer for DMA, build the ePRD scatter/gather table and the
 * CRQB, then advance the hardware request-queue in-pointer.  Returns 0 on
 * success, EBUSY if the queue or the tag table is full, or the DMA buffer
 * load error.
 */
static int
mvsata_edma_enqueue(struct mvsata_port *mvport, struct ata_bio *ata_bio,
		    void *databuf)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct eprd *eprd;
	bus_addr_t crqb_base_addr;
	bus_dmamap_t data_dmamap;
	uint32_t reg;
	int quetag, erqqip, erqqop, next, rv, i;

	DPRINTFN(2, ("%s:%d:%d: mvsata_edma_enqueue:"
	    " blkno=0x%" PRIx64 ", nbytes=%d, flags=0x%x\n",
	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
	    mvport->port, ata_bio->blkno, ata_bio->nbytes, ata_bio->flags));

	/* read current request-queue out/in pointers from the hardware */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQOP);
	erqqop = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP);
	erqqip = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	next = erqqip;
	MVSATA_EDMAQ_INC(next);
	if (next == erqqop)
		/* queue full */
		return EBUSY;
	if ((quetag = mvsata_quetag_get(mvport)) == -1)
		/* tag nothing */
		return EBUSY;
	DPRINTFN(2, (" erqqip=%d, quetag=%d\n", erqqip, quetag));

	rv = mvsata_dma_bufload(mvport, quetag, databuf, ata_bio->nbytes,
	    ata_bio->flags);
	if (rv != 0)
		return rv;

	KASSERT(mvport->port_reqtbl[quetag].xfer == NULL);
	KASSERT(chp->ch_queue->active_xfer != NULL);
	mvport->port_reqtbl[quetag].xfer = chp->ch_queue->active_xfer;

	/* setup EDMA Physical Region Descriptors (ePRD) Table Data */
	data_dmamap = mvport->port_reqtbl[quetag].data_dmamap;
	eprd = mvport->port_reqtbl[quetag].eprd;
	for (i = 0; i < data_dmamap->dm_nsegs; i++) {
		bus_addr_t ds_addr = data_dmamap->dm_segs[i].ds_addr;
		bus_size_t ds_len = data_dmamap->dm_segs[i].ds_len;

		eprd->prdbal = htole32(ds_addr & EPRD_PRDBAL_MASK);
		eprd->bytecount = htole32(EPRD_BYTECOUNT(ds_len));
		eprd->eot = htole16(0);
		eprd->prdbah = htole32((ds_addr >> 16) >> 16);
		eprd++;
	}
	/* mark the last descriptor as end-of-table */
	(eprd - 1)->eot |= htole16(EPRD_EOT);
#ifdef MVSATA_DEBUG
	if (mvsata_debug >= 3)
		mvsata_print_eprd(mvport, quetag);
#endif
	bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap,
	    mvport->port_reqtbl[quetag].eprd_offset, MVSATA_EPRD_MAX_SIZE,
	    BUS_DMASYNC_PREWRITE);

	/* setup EDMA Command Request Block (CRQB) Data */
	sc->sc_edma_setup_crqb(mvport, erqqip, quetag, ata_bio);
#ifdef MVSATA_DEBUG
	if (mvsata_debug >= 3)
		mvsata_print_crqb(mvport, erqqip);
#endif
	bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap,
	    erqqip * sizeof(union mvsata_crqb),
	    sizeof(union mvsata_crqb), BUS_DMASYNC_PREWRITE);

	MVSATA_EDMAQ_INC(erqqip);

	/* publish the new in-pointer; this hands the CRQB to the EDMA engine */
	crqb_base_addr = mvport->port_crqb_dmamap->dm_segs[0].ds_addr &
	    (EDMA_REQQP_ERQQBAP_MASK | EDMA_REQQP_ERQQBA_MASK);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, (crqb_base_addr >> 16) >> 16);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP,
	    crqb_base_addr | (erqqip << EDMA_REQQP_ERQQP_SHIFT));

	return 0;
}

/*
 * Process completed EDMA responses (CRPBs).  If xfer1 is NULL, handle every
 * pending response and return the count handled; otherwise stop once xfer1
 * has been seen and return 1 (0 if it was not found).
 */
static int
mvsata_edma_handle(struct mvsata_port *mvport, struct ata_xfer *xfer1)
{
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct crpb *crpb;
	struct ata_bio *ata_bio;
	struct ata_xfer *xfer;
	uint32_t reg;
	int erqqip, erqqop, erpqip, erpqop, prev_erpqop, quetag, handled = 0, n;

	/* First, Sync for Request Queue buffer */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQOP);
	erqqop = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	if (mvport->port_prev_erqqop != erqqop) {
		const int s = sizeof(union mvsata_crqb);

		/* the ring may have wrapped; sync in up to two pieces */
		if (mvport->port_prev_erqqop < erqqop)
			n = erqqop - mvport->port_prev_erqqop;
		else {
			if (erqqop > 0)
				bus_dmamap_sync(mvport->port_dmat,
				    mvport->port_crqb_dmamap, 0, erqqop * s,
				    BUS_DMASYNC_POSTWRITE);
			n = MVSATA_EDMAQ_LEN - mvport->port_prev_erqqop;
		}
		if (n > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crqb_dmamap,
			    mvport->port_prev_erqqop * s, n * s,
			    BUS_DMASYNC_POSTWRITE);
		mvport->port_prev_erqqop = erqqop;
	}

	/* response-queue in/out pointers */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_RESQIP);
	erpqip = (reg & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT;
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_RESQOP);
	erpqop = (reg & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT;

	DPRINTFN(3, ("%s:%d:%d: mvsata_edma_handle: erpqip=%d, erpqop=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
	    mvport->port, erpqip, erpqop));

	if (erpqop == erpqip)
		return 0;

	/* sync the CRPB region we are about to read, handling wrap-around */
	if (erpqop < erpqip)
		n = erpqip - erpqop;
	else {
		if (erpqip > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crpb_dmamap,
			    0, erpqip * sizeof(struct crpb),
			    BUS_DMASYNC_POSTREAD);
		n = MVSATA_EDMAQ_LEN - erpqop;
	}
	if (n > 0)
		bus_dmamap_sync(mvport->port_dmat, mvport->port_crpb_dmamap,
		    erpqop * sizeof(struct crpb),
		    n * sizeof(struct crpb), BUS_DMASYNC_POSTREAD);

	prev_erpqop = erpqop;
	while (erpqop != erpqip) {
#ifdef MVSATA_DEBUG
		if (mvsata_debug >= 3)
			mvsata_print_crpb(mvport, erpqop);
#endif
		crpb = mvport->port_crpb + erpqop;
		quetag = CRPB_CHOSTQUETAG(le16toh(crpb->id));
		KASSERT(chp->ch_queue->active_xfer != NULL);
		xfer = chp->ch_queue->active_xfer;
		KASSERT(xfer == mvport->port_reqtbl[quetag].xfer);
#ifdef DIAGNOSTIC
		if (xfer == NULL)
			panic("unknown response received: %s:%d:%d: tag 0x%x\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port, quetag);
#endif

		bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap,
		    mvport->port_reqtbl[quetag].eprd_offset,
		    MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_POSTWRITE);

		/* translate CRPB status into ata_bio error codes */
		chp->ch_status = CRPB_CDEVSTS(le16toh(crpb->rspflg));
		chp->ch_error = CRPB_CEDMASTS(le16toh(crpb->rspflg));
		ata_bio = xfer->c_cmd;
		ata_bio->error = NOERROR;
		ata_bio->r_error = 0;
		if (chp->ch_status & WDCS_ERR)
			ata_bio->error = ERROR;
		if (chp->ch_status & WDCS_BSY)
			ata_bio->error = TIMEOUT;
		if (chp->ch_error)
			ata_bio->error = ERR_DMA;

		mvsata_dma_bufunload(mvport, quetag, ata_bio->flags);
		mvport->port_reqtbl[quetag].xfer = NULL;
		mvsata_quetag_put(mvport, quetag);
		MVSATA_EDMAQ_INC(erpqop);

#if 1	/* XXXX: flags clears here, because necessary the atabus layer. */
		erqqip = (MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP) &
		    EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
		if (erpqop == erqqip)
			chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_IRQ_WAIT);
#endif
		mvsata_bio_intr(chp, xfer, 1);
		if (xfer1 == NULL)
			handled++;
		else if (xfer == xfer1) {
			handled = 1;
			break;
		}
	}
	/* re-arm the CRPB region for the next hardware writes (wrap-aware) */
	if (prev_erpqop < erpqop)
		n = erpqop - prev_erpqop;
	else {
		if (erpqop > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crpb_dmamap, 0,
			    erpqop * sizeof(struct crpb), BUS_DMASYNC_PREREAD);
		n = MVSATA_EDMAQ_LEN - prev_erpqop;
	}
	if (n > 0)
		bus_dmamap_sync(mvport->port_dmat, mvport->port_crpb_dmamap,
		    prev_erpqop * sizeof(struct crpb),
		    n * sizeof(struct crpb), BUS_DMASYNC_PREREAD);

	/* tell the hardware how far we have consumed the response queue */
	reg &= ~EDMA_RESQP_ERPQP_MASK;
	reg |= (erpqop << EDMA_RESQP_ERPQP_SHIFT);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, reg);

#if 0	/* already cleared ago? */
	erqqip = (MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP) &
	    EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	if (erpqop == erqqip)
		chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_IRQ_WAIT);
#endif

	return handled;
}

/*
 * Poll/sleep until the given xfer completes via mvsata_edma_handle(), or
 * 'timeout' (ms) elapses.  Returns 0 on completion, 1 on timeout (after
 * removing the xfer from the request queue and flagging C_TIMEOU).
 */
static int
mvsata_edma_wait(struct mvsata_port *mvport, struct ata_xfer *xfer, int timeout)
{
	struct ata_bio *ata_bio = xfer->c_cmd;
	int xtime;

	for (xtime = 0; xtime < timeout / 10; xtime++) {
		if (mvsata_edma_handle(mvport, xfer))
			return 0;
		if (ata_bio->flags & ATA_POLL)
			delay(10000);
		else
			/*
			 * NOTE(review): sleeps on &xfer (address of the local
			 * pointer), so no wakeup() can match — this is in
			 * effect a pure 10ms timed sleep.  Confirm intent.
			 */
			tsleep(&xfer, PRIBIO, "mvsataipl", mstohz(10));
	}

	DPRINTF(("mvsata_edma_wait: timeout: %p\n", xfer));
	mvsata_edma_rqq_remove(mvport, xfer);
	xfer->c_flags |= C_TIMEOU;
	return 1;
}

/*
 * Callout handler fired when an EDMA xfer exceeds its timeout: remove it
 * from the request queue and complete it with C_TIMEOU set.
 */
static void
mvsata_edma_timeout(void *arg)
{
	struct ata_xfer *xfer = (struct ata_xfer *)arg;
	struct ata_channel *chp = xfer->c_chp;
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	int s;

	s = splbio();
	DPRINTF(("mvsata_edma_timeout: %p\n", xfer));
	if ((chp->ch_flags & ATACH_IRQ_WAIT) != 0) {
		mvsata_edma_rqq_remove(mvport, xfer);
		xfer->c_flags |= C_TIMEOU;
		mvsata_bio_intr(chp, xfer, 1);
	}
	splx(s);
}

/*
 * Remove 'xfer' from the EDMA request queue: reset the port, drain any
 * completed responses, then rebuild the CRQB ring with every still-pending
 * request except 'xfer' and restart EDMA.
 */
static void
mvsata_edma_rqq_remove(struct mvsata_port *mvport, struct ata_xfer *xfer)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct ata_bio *ata_bio;
	bus_addr_t crqb_base_addr;
	int erqqip, i;

	/* First, hardware reset, stop EDMA */
	mvsata_hreset_port(mvport);

	/* cleanup completed EDMA safely */
	mvsata_edma_handle(mvport, NULL);

	bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 0,
	    sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN, BUS_DMASYNC_PREWRITE);
	for (i = 0, erqqip = 0; i < MVSATA_EDMAQ_LEN; i++) {
		if (mvport->port_reqtbl[i].xfer == NULL)
2703 continue; 2704 2705 ata_bio = mvport->port_reqtbl[i].xfer->c_cmd; 2706 if (mvport->port_reqtbl[i].xfer == xfer) { 2707 /* remove xfer from EDMA request queue */ 2708 bus_dmamap_sync(mvport->port_dmat, 2709 mvport->port_eprd_dmamap, 2710 mvport->port_reqtbl[i].eprd_offset, 2711 MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_POSTWRITE); 2712 mvsata_dma_bufunload(mvport, i, ata_bio->flags); 2713 mvport->port_reqtbl[i].xfer = NULL; 2714 mvsata_quetag_put(mvport, i); 2715 continue; 2716 } 2717 2718 sc->sc_edma_setup_crqb(mvport, erqqip, i, ata_bio); 2719 erqqip++; 2720 } 2721 bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 0, 2722 sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN, 2723 BUS_DMASYNC_POSTWRITE); 2724 2725 mvsata_edma_config(mvport, mvport->port_edmamode); 2726 mvsata_edma_reset_qptr(mvport); 2727 mvsata_edma_enable(mvport); 2728 2729 crqb_base_addr = mvport->port_crqb_dmamap->dm_segs[0].ds_addr & 2730 (EDMA_REQQP_ERQQBAP_MASK | EDMA_REQQP_ERQQBA_MASK); 2731 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, (crqb_base_addr >> 16) >> 16); 2732 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 2733 crqb_base_addr | (erqqip << EDMA_REQQP_ERQQP_SHIFT)); 2734 } 2735 2736 #if NATAPIBUS > 0 2737 static int 2738 mvsata_bdma_init(struct mvsata_port *mvport, struct scsipi_xfer *sc_xfer, 2739 void *databuf) 2740 { 2741 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 2742 struct eprd *eprd; 2743 bus_dmamap_t data_dmamap; 2744 bus_addr_t eprd_addr; 2745 int quetag, rv; 2746 2747 DPRINTFN(2, 2748 ("%s:%d:%d: mvsata_bdma_init: datalen=%d, xs_control=0x%x\n", 2749 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc, 2750 mvport->port, sc_xfer->datalen, sc_xfer->xs_control)); 2751 2752 if ((quetag = mvsata_quetag_get(mvport)) == -1) 2753 /* tag nothing */ 2754 return EBUSY; 2755 DPRINTFN(2, (" quetag=%d\n", quetag)); 2756 2757 rv = mvsata_dma_bufload(mvport, quetag, databuf, sc_xfer->datalen, 2758 sc_xfer->xs_control & XS_CTL_DATA_IN ? 
ATA_READ : 0); 2759 if (rv != 0) 2760 return rv; 2761 2762 KASSERT(chp->ch_queue->active_xfer != NULL); 2763 KASSERT(mvport->port_reqtbl[quetag].xfer == NULL); 2764 mvport->port_reqtbl[quetag].xfer = chp->ch_queue->active_xfer; 2765 2766 /* setup EDMA Physical Region Descriptors (ePRD) Table Data */ 2767 data_dmamap = mvport->port_reqtbl[quetag].data_dmamap; 2768 eprd = mvport->port_reqtbl[quetag].eprd; 2769 for (i = 0; i < data_dmamap->dm_nsegs; i++) { 2770 bus_addr_t ds_addr = data_dmamap->dm_segs[i].ds_addr; 2771 bus_size_t ds_len = data_dmamap->dm_segs[i].ds_len; 2772 2773 eprd->prdbal = htole32(ds_addr & EPRD_PRDBAL_MASK); 2774 eprd->bytecount = htole32(EPRD_BYTECOUNT(ds_len)); 2775 eprd->eot = htole16(0); 2776 eprd->prdbah = htole32((ds_addr >> 16) >> 16); 2777 eprd++; 2778 } 2779 (eprd - 1)->eot |= htole16(EPRD_EOT); 2780 #ifdef MVSATA_DEBUG 2781 if (mvsata_debug >= 3) 2782 mvsata_print_eprd(mvport, quetag); 2783 #endif 2784 bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap, 2785 mvport->port_reqtbl[quetag].eprd_offset, MVSATA_EPRD_MAX_SIZE, 2786 BUS_DMASYNC_PREWRITE); 2787 eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr + 2788 mvport->port_reqtbl[quetag].eprd_offset; 2789 2790 MVSATA_EDMA_WRITE_4(mvport, DMA_DTLBA, eprd_addr & DMA_DTLBA_MASK); 2791 MVSATA_EDMA_WRITE_4(mvport, DMA_DTHBA, (eprd_addr >> 16) >> 16); 2792 2793 if (sc_xfer->xs_control & XS_CTL_DATA_IN) 2794 MVSATA_EDMA_WRITE_4(mvport, DMA_C, DMA_C_READ); 2795 else 2796 MVSATA_EDMA_WRITE_4(mvport, DMA_C, 0); 2797 2798 return 0; 2799 } 2800 2801 static void 2802 mvsata_bdma_start(struct mvsata_port *mvport) 2803 { 2804 2805 #ifdef MVSATA_DEBUG 2806 if (mvsata_debug >= 3) 2807 mvsata_print_eprd(mvport, 0); 2808 #endif 2809 2810 MVSATA_EDMA_WRITE_4(mvport, DMA_C, 2811 MVSATA_EDMA_READ_4(mvport, DMA_C) | DMA_C_START); 2812 } 2813 #endif 2814 #endif 2815 2816 2817 static int 2818 mvsata_port_init(struct mvsata_hc *mvhc, int port) 2819 { 2820 struct mvsata_softc *sc = mvhc->hc_sc; 
2821 struct mvsata_port *mvport; 2822 struct ata_channel *chp; 2823 int channel, rv, i; 2824 const int crqbq_size = sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN; 2825 const int crpbq_size = sizeof(struct crpb) * MVSATA_EDMAQ_LEN; 2826 const int eprd_buf_size = MVSATA_EPRD_MAX_SIZE * MVSATA_EDMAQ_LEN; 2827 2828 mvport = malloc(sizeof(struct mvsata_port), M_DEVBUF, 2829 M_ZERO | M_NOWAIT); 2830 if (mvport == NULL) { 2831 aprint_error("%s:%d: can't allocate memory for port %d\n", 2832 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 2833 return ENOMEM; 2834 } 2835 2836 mvport->port = port; 2837 mvport->port_hc = mvhc; 2838 mvport->port_edmamode = nodma; 2839 2840 rv = bus_space_subregion(mvhc->hc_iot, mvhc->hc_ioh, 2841 EDMA_REGISTERS_OFFSET + port * EDMA_REGISTERS_SIZE, 2842 EDMA_REGISTERS_SIZE, &mvport->port_ioh); 2843 if (rv != 0) { 2844 aprint_error("%s:%d: can't subregion EDMA %d registers\n", 2845 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 2846 goto fail0; 2847 } 2848 mvport->port_iot = mvhc->hc_iot; 2849 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, SATA_SS, 4, 2850 &mvport->port_sata_sstatus); 2851 if (rv != 0) { 2852 aprint_error("%s:%d:%d: couldn't subregion sstatus regs\n", 2853 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 2854 goto fail0; 2855 } 2856 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, SATA_SE, 4, 2857 &mvport->port_sata_serror); 2858 if (rv != 0) { 2859 aprint_error("%s:%d:%d: couldn't subregion serror regs\n", 2860 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 2861 goto fail0; 2862 } 2863 if (sc->sc_rev == gen1) 2864 rv = bus_space_subregion(mvhc->hc_iot, mvhc->hc_ioh, 2865 SATAHC_I_R02(port), 4, &mvport->port_sata_scontrol); 2866 else 2867 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, 2868 SATA_SC, 4, &mvport->port_sata_scontrol); 2869 if (rv != 0) { 2870 aprint_error("%s:%d:%d: couldn't subregion scontrol regs\n", 2871 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 2872 goto fail0; 2873 } 2874 
	/*
	 * NOTE(review): this is the tail of the port attach/resource-
	 * allocation routine; its beginning (declarations of sc, mvhc,
	 * mvport, chp, channel, the *_size values, i, rv) lies outside
	 * this chunk.
	 */
	mvport->port_dmat = sc->sc_dmat;
#ifndef MVSATA_WITHOUTDMA
	mvsata_quetag_init(mvport);
#endif
	mvhc->hc_ports[port] = mvport;

	/* Global channel number = host-controller index * ports-per-hc + port */
	channel = mvhc->hc * sc->sc_port + port;
	chp = &mvport->port_ata_channel;
	chp->ch_channel = channel;
	chp->ch_atac = &sc->sc_wdcdev.sc_atac;
	chp->ch_queue = &mvport->port_ata_queue;
	sc->sc_ata_channels[channel] = chp;

	rv = mvsata_wdc_reg_init(mvport, sc->sc_wdcdev.regs + channel);
	if (rv != 0)
		goto fail0;

	/* One DMA map each for the CRQB queue, CRPB queue and ePRD buffer */
	rv = bus_dmamap_create(mvport->port_dmat, crqbq_size, 1, crqbq_size, 0,
	    BUS_DMA_NOWAIT, &mvport->port_crqb_dmamap);
	if (rv != 0) {
		aprint_error(
		    "%s:%d:%d: EDMA CRQB map create failed: error=%d\n",
		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv);
		goto fail0;
	}
	rv = bus_dmamap_create(mvport->port_dmat, crpbq_size, 1, crpbq_size, 0,
	    BUS_DMA_NOWAIT, &mvport->port_crpb_dmamap);
	if (rv != 0) {
		aprint_error(
		    "%s:%d:%d: EDMA CRPB map create failed: error=%d\n",
		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv);
		goto fail1;
	}
	rv = bus_dmamap_create(mvport->port_dmat, eprd_buf_size, 1,
	    eprd_buf_size, 0, BUS_DMA_NOWAIT, &mvport->port_eprd_dmamap);
	if (rv != 0) {
		aprint_error(
		    "%s:%d:%d: EDMA ePRD buffer map create failed: error=%d\n",
		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv);
		goto fail2;
	}
	/* Per-request data maps, one per EDMA queue slot */
	for (i = 0; i < MVSATA_EDMAQ_LEN; i++) {
		rv = bus_dmamap_create(mvport->port_dmat, MAXPHYS,
		    MAXPHYS / PAGE_SIZE, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &mvport->port_reqtbl[i].data_dmamap);
		if (rv != 0) {
			aprint_error("%s:%d:%d:"
			    " EDMA data map(%d) create failed: error=%d\n",
			    device_xname(MVSATA_DEV(sc)), mvhc->hc, port, i,
			    rv);
			goto fail3;
		}
	}

	return 0;

	/* Unwind in reverse order of creation */
fail3:
	for (i--; i >= 0; i--)
		bus_dmamap_destroy(mvport->port_dmat,
		    mvport->port_reqtbl[i].data_dmamap);
	bus_dmamap_destroy(mvport->port_dmat, mvport->port_eprd_dmamap);
fail2:
	bus_dmamap_destroy(mvport->port_dmat, mvport->port_crpb_dmamap);
fail1:
	bus_dmamap_destroy(mvport->port_dmat, mvport->port_crqb_dmamap);
fail0:
	return rv;
}

/*
 * Carve bus_space subregions for the port's shadow (taskfile) registers
 * and SATA interface registers out of the port window, filling in the
 * wdc_regs structure used by the generic wdc layer.
 * Returns 0 on success or the bus_space_subregion() error code.
 */
static int
mvsata_wdc_reg_init(struct mvsata_port *mvport, struct wdc_regs *wdr)
{
	int hc, port, rv, i;

	hc = mvport->port_hc->hc;
	port = mvport->port;

	/* Create subregion for Shadow Registers Map */
	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
	    SHADOW_REG_BLOCK_OFFSET, SHADOW_REG_BLOCK_SIZE, &wdr->cmd_baseioh);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion shadow block regs\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}
	wdr->cmd_iot = mvport->port_iot;

	/*
	 * Once create subregion for each command registers.
	 * Shadow registers are 32-bit aligned, hence the i * 4 stride.
	 */
	for (i = 0; i < WDC_NREG; i++) {
		rv = bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
		    i * 4, sizeof(uint32_t), &wdr->cmd_iohs[i]);
		if (rv != 0) {
			aprint_error("%s:%d:%d: couldn't subregion cmd regs\n",
			    device_xname(MVSATA_DEV2(mvport)), hc, port);
			return rv;
		}
	}
	/*
	 * Create subregion for Alternate Status register.
	 * Note: relies on i == WDC_NREG after the loop above.
	 */
	rv = bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
	    i * 4, sizeof(uint32_t), &wdr->ctl_ioh);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion cmd regs\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}
	wdr->ctl_iot = mvport->port_iot;

	wdc_init_shadow_regs(&mvport->port_ata_channel);

	/* SStatus/SError/SControl live together starting at SATA_SS */
	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
	    SATA_SS, sizeof(uint32_t) * 3, &wdr->sata_baseioh);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion SATA regs\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}
	wdr->sata_iot = mvport->port_iot;
	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
	    SATA_SC, sizeof(uint32_t), &wdr->sata_control);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion SControl\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}
	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
	    SATA_SS, sizeof(uint32_t), &wdr->sata_status);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion SStatus\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}
	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
	    SATA_SE, sizeof(uint32_t), &wdr->sata_error);
	if (rv != 0) {
		aprint_error("%s:%d:%d: couldn't subregion SError\n",
		    device_xname(MVSATA_DEV2(mvport)), hc, port);
		return rv;
	}

	return 0;
}


#ifndef MVSATA_WITHOUTDMA
/*
 * There are functions to determine Host Queue Tag.
 * XXXX: We hope to rotate Tag to facilitate debugging.
 */

/* Reset the rotating host-queue-tag cursor for a freshly set-up port. */
static inline void
mvsata_quetag_init(struct mvsata_port *mvport)
{

	mvport->port_quetagidx = 0;
}

/*
 * Find a free host queue tag, rotating from the last position.
 * Returns a tag index, or -1 if all MVSATA_EDMAQ_LEN slots are busy.
 *
 * NOTE(review): when a free slot is found the cursor is advanced FIRST
 * and the post-increment value is returned, i.e. the returned tag is
 * not the slot whose .xfer was just tested to be NULL.  This looks
 * off-by-one (callers presumably index port_reqtbl[] with the returned
 * tag) — confirm against the callers before relying on it.
 */
static inline int
mvsata_quetag_get(struct mvsata_port *mvport)
{
	int begin = mvport->port_quetagidx;

	do {
		if (mvport->port_reqtbl[mvport->port_quetagidx].xfer == NULL) {
			MVSATA_EDMAQ_INC(mvport->port_quetagidx);
			return mvport->port_quetagidx;
		}
		MVSATA_EDMAQ_INC(mvport->port_quetagidx);
	} while (mvport->port_quetagidx != begin);

	return -1;
}

/* Release a tag: intentionally a no-op, freeing is implied by .xfer = NULL. */
static inline void
mvsata_quetag_put(struct mvsata_port *mvport, int quetag)
{

	/* nothing */
}

/*
 * Allocate, map and load a DMA-safe region of 'size' bytes for an EDMA
 * queue/buffer.  '*dmamap' must already have been created.  'write'
 * selects BUS_DMA_WRITE vs BUS_DMA_READ for the load, and read-only
 * regions are additionally pre-synced for the device.
 * Returns the kernel virtual address, or NULL on failure (all partial
 * resources released via the goto chain).
 */
static void *
mvsata_edma_resource_prepare(struct mvsata_port *mvport, bus_dma_tag_t dmat,
			     bus_dmamap_t *dmamap, size_t size, int write)
{
	bus_dma_segment_t seg;
	int nseg, rv;
	void *kva;

	rv = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, &seg, 1, &nseg,
	    BUS_DMA_NOWAIT);
	if (rv != 0) {
		aprint_error("%s:%d:%d: DMA memory alloc failed: error=%d\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port, rv);
		goto fail;
	}

	rv = bus_dmamem_map(dmat, &seg, nseg, size, &kva, BUS_DMA_NOWAIT);
	if (rv != 0) {
		aprint_error("%s:%d:%d: DMA memory map failed: error=%d\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port, rv);
		goto free;
	}

	rv = bus_dmamap_load(dmat, *dmamap, kva, size, NULL,
	    BUS_DMA_NOWAIT | (write ? BUS_DMA_WRITE : BUS_DMA_READ));
	if (rv != 0) {
		aprint_error("%s:%d:%d: DMA map load failed: error=%d\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port, rv);
		goto unmap;
	}

	if (!write)
		bus_dmamap_sync(dmat, *dmamap, 0, size, BUS_DMASYNC_PREREAD);

	return kva;

unmap:
	bus_dmamem_unmap(dmat, kva, size);
free:
	bus_dmamem_free(dmat, &seg, nseg);
fail:
	return NULL;
}

/*
 * Tear down a region set up by mvsata_edma_resource_prepare().
 *
 * NOTE(review): dm_mapsize, dm_segs and dm_nsegs are read AFTER
 * bus_dmamap_unload(); per bus_dma(9) the mapping is invalidated by
 * unload, so these fields may no longer be meaningful on all
 * implementations — verify, or capture them before the unload.
 */
/* ARGSUSED */
static void
mvsata_edma_resource_purge(struct mvsata_port *mvport, bus_dma_tag_t dmat,
			   bus_dmamap_t dmamap, void *kva)
{

	bus_dmamap_unload(dmat, dmamap);
	bus_dmamem_unmap(dmat, kva, dmamap->dm_mapsize);
	bus_dmamem_free(dmat, dmamap->dm_segs, dmamap->dm_nsegs);
}

/*
 * Load a caller-supplied data buffer into the per-request DMA map for
 * queue slot 'index' and pre-sync it in the direction implied by the
 * ATA_READ flag.  Returns 0 or the bus_dmamap_load() error.
 */
static int
mvsata_dma_bufload(struct mvsata_port *mvport, int index, void *databuf,
		   size_t datalen, int flags)
{
	int rv, lop, sop;
	bus_dmamap_t data_dmamap = mvport->port_reqtbl[index].data_dmamap;

	/* load-operation and sync-operation flags from transfer direction */
	lop = (flags & ATA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE;
	sop = (flags & ATA_READ) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;

	rv = bus_dmamap_load(mvport->port_dmat, data_dmamap, databuf, datalen,
	    NULL, BUS_DMA_NOWAIT | lop);
	if (rv) {
		aprint_error("%s:%d:%d: buffer load failed: error=%d",
		    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
		    mvport->port, rv);
		return rv;
	}
	bus_dmamap_sync(mvport->port_dmat, data_dmamap, 0,
	    data_dmamap->dm_mapsize, sop);

	return 0;
}

/* Post-sync and unload the per-request data map for queue slot 'index'. */
static inline void
mvsata_dma_bufunload(struct mvsata_port *mvport, int index, int flags)
{
	bus_dmamap_t data_dmamap = mvport->port_reqtbl[index].data_dmamap;

	bus_dmamap_sync(mvport->port_dmat, data_dmamap, 0,
	    data_dmamap->dm_mapsize,
	    (flags & ATA_READ) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mvport->port_dmat, data_dmamap);
}
#endif

/*
 * Hard-reset a port: pulse the EDMA ATA-reset bit, re-apply the PHY
 * fixups for this chip generation, and give GenI silicon extra settle
 * time.
 */
static void
mvsata_hreset_port(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EATARST);

	delay(25);		/* allow reset propagation */

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, 0);

	mvport->_fix_phy_param._fix_phy(mvport);

	if (sc->sc_gen == gen1)
		delay(1000);
}

/*
 * Full port reset: disable EDMA, hard-reset, then reprogram the EDMA
 * configuration (PCI vs SoC variants differ) and clear/initialize the
 * interrupt, queue-pointer and timing registers.
 */
static void
mvsata_reset_port(struct mvsata_port *mvport)
{
	device_t parent = device_parent(MVSATA_DEV2(mvport));

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EDSEDMA);

	mvsata_hreset_port(mvport);

	if (device_is_a(parent, "pci"))
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG,
		    EDMA_CFG_RESERVED | EDMA_CFG_ERDBSZ);
	else			/* SoC */
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG,
		    EDMA_CFG_RESERVED | EDMA_CFG_RESERVED2);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_T, 0);
	MVSATA_EDMA_WRITE_4(mvport, SATA_SEIM, 0x019c0000);
	MVSATA_EDMA_WRITE_4(mvport, SATA_SE, ~0);	/* clear all SError bits */
	MVSATA_EDMA_WRITE_4(mvport, SATA_FISIC, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEM, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQOP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_TC, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IORT, 0xbc);
}

/* Reset one host controller: clear its coalescing and interrupt-cause regs. */
static void
mvsata_reset_hc(struct mvsata_hc *mvhc)
{
#if 0
	uint32_t val;
#endif

	MVSATA_HC_WRITE_4(mvhc, SATAHC_ICT, 0);
	MVSATA_HC_WRITE_4(mvhc, SATAHC_ITT, 0);
	MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, 0);

#if 0	/* XXXX needs? */
	MVSATA_HC_WRITE_4(mvhc, 0x01c, 0);

	/*
	 * Keep the SS during power on and the reference clock bits (reset
	 * sample)
	 */
	/*
	 * NOTE(review): dead (#if 0) code; the final READ_4 takes three
	 * arguments — presumably meant to be MVSATA_HC_WRITE_4 — confirm
	 * before ever enabling this block.
	 */
	val = MVSATA_HC_READ_4(mvhc, 0x020);
	val &= 0x1c1c1c1c;
	val |= 0x03030303;
	MVSATA_HC_READ_4(mvhc, 0x020, 0);
#endif
}

#define WDCDELAY	100	/* 100 microseconds */
#define WDCNDELAY_RST	(WDC_RESET_WAIT * 1000 / WDCDELAY)

/*
 * ATA software reset through the shadow Device Control register.
 * Waits for BSY to deassert (busy-wait when !waitok, tsleep otherwise)
 * and returns the 32-bit device signature assembled from the SectorCount/
 * LBA-low/mid/high shadow registers, or ~0 on timeout.
 */
static uint32_t
mvsata_softreset(struct mvsata_port *mvport, int waitok)
{
	uint32_t sig0 = ~0;
	int timeout, nloop;
	uint8_t st0;

	/* Assert SRST with interrupts disabled, then deassert */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_RST | WDCTL_IDS | WDCTL_4BIT);
	delay(10);
	(void) MVSATA_WDC_READ_1(mvport, SRB_FE);
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_IDS | WDCTL_4BIT);
	delay(10);

	if (!waitok)
		nloop = WDCNDELAY_RST;
	else
		nloop = WDC_RESET_WAIT * hz / 1000;

	/* wait for BSY to deassert */
	for (timeout = 0; timeout < nloop; timeout++) {
		st0 = MVSATA_WDC_READ_1(mvport, SRB_CS);

		if ((st0 & WDCS_BSY) == 0) {
			/* assemble the signature, LSB first */
			sig0 = MVSATA_WDC_READ_1(mvport, SRB_SC) << 0;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 8;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 16;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 24;
			goto out;
		}
		if (!waitok)
			delay(WDCDELAY);
		else
			tsleep(&nloop, PRIBIO, "atarst", 1);
	}

out:
	/* re-enable interrupts */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	return sig0;
}

#ifndef MVSATA_WITHOUTDMA
/*
 * Reset the EDMA request/response queue pointer registers.  The request
 * queue pointers are zeroed; the response queue base is reloaded from
 * the CRPB DMA map (high 32 bits into RESQBAH, masked low bits into
 * RESQOP).
 */
static void
mvsata_edma_reset_qptr(struct mvsata_port *mvport)
{
	const bus_addr_t crpb_addr =
	    mvport->port_crpb_dmamap->dm_segs[0].ds_addr;
	const uint32_t crpb_addr_mask =
	    EDMA_RESQP_ERPQBAP_MASK | EDMA_RESQP_ERPQBA_MASK;

	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQOP, 0);
	/* double 16-bit shift avoids UB when bus_addr_t is 32-bit */
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQBAH, (crpb_addr >> 16) >> 16);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, (crpb_addr & crpb_addr_mask));
}

/* Turn the EDMA engine on. */
static inline void
mvsata_edma_enable(struct mvsata_port *mvport)
{

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EENEDMA);
}

/*
 * Disable the EDMA engine: wait (up to 'timeout' ms) for it to go idle,
 * request disable, then wait for the enable bit to clear.  'waitok'
 * selects tsleep vs busy delay.  Returns 0 or EBUSY on timeout.
 */
static int
mvsata_edma_disable(struct mvsata_port *mvport, int timeout, int waitok)
{
	uint32_t status, command;
	int ms;

	if (MVSATA_EDMA_READ_4(mvport, EDMA_CMD) & EDMA_CMD_EENEDMA) {
		/* first wait for the engine to drain to idle */
		for (ms = 0; ms < timeout; ms++) {
			status = MVSATA_EDMA_READ_4(mvport, EDMA_S);
			if (status & EDMA_S_EDMAIDLE)
				break;
			if (waitok)
				tsleep(&waitok, PRIBIO, "mvsata_edma1",
				    mstohz(1));
			else
				delay(1000);
		}
		if (ms == timeout) {
			aprint_error("%s:%d:%d: unable to stop EDMA\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port);
			return EBUSY;
		}

		/* The disable bit (eDsEDMA) is self negated. */
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EDSEDMA);

		/* now wait for the enable bit to drop */
		for ( ; ms < timeout; ms++) {
			command = MVSATA_EDMA_READ_4(mvport, EDMA_CMD);
			if (!(command & EDMA_CMD_EENEDMA))
				break;
			if (waitok)
				tsleep(&waitok, PRIBIO, "mvsata_edma2",
				    mstohz(1));
			else
				delay(1000);
		}
		if (ms == timeout) {
			aprint_error("%s:%d:%d: unable to stop EDMA\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port);
			return EBUSY;
		}
	}
	return 0;
}

/*
 * Set EDMA registers according to mode.
 * ex. NCQ/TCQ(queued)/non queued.
 */
static void
mvsata_edma_config(struct mvsata_port *mvport, int mode)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	uint32_t reg;

	reg = MVSATA_EDMA_READ_4(mvport, EDMA_CFG);
	reg |= EDMA_CFG_RESERVED;

	if (mode == ncq) {
		if (sc->sc_gen == gen1) {
			/* GenI silicon has no native command queueing */
			aprint_error_dev(MVSATA_DEV2(mvport),
			    "GenI not support NCQ\n");
			return;
		} else if (sc->sc_gen == gen2)
			reg |= EDMA_CFG_EDEVERR;
		reg |= EDMA_CFG_ESATANATVCMDQUE;
	} else if (mode == queued) {
		reg &= ~EDMA_CFG_ESATANATVCMDQUE;
		reg |= EDMA_CFG_EQUE;
	} else
		reg &= ~(EDMA_CFG_ESATANATVCMDQUE | EDMA_CFG_EQUE);

	/* generation-specific configuration bits */
	if (sc->sc_gen == gen1)
		reg |= EDMA_CFG_ERDBSZ;
	else if (sc->sc_gen == gen2)
		reg |= (EDMA_CFG_ERDBSZEXT | EDMA_CFG_EWRBUFFERLEN);
	else if (sc->sc_gen == gen2e) {
		device_t parent = device_parent(MVSATA_DEV(sc));

		reg |= (EDMA_CFG_EMASKRXPM | EDMA_CFG_EHOSTQUEUECACHEEN);
		reg &= ~(EDMA_CFG_EEDMAFBS | EDMA_CFG_EEDMAQUELEN);

		if (device_is_a(parent, "pci"))
			reg |= (
#if NATAPIBUS > 0
			    EDMA_CFG_EEARLYCOMPLETIONEN |
#endif
			    EDMA_CFG_ECUTTHROUGHEN |
			    EDMA_CFG_EWRBUFFERLEN |
			    EDMA_CFG_ERDBSZEXT);
	}
	MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG, reg);

	/* build the interrupt-enable mask */
	reg = (
	    EDMA_IE_EIORDYERR |
	    EDMA_IE_ETRANSINT |
	    EDMA_IE_EDEVCON |
	    EDMA_IE_EDEVDIS);
	if (sc->sc_gen != gen1)
		reg |= (
		    EDMA_IE_TRANSPROTERR |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKTXERR_FISTXABORTED) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_ESELFDIS);

	if (mode == ncq)
		reg |= EDMA_IE_EDEVERR;
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEM, reg);
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_HC);
	reg &= ~EDMA_IE_EDEVERR;
	if (mode != ncq)
		reg |= EDMA_IE_EDEVERR;
	MVSATA_EDMA_WRITE_4(mvport, EDMA_HC, reg);
	if (sc->sc_gen == gen2e) {
		/*
		 * Clear FISWait4HostRdyEn[0] and [2].
		 *   [0]: Device to Host FIS with <ERR> or <DF> bit set to 1.
		 *   [2]: SDB FIS is received with <ERR> bit set to 1.
		 */
		reg = MVSATA_EDMA_READ_4(mvport, SATA_FISC);
		reg &= ~(SATA_FISC_FISWAIT4HOSTRDYEN_B0 |
		    SATA_FISC_FISWAIT4HOSTRDYEN_B2);
		MVSATA_EDMA_WRITE_4(mvport, SATA_FISC, reg);
	}

	mvport->port_edmamode = mode;
}


/*
 * Generation dependent functions
 */

/*
 * Fill in the CRQB (command request block) for queue slot 'quetag' at
 * request-queue in-pointer 'erqqip' (GenI/GenII layout).  Builds the
 * ePRD pointer, control flags and the shadow-register ATA command
 * sequence for a DMA or NCQ/TCQ read/write described by 'ata_bio'.
 */
static void
mvsata_edma_setup_crqb(struct mvsata_port *mvport, int erqqip, int quetag,
		       struct ata_bio *ata_bio)
{
	struct crqb *crqb;
	bus_addr_t eprd_addr;
	daddr_t blkno;
	uint32_t rw;
	uint8_t cmd, head;
	int i;
	const int drive =
	    mvport->port_ata_channel.ch_queue->active_xfer->c_drive;

	eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr +
	    mvport->port_reqtbl[quetag].eprd_offset;
	rw = (ata_bio->flags & ATA_READ) ? CRQB_CDIR_READ : CRQB_CDIR_WRITE;
	cmd = (ata_bio->flags & ATA_READ) ? WDCC_READDMA : WDCC_WRITEDMA;
	if (ata_bio->flags & (ATA_LBA|ATA_LBA48)) {
		head = WDSD_LBA;
	} else {
		head = 0;
	}
	blkno = ata_bio->blkno;
	if (ata_bio->flags & ATA_LBA48)
		cmd = atacmd_to48(cmd);
	else {
		/* LBA28: top nibble of the block number goes in the head */
		head |= ((ata_bio->blkno >> 24) & 0xf);
		blkno &= 0xffffff;
	}
	crqb = &mvport->port_crqb->crqb + erqqip;
	crqb->cprdbl = htole32(eprd_addr & CRQB_CRQBL_EPRD_MASK);
	/* double 16-bit shift avoids UB when bus_addr_t is 32-bit */
	crqb->cprdbh = htole32((eprd_addr >> 16) >> 16);
	crqb->ctrlflg =
	    htole16(rw | CRQB_CHOSTQUETAG(quetag) | CRQB_CPMPORT(drive));
	i = 0;
	if (mvport->port_edmamode == dma) {
		if (ata_bio->flags & ATA_LBA48)
			crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
			    CRQB_ATACOMMAND_SECTORCOUNT, ata_bio->nblks >> 8));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_SECTORCOUNT, ata_bio->nblks));
	} else { /* ncq/queued */

		/*
		 * XXXX: Oops, ata command is not correct.  And, atabus layer
		 *       has not been supported yet now.
		 *        Queued DMA read/write.
		 *        read/write FPDMAQueued.
		 */

		if (ata_bio->flags & ATA_LBA48)
			crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
			    CRQB_ATACOMMAND_FEATURES, ata_bio->nblks >> 8));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_FEATURES, ata_bio->nblks));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_SECTORCOUNT, quetag << 3));
	}
	if (ata_bio->flags & ATA_LBA48) {
		/* high-order LBA bytes first for 48-bit addressing */
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBALOW, blkno >> 24));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBAMID, blkno >> 32));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBAHIGH, blkno >> 40));
	}
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBALOW, blkno));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBAMID, blkno >> 8));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBAHIGH, blkno >> 16));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_DEVICE, head));
	crqb->atacommand[i++] = htole16(
	    CRQB_ATACOMMAND(CRQB_ATACOMMAND_COMMAND, cmd) |
	    CRQB_ATACOMMAND_LAST);
}
#endif

/* Read the saved pre-emphasis/amplitude PHY bits on GenI silicon. */
static uint32_t
mvsata_read_preamps_gen1(struct mvsata_port *mvport)
{
	struct mvsata_hc *hc = mvport->port_hc;
	uint32_t reg;

	reg = MVSATA_HC_READ_4(hc, SATAHC_I_PHYMODE(mvport->port));
	/*
	 * [12:11] : pre
	 * [7:5]   : amps
	 */
	return reg & 0x000018e0;
}

/*
 * Apply GenI (88SX50xx) PHY errata fixes and restore the saved
 * pre-emphasis/amplitude settings.
 */
static void
mvsata_fix_phy_gen1(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct mvsata_hc *mvhc = mvport->port_hc;
	uint32_t reg;
	int port = mvport->port, fix_apm_sq = 0;

	/* errata applicability depends on chip model/revision */
	if (sc->sc_model == PCI_PRODUCT_MARVELL_88SX5080) {
		if (sc->sc_rev == 0x01)
			fix_apm_sq = 1;
	} else {
		if (sc->sc_rev == 0x00)
			fix_apm_sq = 1;
	}

	if (fix_apm_sq) {
		/*
		 * Disable auto-power management
		 * 88SX50xx FEr SATA#12
		 */
		reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_LTMODE(port));
		reg |= (1 << 19);
		MVSATA_HC_WRITE_4(mvhc, SATAHC_I_LTMODE(port), reg);

		/*
		 * Fix squelch threshold
		 * 88SX50xx FEr SATA#9
		 */
		reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_PHYCONTROL(port));
		reg &= ~0x3;
		reg |= 0x1;
		MVSATA_HC_WRITE_4(mvhc, SATAHC_I_PHYCONTROL(port), reg);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_PHYMODE(port));
	reg &= ~0x000018e0;	/* pre and amps mask */
	reg |= mvport->_fix_phy_param.pre_amps;
	MVSATA_HC_WRITE_4(mvhc, SATAHC_I_PHYMODE(port), reg);
}

/* Device-connect hook for GenI: re-run PHY fixups (FEr SATA#2). */
static void
mvsata_devconn_gen1(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));

	/* Fix for 88SX50xx FEr SATA#2 */
	mvport->_fix_phy_param._fix_phy(mvport);

	/* If disk is connected, then enable the activity LED */
	if (sc->sc_rev == 0x03) {
		/* XXXXX */
	}
}

/* Read the saved pre-emphasis/amplitude PHY bits on GenII silicon. */
static uint32_t
mvsata_read_preamps_gen2(struct mvsata_port *mvport)
{
	uint32_t reg;

	reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
	/*
	 * [10:8] : amps
	 * [7:5]  : pre
	 */
	return reg & 0x000007e0;
}

/*
 * Apply GenII/GenIIE PHY errata fixes (PHY Mode 2/3/4 registers) and
 * restore the saved pre-emphasis/amplitude settings.
 */
static void
mvsata_fix_phy_gen2(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	uint32_t reg;

	if ((sc->sc_gen == gen2 && sc->sc_rev == 0x07) ||
	    sc->sc_gen == gen2e) {
		/*
		 * Fix for
		 *   88SX60X1 FEr SATA #23
		 *   88SX6042/88SX7042 FEr SATA #23
		 *   88F5182 FEr #SATA-S13
		 *   88F5082 FEr #SATA-S13
		 */
		reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
		reg &= ~(1 << 16);
		reg |= (1 << 31);
		MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg);

		delay(200);

		reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
		reg &= ~((1 << 16) | (1 << 31));
		MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg);

		delay(200);
	}

	/* Fix values in PHY Mode 3 Register.*/
	reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM3);
	reg &= ~0x7F900000;
	reg |= 0x2A800000;
	/* Implement Guidline 88F5182, 88F5082, 88F6082 (GL# SATA-S11) */
	if (sc->sc_model == PCI_PRODUCT_MARVELL_88F5082 ||
	    sc->sc_model == PCI_PRODUCT_MARVELL_88F5182 ||
	    sc->sc_model == PCI_PRODUCT_MARVELL_88F6082)
		reg &= ~0x0000001c;
	MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM3, reg);

	/*
	 * Fix values in PHY Mode 4 Register.
	 *   88SX60x1 FEr SATA#10
	 *   88F5182 GL #SATA-S10
	 *   88F5082 GL #SATA-S10
	 */
	if ((sc->sc_gen == gen2 && sc->sc_rev == 0x07) ||
	    sc->sc_gen == gen2e) {
		uint32_t tmp = 0;

		/* 88SX60x1 FEr SATA #13 */
		/*
		 * NOTE(review): this compares sc_gen against the literal 2
		 * while the surrounding code uses the enumerator gen2 —
		 * presumably gen2 == 2, but confirm against the enum
		 * declaration in mvsatavar.h (same below).
		 */
		if (sc->sc_gen == 2 && sc->sc_rev == 0x07)
			tmp = MVSATA_EDMA_READ_4(mvport, SATA_PHYM3);

		reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM4);
		reg |= (1 << 0);
		reg &= ~(1 << 1);
		/* PHY Mode 4 Register of Gen IIE has some restriction */
		if (sc->sc_gen == gen2e) {
			reg &= ~0x5de3fffc;
			reg |= (1 << 2);
		}
		MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM4, reg);

		/* 88SX60x1 FEr SATA #13 */
		if (sc->sc_gen == 2 && sc->sc_rev == 0x07)
			MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM3, tmp);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
	reg &= ~0x000007e0;	/* pre and amps mask */
	reg |= mvport->_fix_phy_param.pre_amps;
	reg &= ~(1 << 16);
	if (sc->sc_gen == gen2e) {
		/*
		 * according to mvSata 3.6.1, some IIE values are fixed.
		 * some reserved fields must be written with fixed values.
		 */
		reg &= ~0xC30FF01F;
		reg |= 0x0000900F;
	}
	MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg);
}

#ifndef MVSATA_WITHOUTDMA
/*
 * Fill in the CRQB for queue slot 'quetag' at request-queue in-pointer
 * 'erqqip' using the GenIIE layout (32-bit control word, packed ATA
 * command words instead of shadow-register pairs).
 */
static void
mvsata_edma_setup_crqb_gen2e(struct mvsata_port *mvport, int erqqip, int quetag,
			     struct ata_bio *ata_bio)
{
	struct crqb_gen2e *crqb;
	bus_addr_t eprd_addr;
	daddr_t blkno;
	uint32_t ctrlflg, rw;
	uint8_t cmd, head;
	const int drive =
	    mvport->port_ata_channel.ch_queue->active_xfer->c_drive;

	eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr +
	    mvport->port_reqtbl[quetag].eprd_offset;
	rw = (ata_bio->flags & ATA_READ) ? CRQB_CDIR_READ : CRQB_CDIR_WRITE;
	ctrlflg = (rw | CRQB_CDEVICEQUETAG(0) | CRQB_CPMPORT(drive) |
	    CRQB_CPRDMODE_EPRD | CRQB_CHOSTQUETAG_GEN2(quetag));
	cmd = (ata_bio->flags & ATA_READ) ? WDCC_READDMA : WDCC_WRITEDMA;
	if (ata_bio->flags & (ATA_LBA|ATA_LBA48)) {
		head = WDSD_LBA;
	} else {
		head = 0;
	}
	blkno = ata_bio->blkno;
	if (ata_bio->flags & ATA_LBA48)
		cmd = atacmd_to48(cmd);
	else {
		/* LBA28: top nibble of the block number goes in the head */
		head |= ((ata_bio->blkno >> 24) & 0xf);
		blkno &= 0xffffff;
	}
	crqb = &mvport->port_crqb->crqb_gen2e + erqqip;
	crqb->cprdbl = htole32(eprd_addr & CRQB_CRQBL_EPRD_MASK);
	/* double 16-bit shift avoids UB when bus_addr_t is 32-bit */
	crqb->cprdbh = htole32((eprd_addr >> 16) >> 16);
	crqb->ctrlflg = htole32(ctrlflg);
	if (mvport->port_edmamode == dma) {
		crqb->atacommand[0] = htole32(cmd << 16);
		crqb->atacommand[1] = htole32((blkno & 0xffffff) | head << 24);
		crqb->atacommand[2] = htole32(((blkno >> 24) & 0xffffff));
		crqb->atacommand[3] = htole32(ata_bio->nblks & 0xffff);
	} else { /* ncq/queued */

		/*
		 * XXXX: Oops, ata command is not correct.  And, atabus layer
		 *       has not been supported yet now.
		 *        Queued DMA read/write.
		 *        read/write FPDMAQueued.
		 */

		crqb->atacommand[0] = htole32(
		    (cmd << 16) | ((ata_bio->nblks & 0xff) << 24));
		crqb->atacommand[1] = htole32((blkno & 0xffffff) | head << 24);
		crqb->atacommand[2] = htole32(((blkno >> 24) & 0xffffff) |
		    ((ata_bio->nblks >> 8) & 0xff));
		/*
		 * NOTE(review): atacommand[3] is assigned twice — the
		 * second store (quetag << 3) silently discards the nblks
		 * value.  Consistent with the XXXX comment above that the
		 * queued-command encoding is known-incorrect; confirm the
		 * intended word before fixing.
		 */
		crqb->atacommand[3] = htole32(ata_bio->nblks & 0xffff);
		crqb->atacommand[3] = htole32(quetag << 3);
	}
}


#ifdef MVSATA_DEBUG
/*
 * Hex-dump helper: prints 'size' bytes of structure 'p', 16 per line,
 * labelled with the structure type and index 'n'.
 */
#define MVSATA_DEBUG_PRINT(type, size, n, p)		\
	do {						\
		int _i;					\
		u_char *_p = (p);			\
							\
		printf(#type "(%d)", (n));		\
		for (_i = 0; _i < (size); _i++, _p++) {	\
			if (_i % 16 == 0)		\
				printf("\n   ");	\
			printf(" %02x", *_p);		\
		}					\
		printf("\n");				\
	} while (0 /* CONSTCOND */)

/* Dump CRQB slot n of the request queue. */
static void
mvsata_print_crqb(struct mvsata_port *mvport, int n)
{

	MVSATA_DEBUG_PRINT(crqb, sizeof(union mvsata_crqb),
	    n, (u_char *)(mvport->port_crqb + n));
}

/* Dump CRPB slot n of the response queue. */
static void
mvsata_print_crpb(struct mvsata_port *mvport, int n)
{

	MVSATA_DEBUG_PRINT(crpb, sizeof(struct crpb),
	    n, (u_char *)(mvport->port_crpb + n));
}

/* Dump the ePRD chain for request slot n, stopping at the EOT entry. */
static void
mvsata_print_eprd(struct mvsata_port *mvport, int n)
{
	struct eprd *eprd;
	int i = 0;

	eprd = mvport->port_reqtbl[n].eprd;
	while (1 /*CONSTCOND*/) {
		MVSATA_DEBUG_PRINT(eprd, sizeof(struct eprd),
		    i, (u_char *)eprd);
		if (eprd->eot & EPRD_EOT)
			break;
		eprd++;
		i++;
	}
}
#endif
#endif