1 /* $NetBSD: mvsata.c,v 1.29 2013/02/10 21:21:29 jakllsch Exp $ */ 2 /* 3 * Copyright (c) 2008 KIYOHARA Takashi 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __KERNEL_RCSID(0, "$NetBSD: mvsata.c,v 1.29 2013/02/10 21:21:29 jakllsch Exp $"); 30 31 #include "opt_mvsata.h" 32 33 /* ATAPI implementation not finished. 
*/ 34 //#include "atapibus.h" 35 36 #include <sys/param.h> 37 #if NATAPIBUS > 0 38 #include <sys/buf.h> 39 #endif 40 #include <sys/bus.h> 41 #include <sys/cpu.h> 42 #include <sys/device.h> 43 #include <sys/disklabel.h> 44 #include <sys/errno.h> 45 #include <sys/kernel.h> 46 #include <sys/malloc.h> 47 #include <sys/proc.h> 48 49 #include <machine/vmparam.h> 50 51 #include <dev/ata/atareg.h> 52 #include <dev/ata/atavar.h> 53 #include <dev/ic/wdcvar.h> 54 #include <dev/ata/satapmpreg.h> 55 #include <dev/ata/satareg.h> 56 #include <dev/ata/satavar.h> 57 58 #if NATAPIBUS > 0 59 #include <dev/scsipi/scsi_all.h> /* for SCSI status */ 60 #endif 61 62 #include <dev/pci/pcidevs.h> 63 64 #include <dev/ic/mvsatareg.h> 65 #include <dev/ic/mvsatavar.h> 66 67 68 #define MVSATA_DEV(sc) ((sc)->sc_wdcdev.sc_atac.atac_dev) 69 #define MVSATA_DEV2(mvport) ((mvport)->port_ata_channel.ch_atac->atac_dev) 70 71 #define MVSATA_HC_READ_4(hc, reg) \ 72 bus_space_read_4((hc)->hc_iot, (hc)->hc_ioh, (reg)) 73 #define MVSATA_HC_WRITE_4(hc, reg, val) \ 74 bus_space_write_4((hc)->hc_iot, (hc)->hc_ioh, (reg), (val)) 75 #define MVSATA_EDMA_READ_4(mvport, reg) \ 76 bus_space_read_4((mvport)->port_iot, (mvport)->port_ioh, (reg)) 77 #define MVSATA_EDMA_WRITE_4(mvport, reg, val) \ 78 bus_space_write_4((mvport)->port_iot, (mvport)->port_ioh, (reg), (val)) 79 #define MVSATA_WDC_READ_2(mvport, reg) \ 80 bus_space_read_2((mvport)->port_iot, (mvport)->port_ioh, \ 81 SHADOW_REG_BLOCK_OFFSET + (reg)) 82 #define MVSATA_WDC_READ_1(mvport, reg) \ 83 bus_space_read_1((mvport)->port_iot, (mvport)->port_ioh, \ 84 SHADOW_REG_BLOCK_OFFSET + (reg)) 85 #define MVSATA_WDC_WRITE_2(mvport, reg, val) \ 86 bus_space_write_2((mvport)->port_iot, (mvport)->port_ioh, \ 87 SHADOW_REG_BLOCK_OFFSET + (reg), (val)) 88 #define MVSATA_WDC_WRITE_1(mvport, reg, val) \ 89 bus_space_write_1((mvport)->port_iot, (mvport)->port_ioh, \ 90 SHADOW_REG_BLOCK_OFFSET + (reg), (val)) 91 92 #ifdef MVSATA_DEBUG 93 #define DPRINTF(x) if (mvsata_debug) 
printf x 94 #define DPRINTFN(n,x) if (mvsata_debug >= (n)) printf x 95 int mvsata_debug = 2; 96 #else 97 #define DPRINTF(x) 98 #define DPRINTFN(n,x) 99 #endif 100 101 #define ATA_DELAY 10000 /* 10s for a drive I/O */ 102 #define ATAPI_DELAY 10 /* 10 ms, this is used only before 103 sending a cmd */ 104 #define ATAPI_MODE_DELAY 1000 /* 1s, timeout for SET_FEATURE cmds */ 105 106 #define MVSATA_EPRD_MAX_SIZE (sizeof(struct eprd) * (MAXPHYS / PAGE_SIZE)) 107 108 109 #ifndef MVSATA_WITHOUTDMA 110 static int mvsata_bio(struct ata_drive_datas *, struct ata_bio *); 111 static void mvsata_reset_drive(struct ata_drive_datas *, int, uint32_t *); 112 static void mvsata_reset_channel(struct ata_channel *, int); 113 static int mvsata_exec_command(struct ata_drive_datas *, struct ata_command *); 114 static int mvsata_addref(struct ata_drive_datas *); 115 static void mvsata_delref(struct ata_drive_datas *); 116 static void mvsata_killpending(struct ata_drive_datas *); 117 118 #if NATAPIBUS > 0 119 static void mvsata_atapibus_attach(struct atabus_softc *); 120 static void mvsata_atapi_scsipi_request(struct scsipi_channel *, 121 scsipi_adapter_req_t, void *); 122 static void mvsata_atapi_minphys(struct buf *); 123 static void mvsata_atapi_probe_device(struct atapibus_softc *, int); 124 static void mvsata_atapi_kill_pending(struct scsipi_periph *); 125 #endif 126 #endif 127 128 static void mvsata_setup_channel(struct ata_channel *); 129 130 #ifndef MVSATA_WITHOUTDMA 131 static void mvsata_bio_start(struct ata_channel *, struct ata_xfer *); 132 static int mvsata_bio_intr(struct ata_channel *, struct ata_xfer *, int); 133 static void mvsata_bio_kill_xfer(struct ata_channel *, struct ata_xfer *, int); 134 static void mvsata_bio_done(struct ata_channel *, struct ata_xfer *); 135 static int mvsata_bio_ready(struct mvsata_port *, struct ata_bio *, int, 136 int); 137 static void mvsata_wdc_cmd_start(struct ata_channel *, struct ata_xfer *); 138 static int mvsata_wdc_cmd_intr(struct 
ata_channel *, struct ata_xfer *, int); 139 static void mvsata_wdc_cmd_kill_xfer(struct ata_channel *, struct ata_xfer *, 140 int); 141 static void mvsata_wdc_cmd_done(struct ata_channel *, struct ata_xfer *); 142 static void mvsata_wdc_cmd_done_end(struct ata_channel *, struct ata_xfer *); 143 #if NATAPIBUS > 0 144 static void mvsata_atapi_start(struct ata_channel *, struct ata_xfer *); 145 static int mvsata_atapi_intr(struct ata_channel *, struct ata_xfer *, int); 146 static void mvsata_atapi_kill_xfer(struct ata_channel *, struct ata_xfer *, 147 int); 148 static void mvsata_atapi_reset(struct ata_channel *, struct ata_xfer *); 149 static void mvsata_atapi_phase_complete(struct ata_xfer *); 150 static void mvsata_atapi_done(struct ata_channel *, struct ata_xfer *); 151 static void mvsata_atapi_polldsc(void *); 152 #endif 153 154 static int mvsata_edma_enqueue(struct mvsata_port *, struct ata_bio *, void *); 155 static int mvsata_edma_handle(struct mvsata_port *, struct ata_xfer *); 156 static int mvsata_edma_wait(struct mvsata_port *, struct ata_xfer *, int); 157 static void mvsata_edma_timeout(void *); 158 static void mvsata_edma_rqq_remove(struct mvsata_port *, struct ata_xfer *); 159 #if NATAPIBUS > 0 160 static int mvsata_bdma_init(struct mvsata_port *, struct scsipi_xfer *, void *); 161 static void mvsata_bdma_start(struct mvsata_port *); 162 #endif 163 #endif 164 165 static int mvsata_port_init(struct mvsata_hc *, int); 166 static int mvsata_wdc_reg_init(struct mvsata_port *, struct wdc_regs *); 167 #ifndef MVSATA_WITHOUTDMA 168 static inline void mvsata_quetag_init(struct mvsata_port *); 169 static inline int mvsata_quetag_get(struct mvsata_port *); 170 static inline void mvsata_quetag_put(struct mvsata_port *, int); 171 static void *mvsata_edma_resource_prepare(struct mvsata_port *, bus_dma_tag_t, 172 bus_dmamap_t *, size_t, int); 173 static void mvsata_edma_resource_purge(struct mvsata_port *, bus_dma_tag_t, 174 bus_dmamap_t, void *); 175 static int 
mvsata_dma_bufload(struct mvsata_port *, int, void *, size_t, int); 176 static inline void mvsata_dma_bufunload(struct mvsata_port *, int, int); 177 #endif 178 179 static void mvsata_hreset_port(struct mvsata_port *); 180 static void mvsata_reset_port(struct mvsata_port *); 181 static void mvsata_reset_hc(struct mvsata_hc *); 182 static uint32_t mvsata_softreset(struct mvsata_port *, int); 183 #ifndef MVSATA_WITHOUTDMA 184 static void mvsata_edma_reset_qptr(struct mvsata_port *); 185 static inline void mvsata_edma_enable(struct mvsata_port *); 186 static int mvsata_edma_disable(struct mvsata_port *, int, int); 187 static void mvsata_edma_config(struct mvsata_port *, int); 188 189 static void mvsata_edma_setup_crqb(struct mvsata_port *, int, int, 190 struct ata_bio *); 191 #endif 192 static uint32_t mvsata_read_preamps_gen1(struct mvsata_port *); 193 static void mvsata_fix_phy_gen1(struct mvsata_port *); 194 static void mvsata_devconn_gen1(struct mvsata_port *); 195 196 static uint32_t mvsata_read_preamps_gen2(struct mvsata_port *); 197 static void mvsata_fix_phy_gen2(struct mvsata_port *); 198 #ifndef MVSATA_WITHOUTDMA 199 static void mvsata_edma_setup_crqb_gen2e(struct mvsata_port *, int, int, 200 struct ata_bio *); 201 202 #ifdef MVSATA_DEBUG 203 static void mvsata_print_crqb(struct mvsata_port *, int); 204 static void mvsata_print_crpb(struct mvsata_port *, int); 205 static void mvsata_print_eprd(struct mvsata_port *, int); 206 #endif 207 208 static void mvsata_probe_drive(struct ata_channel *); 209 210 struct ata_bustype mvsata_ata_bustype = { 211 SCSIPI_BUSTYPE_ATA, 212 mvsata_bio, 213 mvsata_reset_drive, 214 mvsata_reset_channel, 215 mvsata_exec_command, 216 ata_get_params, 217 mvsata_addref, 218 mvsata_delref, 219 mvsata_killpending 220 }; 221 222 #if NATAPIBUS > 0 223 static const struct scsipi_bustype mvsata_atapi_bustype = { 224 SCSIPI_BUSTYPE_ATAPI, 225 atapi_scsipi_cmd, 226 atapi_interpret_sense, 227 atapi_print_addr, 228 mvsata_atapi_kill_pending, 229 
NULL,					/* no bustype ioctl hook */
};
#endif	/* NATAPIBUS > 0 */
#endif	/* !MVSATA_WITHOUTDMA */

/*
 * mvsata_pmp_select:
 *	Select which Port Multiplier port subsequent register-level traffic
 *	is directed to, via the PM-port field (low 4 bits) of SATAICTL.
 *	Must only be called while EDMA is disabled (asserted below).
 */
static void
mvsata_pmp_select(struct mvsata_port *mvport, int pmpport)
{
	uint32_t ifctl;

	KASSERT(pmpport < PMP_MAX_DRIVES);
#if defined(DIAGNOSTIC) || defined(MVSATA_DEBUG)
	/* Changing the PM port while EDMA is running would be fatal. */
	if ((MVSATA_EDMA_READ_4(mvport, EDMA_CMD) & EDMA_CMD_EENEDMA) != 0) {
		panic("EDMA enabled");
	}
#endif

	ifctl = MVSATA_EDMA_READ_4(mvport, SATA_SATAICTL);
	ifctl &= ~0xf;			/* clear PM-port field */
	ifctl |= pmpport;
	MVSATA_EDMA_WRITE_4(mvport, SATA_SATAICTL, ifctl);
}

/*
 * mvsata_attach:
 *	Common attach routine for all supported controller generations.
 *	Fills in the wdc/atac capabilities, allocates per-channel register
 *	descriptions, initializes each host controller (hc) and port, runs
 *	the bus-glue provided reset callbacks, and attaches the channels.
 *	Returns 0 on success or ENOMEM.
 */
int
mvsata_attach(struct mvsata_softc *sc, struct mvsata_product *product,
	      int (*mvsata_sreset)(struct mvsata_softc *),
	      int (*mvsata_misc_reset)(struct mvsata_softc *),
	      int read_pre_amps)
{
	struct mvsata_hc *mvhc;
	struct mvsata_port *mvport;
	/* Per-generation hooks, selected from product->generation below. */
	uint32_t (*read_preamps)(struct mvsata_port *) = NULL;
	void (*_fix_phy)(struct mvsata_port *) = NULL;
#ifndef MVSATA_WITHOUTDMA
	void (*edma_setup_crqb)
	    (struct mvsata_port *, int, int, struct ata_bio *) = NULL;
#endif
	int hc, port, channel;

	aprint_normal_dev(MVSATA_DEV(sc), "Gen%s, %dhc, %dport/hc\n",
	    (product->generation == gen1) ? "I" :
	    ((product->generation == gen2) ?
"II" : "IIe"), 271 product->hc, product->port); 272 273 274 switch (product->generation) { 275 case gen1: 276 mvsata_sreset = NULL; 277 read_pre_amps = 1; /* MUST */ 278 read_preamps = mvsata_read_preamps_gen1; 279 _fix_phy = mvsata_fix_phy_gen1; 280 #ifndef MVSATA_WITHOUTDMA 281 edma_setup_crqb = mvsata_edma_setup_crqb; 282 #endif 283 break; 284 285 case gen2: 286 read_preamps = mvsata_read_preamps_gen2; 287 _fix_phy = mvsata_fix_phy_gen2; 288 #ifndef MVSATA_WITHOUTDMA 289 edma_setup_crqb = mvsata_edma_setup_crqb; 290 #endif 291 break; 292 293 case gen2e: 294 read_preamps = mvsata_read_preamps_gen2; 295 _fix_phy = mvsata_fix_phy_gen2; 296 #ifndef MVSATA_WITHOUTDMA 297 edma_setup_crqb = mvsata_edma_setup_crqb_gen2e; 298 #endif 299 break; 300 } 301 302 sc->sc_gen = product->generation; 303 sc->sc_hc = product->hc; 304 sc->sc_port = product->port; 305 sc->sc_flags = product->flags; 306 307 #ifdef MVSATA_WITHOUTDMA 308 sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16; 309 #else 310 sc->sc_edma_setup_crqb = edma_setup_crqb; 311 sc->sc_wdcdev.sc_atac.atac_cap |= 312 (ATAC_CAP_DATA16 | ATAC_CAP_DMA | ATAC_CAP_UDMA); 313 #endif 314 sc->sc_wdcdev.sc_atac.atac_pio_cap = 4; 315 #ifdef MVSATA_WITHOUTDMA 316 sc->sc_wdcdev.sc_atac.atac_dma_cap = 0; 317 sc->sc_wdcdev.sc_atac.atac_udma_cap = 0; 318 #else 319 sc->sc_wdcdev.sc_atac.atac_dma_cap = 2; 320 sc->sc_wdcdev.sc_atac.atac_udma_cap = 6; 321 #endif 322 sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_ata_channels; 323 sc->sc_wdcdev.sc_atac.atac_nchannels = sc->sc_hc * sc->sc_port; 324 #ifndef MVSATA_WITHOUTDMA 325 sc->sc_wdcdev.sc_atac.atac_bustype_ata = &mvsata_ata_bustype; 326 #if NATAPIBUS > 0 327 sc->sc_wdcdev.sc_atac.atac_atapibus_attach = mvsata_atapibus_attach; 328 #endif 329 #endif 330 sc->sc_wdcdev.wdc_maxdrives = 1; /* SATA is always 1 drive */ 331 sc->sc_wdcdev.sc_atac.atac_probe = mvsata_probe_drive; 332 sc->sc_wdcdev.sc_atac.atac_set_modes = mvsata_setup_channel; 333 334 sc->sc_wdc_regs = 335 malloc(sizeof(struct 
wdc_regs) * product->hc * product->port, 336 M_DEVBUF, M_NOWAIT); 337 if (sc->sc_wdc_regs == NULL) { 338 aprint_error_dev(MVSATA_DEV(sc), 339 "can't allocate wdc regs memory\n"); 340 return ENOMEM; 341 } 342 sc->sc_wdcdev.regs = sc->sc_wdc_regs; 343 344 for (hc = 0; hc < sc->sc_hc; hc++) { 345 mvhc = &sc->sc_hcs[hc]; 346 mvhc->hc = hc; 347 mvhc->hc_sc = sc; 348 mvhc->hc_iot = sc->sc_iot; 349 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 350 hc * SATAHC_REGISTER_SIZE, SATAHC_REGISTER_SIZE, 351 &mvhc->hc_ioh)) { 352 aprint_error_dev(MVSATA_DEV(sc), 353 "can't subregion SATAHC %d registers\n", hc); 354 continue; 355 } 356 357 for (port = 0; port < sc->sc_port; port++) 358 if (mvsata_port_init(mvhc, port) == 0) { 359 int pre_amps; 360 361 mvport = mvhc->hc_ports[port]; 362 pre_amps = read_pre_amps ? 363 read_preamps(mvport) : 0x00000720; 364 mvport->_fix_phy_param.pre_amps = pre_amps; 365 mvport->_fix_phy_param._fix_phy = _fix_phy; 366 367 if (!mvsata_sreset) 368 mvsata_reset_port(mvport); 369 } 370 371 if (!mvsata_sreset) 372 mvsata_reset_hc(mvhc); 373 } 374 if (mvsata_sreset) 375 mvsata_sreset(sc); 376 377 if (mvsata_misc_reset) 378 mvsata_misc_reset(sc); 379 380 for (hc = 0; hc < sc->sc_hc; hc++) 381 for (port = 0; port < sc->sc_port; port++) { 382 mvport = sc->sc_hcs[hc].hc_ports[port]; 383 if (mvport == NULL) 384 continue; 385 if (mvsata_sreset) 386 mvport->_fix_phy_param._fix_phy(mvport); 387 } 388 for (channel = 0; channel < sc->sc_hc * sc->sc_port; channel++) 389 wdcattach(sc->sc_ata_channels[channel]); 390 391 return 0; 392 } 393 394 int 395 mvsata_intr(struct mvsata_hc *mvhc) 396 { 397 struct mvsata_softc *sc = mvhc->hc_sc; 398 struct mvsata_port *mvport; 399 uint32_t cause; 400 int port, handled = 0; 401 402 cause = MVSATA_HC_READ_4(mvhc, SATAHC_IC); 403 404 DPRINTFN(3, ("%s:%d: mvsata_intr: cause=0x%08x\n", 405 device_xname(MVSATA_DEV(sc)), mvhc->hc, cause)); 406 407 if (cause & SATAHC_IC_SAINTCOAL) 408 MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, 
~SATAHC_IC_SAINTCOAL);
	cause &= ~SATAHC_IC_SAINTCOAL;
	for (port = 0; port < sc->sc_port; port++) {
		mvport = mvhc->hc_ports[port];

		/* EDMA response-queue completion for this port. */
		if (cause & SATAHC_IC_DONE(port)) {
#ifndef MVSATA_WITHOUTDMA
			handled = mvsata_edma_handle(mvport, NULL);
#endif
			MVSATA_HC_WRITE_4(mvhc, SATAHC_IC,
			    ~SATAHC_IC_DONE(port));
		}

		/* Legacy (non-EDMA) device interrupt: hand to wdc layer. */
		if (cause & SATAHC_IC_SADEVINTERRUPT(port)) {
			wdcintr(&mvport->port_ata_channel);
			MVSATA_HC_WRITE_4(mvhc, SATAHC_IC,
			    ~SATAHC_IC_SADEVINTERRUPT(port));
			handled = 1;
		}
	}

	return handled;
}

/*
 * mvsata_error:
 *	Port error-interrupt handler.  Acks and decodes EDMA_IEC error
 *	cause bits (device connect/disconnect, EDMA self-disable, possible
 *	Port Multiplier hot plug).  Returns 0 if no enabled cause bit was
 *	set, 1 otherwise.
 */
int
mvsata_error(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	uint32_t cause;

	cause = MVSATA_EDMA_READ_4(mvport, EDMA_IEC);
	/*
	 * We must ack SATA_SE and SATA_FISIC before acking corresponding
	 * bits in EDMA_IEC.
	 */
	if (cause & EDMA_IE_SERRINT) {
		MVSATA_EDMA_WRITE_4(mvport, SATA_SE,
		    MVSATA_EDMA_READ_4(mvport, SATA_SEIM));
	}
	if (cause & EDMA_IE_ETRANSINT) {
		MVSATA_EDMA_WRITE_4(mvport, SATA_FISIC,
		    ~MVSATA_EDMA_READ_4(mvport, SATA_FISIM));
	}
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, ~cause);

	DPRINTFN(3, ("%s:%d:%d:"
	    " mvsata_error: cause=0x%08x, mask=0x%08x, status=0x%08x\n",
	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
	    mvport->port, cause, MVSATA_EDMA_READ_4(mvport, EDMA_IEM),
	    MVSATA_EDMA_READ_4(mvport, EDMA_S)));

	/* Only consider causes that are actually enabled in the mask. */
	cause &= MVSATA_EDMA_READ_4(mvport, EDMA_IEM);
	if (!cause)
		return 0;

	if (cause & EDMA_IE_EDEVDIS) {
		aprint_normal("%s:%d:%d: device disconnect\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port);
	}
	if (cause & EDMA_IE_EDEVCON) {
		/* GenI needs a PHY workaround on device connection. */
		if (sc->sc_gen == gen1)
			mvsata_devconn_gen1(mvport);

		DPRINTFN(3, (" device connected\n"));
	}
#ifndef MVSATA_WITHOUTDMA
	/* EDMA self-disable is signalled differently on GenI vs later. */
	if ((sc->sc_gen == gen1 && cause & EDMA_IE_ETRANSINT) ||
	    (sc->sc_gen != gen1 && cause &
EDMA_IE_ESELFDIS)) {
		switch (mvport->port_edmamode) {
		case dma:
		case queued:
		case ncq:
			/* EDMA was active: re-arm the queue pointers and
			 * re-enable it. */
			mvsata_edma_reset_qptr(mvport);
			mvsata_edma_enable(mvport);
			if (cause & EDMA_IE_EDEVERR)
				break;

			/* FALLTHROUGH */

		case nodma:
		default:
			aprint_error(
			    "%s:%d:%d: EDMA self disable happen 0x%x\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port, cause);
			break;
		}
	}
#endif
	if (cause & EDMA_IE_ETRANSINT) {
		/* hot plug the Port Multiplier */
		aprint_normal("%s:%d:%d: detect Port Multiplier?\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port);
	}

	return 1;
}


/*
 * ATA callback entry points
 */

/*
 * mvsata_probe_drive:
 *	Channel probe hook: reset the SATA interface and, if a device is
 *	detected, soft-reset it through the control port and interpret the
 *	resulting signature (ATA vs ATAPI).
 */
static void
mvsata_probe_drive(struct ata_channel *chp)
{
	struct mvsata_port * const mvport = (struct mvsata_port *)chp;
	uint32_t sstat, sig;

	sstat = sata_reset_interface(chp, mvport->port_iot,
	    mvport->port_sata_scontrol, mvport->port_sata_sstatus);
	switch (sstat) {
	case SStatus_DET_DEV:
		mvsata_pmp_select(mvport, PMP_PORT_CTL);
		sig = mvsata_softreset(mvport, AT_WAIT);
		sata_interpret_sig(chp, 0, sig);
		break;
	default:
		break;
	}
}

#ifndef MVSATA_WITHOUTDMA
/*
 * mvsata_bio:
 *	ata_bustype block-I/O entry point.  Builds an ata_xfer for the
 *	request and queues it; DMA is used unless the request is marked
 *	ATA_SINGLE or the drive has no (U)DMA mode.
 *	Returns ATACMD_COMPLETE, ATACMD_QUEUED or ATACMD_TRY_AGAIN.
 */
static int
mvsata_bio(struct ata_drive_datas *drvp, struct ata_bio *ata_bio)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_xfer *xfer;

	DPRINTFN(1, ("%s:%d: mvsata_bio: drive=%d, blkno=%" PRId64
	    ", bcount=%ld\n", device_xname(atac->atac_dev), chp->ch_channel,
	    drvp->drive, ata_bio->blkno, ata_bio->bcount));

	xfer = ata_get_xfer(ATAXF_NOSLEEP);
	if (xfer == NULL)
		return ATACMD_TRY_AGAIN;
	if (atac->atac_cap & ATAC_CAP_NOIRQ)
		ata_bio->flags |= ATA_POLL;
	if (ata_bio->flags & ATA_POLL)
		xfer->c_flags |= C_POLL;
	if ((drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) &&
(ata_bio->flags & ATA_SINGLE) == 0)
		xfer->c_flags |= C_DMA;
	xfer->c_drive = drvp->drive;
	xfer->c_cmd = ata_bio;
	xfer->c_databuf = ata_bio->databuf;
	xfer->c_bcount = ata_bio->bcount;
	xfer->c_start = mvsata_bio_start;
	xfer->c_intr = mvsata_bio_intr;
	xfer->c_kill_xfer = mvsata_bio_kill_xfer;
	ata_exec_xfer(chp, xfer);
	return (ata_bio->flags & ATA_ITSDONE) ? ATACMD_COMPLETE : ATACMD_QUEUED;
}

/*
 * mvsata_reset_drive:
 *	Soft-reset one drive.  EDMA is temporarily disabled around the
 *	reset (and its queue pointers re-armed afterwards).  The device
 *	signature is returned through *sigp when non-NULL.
 */
static void
mvsata_reset_drive(struct ata_drive_datas *drvp, int flags, uint32_t *sigp)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	uint32_t edma_c;
	uint32_t sig;

	edma_c = MVSATA_EDMA_READ_4(mvport, EDMA_CMD);

	DPRINTF(("%s:%d: mvsata_reset_drive: drive=%d (EDMA %sactive)\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, drvp->drive,
	    (edma_c & EDMA_CMD_EENEDMA) ? "" : "not "));

	if (edma_c & EDMA_CMD_EENEDMA)
		mvsata_edma_disable(mvport, 10000 /* ms */, flags & AT_WAIT);

	mvsata_pmp_select(mvport, drvp->drive);

	sig = mvsata_softreset(mvport, flags & AT_WAIT);

	if (sigp)
		*sigp = sig;

	if (edma_c & EDMA_CMD_EENEDMA) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}
	return;
}

/*
 * mvsata_reset_channel:
 *	Hard-reset the whole channel: reset the port and the SATA
 *	interface, optionally downgrade the link to GenI speed when the
 *	device fails to negotiate, kill all outstanding transfers, and
 *	reconfigure/re-enable EDMA.
 */
static void
mvsata_reset_channel(struct ata_channel *chp, int flags)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct ata_xfer *xfer;
	uint32_t sstat, ctrl;
	int i;

	DPRINTF(("%s: mvsata_reset_channel: channel=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel));

	mvsata_hreset_port(mvport);
	sstat = sata_reset_interface(chp, mvport->port_iot,
	    mvport->port_sata_scontrol, mvport->port_sata_sstatus);

	if (flags & AT_WAIT && sstat == SStatus_DET_DEV_NE &&
	    sc->sc_gen != gen1) {
		/* Downgrade to GenI */
		const uint32_t val =
SControl_IPM_NONE | SControl_SPD_ANY |
		    SControl_DET_DISABLE;

		MVSATA_EDMA_WRITE_4(mvport, mvport->port_sata_scontrol, val);

		ctrl = MVSATA_EDMA_READ_4(mvport, SATA_SATAICFG);
		ctrl &= ~(1 << 17);	/* Disable GenII */
		MVSATA_EDMA_WRITE_4(mvport, SATA_SATAICFG, ctrl);

		mvsata_hreset_port(mvport);
		sata_reset_interface(chp, mvport->port_iot,
		    mvport->port_sata_scontrol, mvport->port_sata_sstatus);
	}

	/* Abort every transfer still tracked in the request table. */
	for (i = 0; i < MVSATA_EDMAQ_LEN; i++) {
		xfer = mvport->port_reqtbl[i].xfer;
		if (xfer == NULL)
			continue;
		chp->ch_queue->active_xfer = xfer;
		xfer->c_kill_xfer(chp, xfer, KILL_RESET);
	}

	mvsata_edma_config(mvport, mvport->port_edmamode);
	mvsata_edma_reset_qptr(mvport);
	mvsata_edma_enable(mvport);
	return;
}


/*
 * mvsata_exec_command:
 *	ata_bustype command entry point: queue a register-level (non-bio)
 *	ATA command.  For AT_WAIT callers, sleeps until AT_DONE is set.
 *	Returns ATACMD_COMPLETE, ATACMD_QUEUED or ATACMD_TRY_AGAIN.
 */
static int
mvsata_exec_command(struct ata_drive_datas *drvp, struct ata_command *ata_c)
{
	struct ata_channel *chp = drvp->chnl_softc;
#ifdef MVSATA_DEBUG
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
#endif
	struct ata_xfer *xfer;
	int rv, s;

	DPRINTFN(1, ("%s:%d: mvsata_exec_command: drive=%d, bcount=%d,"
	    " r_lba=0x%012"PRIx64", r_count=0x%04x, r_features=0x%04x,"
	    " r_device=0x%02x, r_command=0x%02x\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel,
	    drvp->drive, ata_c->bcount, ata_c->r_lba, ata_c->r_count,
	    ata_c->r_features, ata_c->r_device, ata_c->r_command));

	xfer = ata_get_xfer(ata_c->flags & AT_WAIT ?
ATAXF_CANSLEEP :
	    ATAXF_NOSLEEP);
	if (xfer == NULL)
		return ATACMD_TRY_AGAIN;
	if (ata_c->flags & AT_POLL)
		xfer->c_flags |= C_POLL;
	if (ata_c->flags & AT_WAIT)
		xfer->c_flags |= C_WAIT;
	xfer->c_drive = drvp->drive;
	xfer->c_databuf = ata_c->data;
	xfer->c_bcount = ata_c->bcount;
	xfer->c_cmd = ata_c;
	xfer->c_start = mvsata_wdc_cmd_start;
	xfer->c_intr = mvsata_wdc_cmd_intr;
	xfer->c_kill_xfer = mvsata_wdc_cmd_kill_xfer;
	s = splbio();
	ata_exec_xfer(chp, xfer);
#ifdef DIAGNOSTIC
	if ((ata_c->flags & AT_POLL) != 0 &&
	    (ata_c->flags & AT_DONE) == 0)
		panic("mvsata_exec_command: polled command not done");
#endif
	if (ata_c->flags & AT_DONE)
		rv = ATACMD_COMPLETE;
	else {
		if (ata_c->flags & AT_WAIT) {
			/* Sleep until the completion path sets AT_DONE. */
			while ((ata_c->flags & AT_DONE) == 0)
				tsleep(ata_c, PRIBIO, "mvsatacmd", 0);
			rv = ATACMD_COMPLETE;
		} else
			rv = ATACMD_QUEUED;
	}
	splx(s);
	return rv;
}

/* Reference counting is not needed for this controller: no-op. */
static int
mvsata_addref(struct ata_drive_datas *drvp)
{

	return 0;
}

/* No-op counterpart of mvsata_addref(). */
static void
mvsata_delref(struct ata_drive_datas *drvp)
{

	return;
}

/* Nothing to kill beyond what the generic ata layer already handles. */
static void
mvsata_killpending(struct ata_drive_datas *drvp)
{

	return;
}

#if NATAPIBUS > 0
/*
 * mvsata_atapibus_attach:
 *	Attach the ATAPI (scsipi) bus for a channel: fill in the scsipi
 *	adapter/channel structures and attach the atapibus child.
 */
static void
mvsata_atapibus_attach(struct atabus_softc *ata_sc)
{
	struct ata_channel *chp = ata_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	struct scsipi_channel *chan = &chp->ch_atapi_channel;

	/*
	 * Fill in the scsipi_adapter.
	 */
	adapt->adapt_dev = atac->atac_dev;
	adapt->adapt_nchannels = atac->atac_nchannels;
	adapt->adapt_request = mvsata_atapi_scsipi_request;
	adapt->adapt_minphys = mvsata_atapi_minphys;
	atac->atac_atapi_adapter.atapi_probe_device = mvsata_atapi_probe_device;

	/*
	 * Fill in the scsipi_channel.
*/
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &mvsata_atapi_bustype;
	chan->chan_channel = chp->ch_channel;
	chan->chan_flags = SCSIPI_CHAN_OPENINGS;
	chan->chan_openings = 1;	/* one outstanding command */
	chan->chan_max_periph = 1;
	chan->chan_ntargets = 1;
	chan->chan_nluns = 1;

	chp->atapibus =
	    config_found_ia(ata_sc->sc_dev, "atapi", chan, atapiprint);
}

/*
 * mvsata_atapi_scsipi_request:
 *	scsipi adapter request hook.  Only ADAPTER_REQ_RUN_XFER is
 *	supported: wrap the scsipi_xfer in an ata_xfer and queue it on
 *	the channel.
 */
static void
mvsata_atapi_scsipi_request(struct scsipi_channel *chan,
			    scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct scsipi_periph *periph;
	struct scsipi_xfer *sc_xfer;
	struct mvsata_softc *sc = device_private(adapt->adapt_dev);
	struct atac_softc *atac = &sc->sc_wdcdev.sc_atac;
	struct ata_xfer *xfer;
	int channel = chan->chan_channel;
	int drive, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		sc_xfer = arg;
		periph = sc_xfer->xs_periph;
		drive = periph->periph_target;

		if (!device_is_active(atac->atac_dev)) {
			sc_xfer->error = XS_DRIVER_STUFFUP;
			scsipi_done(sc_xfer);
			return;
		}
		xfer = ata_get_xfer(ATAXF_NOSLEEP);
		if (xfer == NULL) {
			sc_xfer->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(sc_xfer);
			return;
		}

		if (sc_xfer->xs_control & XS_CTL_POLL)
			xfer->c_flags |= C_POLL;
		xfer->c_drive = drive;
		xfer->c_flags |= C_ATAPI;
		xfer->c_cmd = sc_xfer;
		xfer->c_databuf = sc_xfer->data;
		xfer->c_bcount = sc_xfer->datalen;
		xfer->c_start = mvsata_atapi_start;
		xfer->c_intr = mvsata_atapi_intr;
		xfer->c_kill_xfer = mvsata_atapi_kill_xfer;
		xfer->c_dscpoll = 0;
		s = splbio();
		ata_exec_xfer(atac->atac_channels[channel], xfer);
#ifdef DIAGNOSTIC
		if ((sc_xfer->xs_control & XS_CTL_POLL) != 0 &&
		    (sc_xfer->xs_status & XS_STS_DONE) == 0)
			panic("mvsata_atapi_scsipi_request:"
			    " polled command not done");
#endif
		splx(s);
		return;

	default:
		/* Not supported, nothing to do. */
		;
	}
}

/* Clamp a transfer to MAXPHYS, then apply the generic minphys. */
static void
mvsata_atapi_minphys(struct buf *bp)
{

	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	minphys(bp);
}

/*
 * mvsata_atapi_probe_device:
 *	Probe/attach one ATAPI target: IDENTIFY the device, allocate and
 *	fill a scsipi_periph, and hand it to atapi_probe_device().
 */
static void
mvsata_atapi_probe_device(struct atapibus_softc *sc, int target)
{
	struct scsipi_channel *chan = sc->sc_channel;
	struct scsipi_periph *periph;
	struct ataparams ids;
	struct ataparams *id = &ids;
	struct mvsata_softc *mvc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct atac_softc *atac = &mvc->sc_wdcdev.sc_atac;
	struct ata_channel *chp = atac->atac_channels[chan->chan_channel];
	struct ata_drive_datas *drvp = &chp->ch_drive[target];
	struct scsipibus_attach_args sa;
	char serial_number[21], model[41], firmware_revision[9];
	int s;

	/* skip if already attached */
	if (scsipi_lookup_periph(chan, target, 0) != NULL)
		return;

	/* if no ATAPI device detected at attach time, skip */
	if (drvp->drive_type != ATA_DRIVET_ATAPI) {
		DPRINTF(("%s:%d: mvsata_atapi_probe_device:"
		    " drive %d not present\n",
		    device_xname(atac->atac_dev), chp->ch_channel, target));
		return;
	}

	/* Some ATAPI devices need a bit more time after software reset.
*/
	delay(5000);
	if (ata_get_params(drvp, AT_WAIT, id) == 0) {
#ifdef ATAPI_DEBUG_PROBE
		log(LOG_DEBUG, "%s:%d: drive %d: cmdsz 0x%x drqtype 0x%x\n",
		    device_xname(atac->atac_dev), chp->ch_channel, target,
		    id->atap_config & ATAPI_CFG_CMD_MASK,
		    id->atap_config & ATAPI_CFG_DRQ_MASK);
#endif
		periph = scsipi_alloc_periph(M_NOWAIT);
		if (periph == NULL) {
			aprint_error_dev(atac->atac_dev,
			    "unable to allocate periph"
			    " for channel %d drive %d\n",
			    chp->ch_channel, target);
			return;
		}
		periph->periph_dev = NULL;
		periph->periph_channel = chan;
		periph->periph_switch = &atapi_probe_periphsw;
		periph->periph_target = target;
		periph->periph_lun = 0;
		periph->periph_quirks = PQUIRK_ONLYBIG;

#ifdef SCSIPI_DEBUG
		if (SCSIPI_DEBUG_TYPE == SCSIPI_BUSTYPE_ATAPI &&
		    SCSIPI_DEBUG_TARGET == target)
			periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
#endif
		periph->periph_type = ATAPI_CFG_TYPE(id->atap_config);
		if (id->atap_config & ATAPI_CFG_REMOV)
			periph->periph_flags |= PERIPH_REMOVABLE;
		if (periph->periph_type == T_SEQUENTIAL) {
			/* Tape drives need DSC-wait handling. */
			s = splbio();
			drvp->drive_flags |= ATA_DRIVE_ATAPIDSCW;
			splx(s);
		}

		sa.sa_periph = periph;
		sa.sa_inqbuf.type = ATAPI_CFG_TYPE(id->atap_config);
		sa.sa_inqbuf.removable = id->atap_config & ATAPI_CFG_REMOV ?
		    T_REMOV : T_FIXED;
		scsipi_strvis((u_char *)model, 40, id->atap_model, 40);
		scsipi_strvis((u_char *)serial_number, 20, id->atap_serial,
		    20);
		scsipi_strvis((u_char *)firmware_revision, 8,
		    id->atap_revision, 8);
		sa.sa_inqbuf.vendor = model;
		sa.sa_inqbuf.product = serial_number;
		sa.sa_inqbuf.revision = firmware_revision;

		/*
		 * Determine the operating mode capabilities of the device.
		 */
		if ((id->atap_config & ATAPI_CFG_CMD_MASK) == ATAPI_CFG_CMD_16)
			periph->periph_cap |= PERIPH_CAP_CMD16;
		/* XXX This is gross. */
		periph->periph_cap |= (id->atap_config & ATAPI_CFG_DRQ_MASK);

		drvp->drv_softc = atapi_probe_device(sc, target, periph, &sa);

		if (drvp->drv_softc)
			ata_probe_caps(drvp);
		else {
			s = splbio();
			drvp->drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	} else {
		DPRINTF(("%s:%d: mvsata_atapi_probe_device:"
		    " ATAPI_IDENTIFY_DEVICE failed for drive %d: error 0x%x\n",
		    device_xname(atac->atac_dev), chp->ch_channel, target,
		    chp->ch_error));
		s = splbio();
		drvp->drive_type = ATA_DRIVET_NONE;
		splx(s);
	}
}

/*
 * Kill off all pending xfers for a periph.
 *
 * Must be called at splbio().
 */
static void
mvsata_atapi_kill_pending(struct scsipi_periph *periph)
{
	struct atac_softc *atac =
	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
	struct ata_channel *chp =
	    atac->atac_channels[periph->periph_channel->chan_channel];

	ata_kill_pending(&chp->ch_drive[periph->periph_target]);
}
#endif	/* NATAPIBUS > 0 */
#endif	/* MVSATA_WITHOUTDMA */


/*
 * mvsata_setup_channel()
 *	Setup EDMA registers and prepare/purge DMA resources.
 *	We assume the EDMA has already been stopped.
*/
static void
mvsata_setup_channel(struct ata_channel *chp)
{
#if !defined(MVSATA_WITHOUTDMA) || defined(MVSATA_DEBUG)
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
#endif
	struct ata_drive_datas *drvp;
	uint32_t edma_mode;
	int drive, s;
#ifndef MVSATA_WITHOUTDMA
	int i;
	const int crqb_size = sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN;
	const int crpb_size = sizeof(struct crpb) * MVSATA_EDMAQ_LEN;
	const int eprd_buf_size = MVSATA_EPRD_MAX_SIZE * MVSATA_EDMAQ_LEN;
#endif

	DPRINTF(("%s:%d: mvsata_setup_channel: ",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel));

	/* Decide whether EDMA can be used on this channel at all. */
	edma_mode = nodma;
	for (drive = 0; drive < chp->ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];

		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;

		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
		}

		if (drvp->drive_flags & (ATA_DRIVE_UDMA | ATA_DRIVE_DMA))
			if (drvp->drive_type == ATA_DRIVET_ATA)
				edma_mode = dma;
	}

	DPRINTF(("EDMA %sactive mode\n", (edma_mode == nodma) ? "not " : ""));

#ifndef MVSATA_WITHOUTDMA
	if (edma_mode == nodma) {
no_edma:
		/* Release any previously prepared EDMA DMA resources. */
		if (mvport->port_crqb != NULL)
			mvsata_edma_resource_purge(mvport, mvport->port_dmat,
			    mvport->port_crqb_dmamap, mvport->port_crqb);
		if (mvport->port_crpb != NULL)
			mvsata_edma_resource_purge(mvport, mvport->port_dmat,
			    mvport->port_crpb_dmamap, mvport->port_crpb);
		if (mvport->port_eprd != NULL)
			mvsata_edma_resource_purge(mvport, mvport->port_dmat,
			    mvport->port_eprd_dmamap, mvport->port_eprd);

		return;
	}

	/* Prepare request/response queues and PRD table buffers. */
	if (mvport->port_crqb == NULL)
		mvport->port_crqb = mvsata_edma_resource_prepare(mvport,
		    mvport->port_dmat, &mvport->port_crqb_dmamap, crqb_size, 1);
	if (mvport->port_crpb == NULL)
		mvport->port_crpb = mvsata_edma_resource_prepare(mvport,
		    mvport->port_dmat, &mvport->port_crpb_dmamap, crpb_size, 0);
	if (mvport->port_eprd == NULL) {
		mvport->port_eprd = mvsata_edma_resource_prepare(mvport,
		    mvport->port_dmat, &mvport->port_eprd_dmamap, eprd_buf_size,
		    1);
		for (i = 0; i < MVSATA_EDMAQ_LEN; i++) {
			mvport->port_reqtbl[i].eprd_offset =
			    i * MVSATA_EPRD_MAX_SIZE;
			mvport->port_reqtbl[i].eprd = mvport->port_eprd +
			    i * MVSATA_EPRD_MAX_SIZE / sizeof(struct eprd);
		}
	}

	if (mvport->port_crqb == NULL || mvport->port_crpb == NULL ||
	    mvport->port_eprd == NULL) {
		/* Allocation failed: fall back to PIO for all drives. */
		aprint_error_dev(MVSATA_DEV2(mvport),
		    "channel %d: can't use EDMA\n", chp->ch_channel);
		s = splbio();
		for (drive = 0; drive < chp->ch_ndrives; drive++) {
			drvp = &chp->ch_drive[drive];

			/* If no drive, skip */
			if (drvp->drive_type == ATA_DRIVET_NONE)
				continue;

			drvp->drive_flags &= ~(ATA_DRIVE_UDMA | ATA_DRIVE_DMA);
		}
		splx(s);
		goto no_edma;
	}

	mvsata_edma_config(mvport, edma_mode);
	mvsata_edma_reset_qptr(mvport);
	mvsata_edma_enable(mvport);
#endif
}

#ifndef MVSATA_WITHOUTDMA
/*
 * mvsata_bio_start:
 *	Start (or continue) a block-I/O transfer on the channel.  DMA-capable
 *	transfers are queued through the EDMA engine; otherwise the transfer
 *	is performed with PIO register commands (CHS, LBA, or LBA48
 *	addressing).  For polled transfers this loops via "again" until the
 *	whole request is done.
 */
static void
mvsata_bio_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct ata_bio *ata_bio = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
	u_int16_t cyl;
	u_int8_t head, sect, cmd = 0;
	int nblks, error;

	DPRINTFN(2, ("%s:%d: mvsata_bio_start: drive=%d\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));

	/* Saturating per-drive DMA transfer counter. */
	if (xfer->c_flags & C_DMA)
		if (drvp->n_xfers <= NXFER)
			drvp->n_xfers++;

again:
	/*
	 *
	 * When starting a multi-sector transfer, or doing single-sector
	 * transfers...
	 */
	if (xfer->c_skip == 0 || (ata_bio->flags & ATA_SINGLE) != 0) {
		if (ata_bio->flags & ATA_SINGLE)
			nblks = 1;
		else
			nblks = xfer->c_bcount / ata_bio->lp->d_secsize;
		/* Check for bad sectors and adjust transfer, if necessary. */
		if ((ata_bio->lp->d_flags & D_BADSECT) != 0) {
			long blkdiff;
			int i;

			/* badsect[] is terminated by -1. */
			for (i = 0; (blkdiff = ata_bio->badsect[i]) != -1;
			    i++) {
				blkdiff -= ata_bio->blkno;
				if (blkdiff < 0)
					continue;
				if (blkdiff == 0)
					/* Replace current block of transfer. */
					ata_bio->blkno =
					    ata_bio->lp->d_secperunit -
					    ata_bio->lp->d_nsectors - i - 1;
				if (blkdiff < nblks) {
					/* Bad block inside transfer. */
					ata_bio->flags |= ATA_SINGLE;
					nblks = 1;
				}
				break;
			}
			/* Transfer is okay now. */
		}
		if (xfer->c_flags & C_DMA) {
			ata_bio->nblks = nblks;
			ata_bio->nbytes = xfer->c_bcount;

			/* Polled DMA: mask port interrupts while we spin. */
			if (xfer->c_flags & C_POLL)
				sc->sc_enable_intr(mvport, 0 /*off*/);
			error = mvsata_edma_enqueue(mvport, ata_bio,
			    (char *)xfer->c_databuf + xfer->c_skip);
			if (error) {
				if (error == EINVAL) {
					/*
					 * We can't do DMA on this transfer
					 * for some reason.  Fall back to
					 * PIO.
					 */
					xfer->c_flags &= ~C_DMA;
					error = 0;
					goto do_pio;
				}
				if (error == EBUSY) {
					aprint_error_dev(atac->atac_dev,
					    "channel %d: EDMA Queue full\n",
					    chp->ch_channel);
					/*
					 * XXXX: Perhaps, after it waits for
					 * a while, it is necessary to call
					 * bio_start again.
					 */
				}
				ata_bio->error = ERR_DMA;
				ata_bio->r_error = 0;
				mvsata_bio_done(chp, xfer);
				return;
			}
			chp->ch_flags |= ATACH_DMA_WAIT;
			/* start timeout machinery */
			if ((xfer->c_flags & C_POLL) == 0)
				callout_reset(&chp->ch_callout,
				    ATA_DELAY / 1000 * hz,
				    mvsata_edma_timeout, xfer);
			/* wait for irq */
			goto intr;
		} /* else not DMA */
do_pio:
		/* Compute task-file addressing for the PIO command. */
		if (ata_bio->flags & ATA_LBA48) {
			/* LBA48 address goes via wdccommandext() below. */
			sect = 0;
			cyl = 0;
			head = 0;
		} else if (ata_bio->flags & ATA_LBA) {
			/* 28-bit LBA split across sect/cyl/head registers. */
			sect = (ata_bio->blkno >> 0) & 0xff;
			cyl = (ata_bio->blkno >> 8) & 0xffff;
			head = (ata_bio->blkno >> 24) & 0x0f;
			head |= WDSD_LBA;
		} else {
			/* Legacy CHS addressing from the disklabel geometry. */
			int blkno = ata_bio->blkno;
			sect = blkno % ata_bio->lp->d_nsectors;
			sect++;	/* Sectors begin with 1, not 0. */
			blkno /= ata_bio->lp->d_nsectors;
			head = blkno % ata_bio->lp->d_ntracks;
			blkno /= ata_bio->lp->d_ntracks;
			cyl = blkno;
			head |= WDSD_CHS;
		}
		ata_bio->nblks = min(nblks, ata_bio->multi);
		ata_bio->nbytes = ata_bio->nblks * ata_bio->lp->d_secsize;
		KASSERT(nblks == 1 || (ata_bio->flags & ATA_SINGLE) == 0);
		if (ata_bio->nblks > 1)
			cmd = (ata_bio->flags & ATA_READ) ?
			    WDCC_READMULTI : WDCC_WRITEMULTI;
		else
			cmd = (ata_bio->flags & ATA_READ) ?
			    WDCC_READ : WDCC_WRITE;

		/* EDMA disable, if enabled this channel. */
		if (mvport->port_edmamode != nodma)
			mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);

		mvsata_pmp_select(mvport, xfer->c_drive);

		/* Do control operations specially. */
		if (__predict_false(drvp->state < READY)) {
			/*
			 * Actually, we want to be careful not to mess with
			 * the control state if the device is currently busy,
			 * but we can assume that we never get to this point
			 * if that's the case.
			 */
			/*
			 * If it's not a polled command, we need the kernel
			 * thread
			 */
			if ((xfer->c_flags & C_POLL) == 0 && cpu_intr_p()) {
				chp->ch_queue->queue_freeze++;
				wakeup(&chp->ch_thread);
				return;
			}
			if (mvsata_bio_ready(mvport, ata_bio, xfer->c_drive,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0) != 0) {
				mvsata_bio_done(chp, xfer);
				return;
			}
		}

		/* Initiate command! */
		MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
		switch(wdc_wait_for_ready(chp, ATA_DELAY, wait_flags)) {
		case WDCWAIT_OK:
			break;
		case WDCWAIT_TOUT:
			goto timeout;
		case WDCWAIT_THR:
			return;
		}
		if (ata_bio->flags & ATA_LBA48)
			wdccommandext(chp, 0, atacmd_to48(cmd),
			    ata_bio->blkno, nblks, 0, WDSD_LBA);
		else
			wdccommand(chp, 0, cmd, cyl,
			    head, sect, nblks,
			    (ata_bio->lp->d_type == DTYPE_ST506) ?
			    ata_bio->lp->d_precompcyl / 4 : 0);

		/* start timeout machinery */
		if ((xfer->c_flags & C_POLL) == 0)
			callout_reset(&chp->ch_callout,
			    ATA_DELAY / 1000 * hz, wdctimeout, chp);
	} else if (ata_bio->nblks > 1) {
		/* The number of blocks in the last stretch may be smaller. */
		nblks = xfer->c_bcount / ata_bio->lp->d_secsize;
		if (ata_bio->nblks > nblks) {
			ata_bio->nblks = nblks;
			ata_bio->nbytes = xfer->c_bcount;
		}
	}
	/* If this was a write and not using DMA, push the data. */
	if ((ata_bio->flags & ATA_READ) == 0) {
		/*
		 * we have to busy-wait here, we can't rely on running in
		 * thread context.
		 */
		if (wdc_wait_for_drq(chp, ATA_DELAY, AT_POLL) != 0) {
			aprint_error_dev(atac->atac_dev,
			    "channel %d: drive %d timeout waiting for DRQ,"
			    " st=0x%02x, err=0x%02x\n",
			    chp->ch_channel, xfer->c_drive, chp->ch_status,
			    chp->ch_error);
			ata_bio->error = TIMEOUT;
			mvsata_bio_done(chp, xfer);
			return;
		}
		if (chp->ch_status & WDCS_ERR) {
			ata_bio->error = ERROR;
			ata_bio->r_error = chp->ch_error;
			mvsata_bio_done(chp, xfer);
			return;
		}

		wdc->dataout_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, ata_bio->nbytes);
	}

intr:
	/* Wait for IRQ (either real or polled) */
	if ((ata_bio->flags & ATA_POLL) == 0) {
		chp->ch_flags |= ATACH_IRQ_WAIT;
	} else {
		/* Wait for at last 400ns for status bit to be valid */
		delay(1);
		if (chp->ch_flags & ATACH_DMA_WAIT) {
			mvsata_edma_wait(mvport, xfer, ATA_DELAY);
			sc->sc_enable_intr(mvport, 1 /*on*/);
			chp->ch_flags &= ~ATACH_DMA_WAIT;
		}
		mvsata_bio_intr(chp, xfer, 0);
		if ((ata_bio->flags & ATA_ITSDONE) == 0)
			goto again;
	}
	return;

timeout:
	aprint_error_dev(atac->atac_dev,
	    "channel %d: drive %d not ready, st=0x%02x, err=0x%02x\n",
	    chp->ch_channel, xfer->c_drive, chp->ch_status, chp->ch_error);
	ata_bio->error = TIMEOUT;
	mvsata_bio_done(chp, xfer);
	return;
}

/*
 * mvsata_bio_intr:
 *	Interrupt (or poll) handler for a bio transfer.  Returns 0 if the
 *	IRQ was not for us, 1 otherwise.  For PIO reads this fetches the
 *	data; on completion or error it calls mvsata_bio_done().
 */
static int
mvsata_bio_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
{
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct ata_bio *ata_bio = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	DPRINTFN(2, ("%s:%d: mvsata_bio_intr: drive=%d\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));

	chp->ch_flags &= ~(ATACH_IRQ_WAIT|ATACH_DMA_WAIT);

	/*
	 * If we missed an interrupt transfer, reset and restart.
	 * Don't try to continue transfer, we may have missed cycles.
	 */
	if (xfer->c_flags & C_TIMEOU) {
		ata_bio->error = TIMEOUT;
		mvsata_bio_done(chp, xfer);
		return 1;
	}

	/* Is it not a transfer, but a control operation? */
	if (!(xfer->c_flags & C_DMA) && drvp->state < READY) {
		aprint_error_dev(atac->atac_dev,
		    "channel %d: drive %d bad state %d in mvsata_bio_intr\n",
		    chp->ch_channel, xfer->c_drive, drvp->state);
		panic("mvsata_bio_intr: bad state");
	}

	/* Ack interrupt done by wdc_wait_for_unbusy */
	/* NOTE(review): "c_skip%d" lacks '=' — cosmetic, kept as-is. */
	if (!(xfer->c_flags & C_DMA) &&
	    (wdc_wait_for_unbusy(chp, (irq == 0) ? ATA_DELAY : 0, AT_POLL)
	    == WDCWAIT_TOUT)) {
		if (irq && (xfer->c_flags & C_TIMEOU) == 0)
			return 0;	/* IRQ was not for us */
		aprint_error_dev(atac->atac_dev,
		    "channel %d: drive %d timeout, c_bcount=%d, c_skip%d\n",
		    chp->ch_channel, xfer->c_drive, xfer->c_bcount,
		    xfer->c_skip);
		ata_bio->error = TIMEOUT;
		mvsata_bio_done(chp, xfer);
		return 1;
	}

	if (xfer->c_flags & C_DMA) {
		if (ata_bio->error == NOERROR)
			goto end;
		if (ata_bio->error == ERR_DMA)
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ?
			    AT_POLL : 0);
	}

	/* if we had an error, end */
	if (ata_bio->error != NOERROR) {
		mvsata_bio_done(chp, xfer);
		return 1;
	}

	/* If this was a read and not using DMA, fetch the data. */
	if ((ata_bio->flags & ATA_READ) != 0) {
		if ((chp->ch_status & WDCS_DRQ) != WDCS_DRQ) {
			aprint_error_dev(atac->atac_dev,
			    "channel %d: drive %d read intr before drq\n",
			    chp->ch_channel, xfer->c_drive);
			ata_bio->error = TIMEOUT;
			mvsata_bio_done(chp, xfer);
			return 1;
		}
		wdc->datain_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, ata_bio->nbytes);
	}

end:
	/* Account for the chunk just transferred. */
	ata_bio->blkno += ata_bio->nblks;
	ata_bio->blkdone += ata_bio->nblks;
	xfer->c_skip += ata_bio->nbytes;
	xfer->c_bcount -= ata_bio->nbytes;
	/* See if this transfer is complete. */
	if (xfer->c_bcount > 0) {
		if ((ata_bio->flags & ATA_POLL) == 0)
			/* Start the next operation */
			mvsata_bio_start(chp, xfer);
		else
			/* Let mvsata_bio_start do the loop */
			return 1;
	} else { /* Done with this transfer */
		ata_bio->error = NOERROR;
		mvsata_bio_done(chp, xfer);
	}
	return 1;
}

/*
 * mvsata_bio_kill_xfer:
 *	Abort a queued bio transfer (device gone or channel reset).  Frees
 *	the xfer, marks the request done with the matching error code and
 *	notifies the drive's completion callback.
 */
static void
mvsata_bio_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_bio *ata_bio = xfer->c_cmd;
	int drive = xfer->c_drive;

	DPRINTFN(2, ("%s:%d: mvsata_bio_kill_xfer: drive=%d\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));

	/* EDMA restart, if enabled */
	if (!(xfer->c_flags & C_DMA) && mvport->port_edmamode != nodma) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}

	ata_free_xfer(chp, xfer);

	ata_bio->flags |= ATA_ITSDONE;
	switch (reason) {
	case KILL_GONE:
		ata_bio->error = ERR_NODEV;
		break;
	case KILL_RESET:
		ata_bio->error = ERR_RESET;
		break;
	default:
		aprint_error_dev(atac->atac_dev,
		    "mvsata_bio_kill_xfer: unknown reason %d\n", reason);
		panic("mvsata_bio_kill_xfer");
	}
	ata_bio->r_error = WDCE_ABRT;
	(*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc);
}

/*
 * mvsata_bio_done:
 *	Normal completion path for a bio transfer: stop the timeout callout,
 *	restart EDMA if needed, report the residual count, release the xfer
 *	and kick the queue for the next request.
 */
static void
mvsata_bio_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct ata_bio *ata_bio = xfer->c_cmd;
	int drive = xfer->c_drive;

	DPRINTFN(2, ("%s:%d: mvsata_bio_done: drive=%d, flags=0x%x\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive,
	    (u_int)xfer->c_flags));

	callout_stop(&chp->ch_callout);

	/* EDMA restart, if enabled */
	if (!(xfer->c_flags & C_DMA) && mvport->port_edmamode != nodma) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}

	/* feed back residual bcount to our caller */
	ata_bio->bcount = xfer->c_bcount;

	/* mark controller inactive and free xfer */
	KASSERT(chp->ch_queue->active_xfer != NULL);
	chp->ch_queue->active_xfer = NULL;
	ata_free_xfer(chp, xfer);

	/* Wake anyone draining this drive's queue. */
	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		ata_bio->error = ERR_NODEV;
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		wakeup(&chp->ch_queue->active_xfer);
	}
	ata_bio->flags |= ATA_ITSDONE;
	(*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc);
	atastart(chp);
}

/*
 * mvsata_bio_ready:
 *	Bring a drive with state < READY up: recalibrate, set PIO/DMA/UDMA
 *	transfer modes, program geometry (CHS only) and multi-sector count.
 *	Runs with interrupts disabled on the port; polls for each step.
 *	Returns 0 when the drive is ready, -1 on timeout/error (error code
 *	left in ata_bio).
 */
static int
mvsata_bio_ready(struct mvsata_port *mvport, struct ata_bio *ata_bio, int drive,
    int flags)
{
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_drive_datas *drvp = &chp->ch_drive[drive];
	const char *errstring;

	/*
	 * disable interrupts, all commands here should be quick
	 * enough to be able to poll, and we don't go here that often
	 */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	DELAY(10);
	errstring = "wait";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	wdccommandshort(chp, 0, WDCC_RECAL);
	/* Wait for at least 400ns for status bit to be valid */
	DELAY(1);
	errstring = "recal";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
	/* Don't try to set modes if controller can't be adjusted */
	if (atac->atac_set_modes == NULL)
		goto geometry;
	/* Also don't try if the drive didn't report its mode */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0)
		goto geometry;
	/* SET FEATURES subcommand 0x08|n selects PIO mode n. */
	wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
	    0x08 | drvp->PIO_mode, WDSF_SET_MODE);
	errstring = "piomode";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
	/* 0x40|n = Ultra DMA mode n, 0x20|n = multiword DMA mode n. */
	if (drvp->drive_flags & ATA_DRIVE_UDMA)
		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
		    0x40 | drvp->UDMA_mode, WDSF_SET_MODE);
	else if (drvp->drive_flags & ATA_DRIVE_DMA)
		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
		    0x20 | drvp->DMA_mode, WDSF_SET_MODE);
	else
		goto geometry;
	errstring = "dmamode";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
geometry:
	/* LBA drives need no INITIALIZE DEVICE PARAMETERS. */
	if (ata_bio->flags & ATA_LBA)
		goto multimode;
	wdccommand(chp, 0, WDCC_IDP, ata_bio->lp->d_ncylinders,
	    ata_bio->lp->d_ntracks - 1, 0, ata_bio->lp->d_nsectors,
	    (ata_bio->lp->d_type == DTYPE_ST506) ?
	    ata_bio->lp->d_precompcyl / 4 : 0);
	errstring = "geometry";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
multimode:
	if (ata_bio->multi == 1)
		goto ready;
	wdccommand(chp, 0, WDCC_SETMULTI, 0, 0, 0, ata_bio->multi, 0);
	errstring = "setmulti";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
ready:
	drvp->state = READY;
	/*
	 * The drive is usable now
	 */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	delay(10);	/* some drives need a little delay here */
	return 0;

ctrltimeout:
	aprint_error_dev(atac->atac_dev, "channel %d: drive %d %s timed out\n",
	    chp->ch_channel, drive, errstring);
	ata_bio->error = TIMEOUT;
	goto ctrldone;
ctrlerror:
	aprint_error_dev(atac->atac_dev, "channel %d: drive %d %s ",
	    chp->ch_channel, drive, errstring);
	if (chp->ch_status & WDCS_DWF) {
		aprint_error("drive fault\n");
		ata_bio->error = ERR_DF;
	} else {
		aprint_error("error (%x)\n", chp->ch_error);
		ata_bio->r_error = chp->ch_error;
		ata_bio->error = ERROR;
	}
ctrldone:
	drvp->state = 0;
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	return -1;
}

/*
 * mvsata_wdc_cmd_start:
 *	Issue a raw ATA command (wdc_exec_command path).  Disables EDMA,
 *	selects the PMP target, waits for the drive, then writes the task
 *	file (LBA48 via wdccommandext, else classic wdccommand).  Polled
 *	commands continue synchronously in mvsata_wdc_cmd_intr().
 */
static void
mvsata_wdc_cmd_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	int drive = xfer->c_drive;
	int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
	struct ata_command *ata_c = xfer->c_cmd;

	DPRINTFN(1, ("%s:%d: mvsata_cmd_start: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, drive));

	/* First, EDMA disable, if enabled this channel.
	 */
	if (mvport->port_edmamode != nodma)
		mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);

	mvsata_pmp_select(mvport, drive);

	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	switch(wdcwait(chp, ata_c->r_st_bmask | WDCS_DRQ,
	    ata_c->r_st_bmask, ata_c->timeout, wait_flags)) {
	case WDCWAIT_OK:
		break;
	case WDCWAIT_TOUT:
		ata_c->flags |= AT_TIMEOU;
		mvsata_wdc_cmd_done(chp, xfer);
		return;
	case WDCWAIT_THR:
		return;
	}
	if (ata_c->flags & AT_POLL)
		/* polled command, disable interrupts */
		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
	if ((ata_c->flags & AT_LBA48) != 0) {
		wdccommandext(chp, 0, ata_c->r_command,
		    ata_c->r_lba, ata_c->r_count, ata_c->r_features,
		    ata_c->r_device & ~0x10);
	} else {
		/* Split the 28-bit LBA across the classic task-file regs. */
		wdccommand(chp, 0, ata_c->r_command,
		    (ata_c->r_lba >> 8) & 0xffff,
		    (((ata_c->flags & AT_LBA) != 0) ? WDSD_LBA : 0) |
		    ((ata_c->r_lba >> 24) & 0x0f),
		    ata_c->r_lba & 0xff,
		    ata_c->r_count & 0xff,
		    ata_c->r_features & 0xff);
	}

	if ((ata_c->flags & AT_POLL) == 0) {
		chp->ch_flags |= ATACH_IRQ_WAIT;	/* wait for interrupt */
		callout_reset(&chp->ch_callout, ata_c->timeout / 1000 * hz,
		    wdctimeout, chp);
		return;
	}
	/*
	 * Polled command.  Wait for drive ready or drq.  Done in intr().
	 * Wait for at last 400ns for status bit to be valid.
	 */
	delay(10);	/* 10us — comfortably past the 400ns status latency */
	mvsata_wdc_cmd_intr(chp, xfer, 0);
}

/*
 * mvsata_wdc_cmd_intr:
 *	Interrupt/poll continuation for a raw ATA command: performs the PIO
 *	data phase (in or out) and finishes via mvsata_wdc_cmd_done().
 *	Returns 0 if the IRQ was not for us, 1 otherwise.
 */
static int
mvsata_wdc_cmd_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct ata_command *ata_c = xfer->c_cmd;
	int bcount = ata_c->bcount;
	char *data = ata_c->data;
	int wflags;
	int drive_flags;

	if (ata_c->r_command == WDCC_IDENTIFY ||
	    ata_c->r_command == ATAPI_IDENTIFY_DEVICE)
		/*
		 * The IDENTIFY data has been designed as an array of
		 * u_int16_t, so we can byteswap it on the fly.
		 * Historically it's what we have always done so keeping it
		 * here ensure binary backward compatibility.
		 */
		drive_flags = ATA_DRIVE_NOSTREAM |
		    chp->ch_drive[xfer->c_drive].drive_flags;
	else
		/*
		 * Other data structure are opaque and should be transfered
		 * as is.
		 */
		drive_flags = chp->ch_drive[xfer->c_drive].drive_flags;

	if ((ata_c->flags & (AT_WAIT | AT_POLL)) == (AT_WAIT | AT_POLL))
		/* both wait and poll, we can tsleep here */
		wflags = AT_WAIT | AT_POLL;
	else
		wflags = AT_POLL;

again:
	DPRINTFN(1, ("%s:%d: mvsata_cmd_intr: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive));

	/*
	 * after a ATAPI_SOFT_RESET, the device will have released the bus.
	 * Reselect again, it doesn't hurt for others commands, and the time
	 * penalty for the extra register write is acceptable,
	 * wdc_exec_command() isn't called often (mostly for autoconfig)
	 */
	if ((xfer->c_flags & C_ATAPI) != 0) {
		MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	}
	if ((ata_c->flags & AT_XFDONE) != 0) {
		/*
		 * We have completed a data xfer.
		 * The drive should now be
		 * in its initial state
		 */
		if (wdcwait(chp, ata_c->r_st_bmask | WDCS_DRQ,
		    ata_c->r_st_bmask, (irq == 0) ? ata_c->timeout : 0,
		    wflags) == WDCWAIT_TOUT) {
			if (irq && (xfer->c_flags & C_TIMEOU) == 0)
				return 0;	/* IRQ was not for us */
			ata_c->flags |= AT_TIMEOU;
		}
		goto out;
	}
	if (wdcwait(chp, ata_c->r_st_pmask, ata_c->r_st_pmask,
	    (irq == 0) ? ata_c->timeout : 0, wflags) == WDCWAIT_TOUT) {
		if (irq && (xfer->c_flags & C_TIMEOU) == 0)
			return 0;	/* IRQ was not for us */
		ata_c->flags |= AT_TIMEOU;
		goto out;
	}
	if (ata_c->flags & AT_READ) {
		if ((chp->ch_status & WDCS_DRQ) == 0) {
			ata_c->flags |= AT_TIMEOU;
			goto out;
		}
		wdc->datain_pio(chp, drive_flags, data, bcount);
		/* at this point the drive should be in its initial state */
		ata_c->flags |= AT_XFDONE;
		/*
		 * XXX checking the status register again here cause some
		 * hardware to timeout.
		 */
	} else if (ata_c->flags & AT_WRITE) {
		if ((chp->ch_status & WDCS_DRQ) == 0) {
			ata_c->flags |= AT_TIMEOU;
			goto out;
		}
		wdc->dataout_pio(chp, drive_flags, data, bcount);
		ata_c->flags |= AT_XFDONE;
		if ((ata_c->flags & AT_POLL) == 0) {
			chp->ch_flags |= ATACH_IRQ_WAIT;	/* wait for intr */
			callout_reset(&chp->ch_callout,
			    mstohz(ata_c->timeout), wdctimeout, chp);
			return 1;
		} else
			goto again;
	}
out:
	mvsata_wdc_cmd_done(chp, xfer);
	return 1;
}

/*
 * mvsata_wdc_cmd_kill_xfer:
 *	Abort a raw ATA command (device gone or reset) and complete it
 *	through the common done-end path.
 */
static void
mvsata_wdc_cmd_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer,
    int reason)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct ata_command *ata_c = xfer->c_cmd;

	DPRINTFN(1, ("%s:%d: mvsata_cmd_kill_xfer: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive));

	switch (reason) {
	case KILL_GONE:
		ata_c->flags |= AT_GONE;
		break;
	case KILL_RESET:
		ata_c->flags |= AT_RESET;
		break;
	default:
		aprint_error_dev(MVSATA_DEV2(mvport),
		    "mvsata_cmd_kill_xfer: unknown reason %d\n", reason);
		panic("mvsata_cmd_kill_xfer");
	}
	mvsata_wdc_cmd_done_end(chp, xfer);
}

/*
 * mvsata_wdc_cmd_done:
 *	Collect status/error and, if AT_READREG was requested, read back the
 *	full task file (including the high-order LBA48 bytes via the HOB
 *	bit) before handing the xfer to mvsata_wdc_cmd_done_end().
 */
static void
mvsata_wdc_cmd_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_command *ata_c = xfer->c_cmd;

	DPRINTFN(1, ("%s:%d: mvsata_cmd_done: drive=%d, flags=0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    ata_c->flags));

	if (chp->ch_status & WDCS_DWF)
		ata_c->flags |= AT_DF;
	if (chp->ch_status & WDCS_ERR) {
		ata_c->flags |= AT_ERROR;
		ata_c->r_error = chp->ch_error;
	}
	if ((ata_c->flags & AT_READREG) != 0 &&
	    device_is_active(atac->atac_dev) &&
	    (ata_c->flags & (AT_ERROR | AT_DF)) == 0) {
		ata_c->r_status = MVSATA_WDC_READ_1(mvport, SRB_CS);
		ata_c->r_error = MVSATA_WDC_READ_1(mvport, SRB_FE);
		ata_c->r_count = MVSATA_WDC_READ_1(mvport, SRB_SC);
		/* Low-order 24 bits of the LBA. */
		ata_c->r_lba =
		    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 0;
		ata_c->r_lba |=
		    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 8;
		ata_c->r_lba |=
		    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 16;
		ata_c->r_device = MVSATA_WDC_READ_1(mvport, SRB_H);
		if ((ata_c->flags & AT_LBA48) != 0) {
			/* Set HOB to expose the previous (high) bytes. */
			if ((ata_c->flags & AT_POLL) != 0) {
				MVSATA_WDC_WRITE_1(mvport, SRB_CAS,
				    WDCTL_HOB|WDCTL_4BIT|WDCTL_IDS);
			} else {
				MVSATA_WDC_WRITE_1(mvport, SRB_CAS,
				    WDCTL_HOB|WDCTL_4BIT);
			}
			ata_c->r_count |=
			    MVSATA_WDC_READ_1(mvport, SRB_SC) << 8;
			ata_c->r_lba |=
			    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 24;
			ata_c->r_lba |=
			    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 32;
			ata_c->r_lba |=
			    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 40;
			/* Clear HOB again. */
			if ((ata_c->flags & AT_POLL) != 0) {
				MVSATA_WDC_WRITE_1(mvport, SRB_CAS,
				    WDCTL_4BIT|WDCTL_IDS);
			} else {
				MVSATA_WDC_WRITE_1(mvport, SRB_CAS,
				    WDCTL_4BIT);
			}
		} else {
			/* LBA28: bits 24-27 live in the device register. */
			ata_c->r_lba |=
			    (uint64_t)(ata_c->r_device & 0x0f) << 24;
		}
	}
	callout_stop(&chp->ch_callout);
	chp->ch_queue->active_xfer = NULL;
	if (ata_c->flags & AT_POLL) {
		/* enable interrupts */
		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
		delay(10);	/* some drives need a little delay here */
	}
	if (chp->ch_drive[xfer->c_drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		mvsata_wdc_cmd_kill_xfer(chp, xfer, KILL_GONE);
		chp->ch_drive[xfer->c_drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		wakeup(&chp->ch_queue->active_xfer);
	} else
		mvsata_wdc_cmd_done_end(chp, xfer);
}

/*
 * mvsata_wdc_cmd_done_end:
 *	Final completion for a raw ATA command: restart EDMA if configured,
 *	mark AT_DONE, free the xfer and wake/notify the issuer.
 */
static void
mvsata_wdc_cmd_done_end(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct
mvsata_port *mvport = (struct mvsata_port *)chp;
	struct ata_command *ata_c = xfer->c_cmd;

	/* EDMA restart, if enabled */
	if (mvport->port_edmamode != nodma) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}

	ata_c->flags |= AT_DONE;
	ata_free_xfer(chp, xfer);
	/* Synchronous issuers sleep on ata_c; async ones use the callback. */
	if (ata_c->flags & AT_WAIT)
		wakeup(ata_c);
	else if (ata_c->callback)
		ata_c->callback(ata_c->callback_arg);
	atastart(chp);

	return;
}

#if NATAPIBUS > 0
/*
 * mvsata_atapi_start:
 *	Start an ATAPI (packet) command.  Brings the drive to READY (mode
 *	setting with fallback on rejection), then issues ATAPI_PKT_CMD and
 *	either waits for the DRQ interrupt or polls via mvsata_atapi_intr().
 */
static void
mvsata_atapi_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_softc *sc = (struct mvsata_softc *)chp->ch_atac;
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = &sc->sc_wdcdev.sc_atac;
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	const int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
	const char *errstring;

	DPRINTFN(2, ("%s:%d:%d: mvsata_atapi_start: scsi flags 0x%x\n",
	    device_xname(chp->ch_atac->atac_dev), chp->ch_channel,
	    xfer->c_drive, sc_xfer->xs_control));

	if (mvport->port_edmamode != nodma)
		mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);

	mvsata_pmp_select(mvport, xfer->c_drive);

	/* Saturating per-drive DMA transfer counter. */
	if ((xfer->c_flags & C_DMA) && (drvp->n_xfers <= NXFER))
		drvp->n_xfers++;

	/* Do control operations specially. */
	if (__predict_false(drvp->state < READY)) {
		/* If it's not a polled command, we need the kernel thread */
		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0 && cpu_intr_p()) {
			chp->ch_queue->queue_freeze++;
			wakeup(&chp->ch_thread);
			return;
		}
		/*
		 * disable interrupts, all commands here should be quick
		 * enough to be able to poll, and we don't go here that often
		 */
		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);

		MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
		/* Don't try to set mode if controller can't be adjusted */
		if (atac->atac_set_modes == NULL)
			goto ready;
		/* Also don't try if the drive didn't report its mode */
		if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0)
			goto ready;
		errstring = "unbusy";
		if (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags))
			goto timeout;
		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
		    0x08 | drvp->PIO_mode, WDSF_SET_MODE);
		errstring = "piomode";
		if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags))
			goto timeout;
		if (chp->ch_status & WDCS_ERR) {
			if (chp->ch_error == WDCE_ABRT) {
				/*
				 * Some ATAPI drives reject PIO settings.
				 * Fall back to PIO mode 3 since that's the
				 * minimum for ATAPI.
				 */
				aprint_error_dev(atac->atac_dev,
				    "channel %d drive %d: PIO mode %d rejected,"
				    " falling back to PIO mode 3\n",
				    chp->ch_channel, xfer->c_drive,
				    drvp->PIO_mode);
				if (drvp->PIO_mode > 3)
					drvp->PIO_mode = 3;
			} else
				goto error;
		}
		if (drvp->drive_flags & ATA_DRIVE_UDMA)
			wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
			    0x40 | drvp->UDMA_mode, WDSF_SET_MODE);
		else
			if (drvp->drive_flags & ATA_DRIVE_DMA)
				wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
				    0x20 | drvp->DMA_mode, WDSF_SET_MODE);
			else
				goto ready;
		errstring = "dmamode";
		if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags))
			goto timeout;
		if (chp->ch_status & WDCS_ERR) {
			if (chp->ch_error == WDCE_ABRT) {
				if (drvp->drive_flags & ATA_DRIVE_UDMA)
					goto error;
				else {
					/*
					 * The drive rejected our DMA setting.
					 * Fall back to mode 1.
					 */
					aprint_error_dev(atac->atac_dev,
					    "channel %d drive %d:"
					    " DMA mode %d rejected,"
					    " falling back to DMA mode 0\n",
					    chp->ch_channel, xfer->c_drive,
					    drvp->DMA_mode);
					if (drvp->DMA_mode > 0)
						drvp->DMA_mode = 0;
				}
			} else
				goto error;
		}
ready:
		drvp->state = READY;
		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
		delay(10);	/* some drives need a little delay here */
	}
	/* start timeout machinery */
	if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
		callout_reset(&chp->ch_callout, mstohz(sc_xfer->timeout),
		    wdctimeout, chp);

	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	/*
	 * NOTE(review): this switches on the boolean result of "< 0",
	 * not on the WDCWAIT_* return value itself, so a timeout (-1)
	 * yields 1 and matches whichever case constant equals 1 rather
	 * than WDCWAIT_TOUT.  Looks unintended — confirm against
	 * wdcwait()'s return-value contract before changing (the head
	 * comment says the ATAPI implementation is not finished).
	 */
	switch (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags) < 0) {
	case WDCWAIT_OK:
		break;
	case WDCWAIT_TOUT:
		aprint_error_dev(atac->atac_dev, "not ready, st = %02x\n",
		    chp->ch_status);
		sc_xfer->error = XS_TIMEOUT;
		mvsata_atapi_reset(chp, xfer);
		return;
	case WDCWAIT_THR:
		return;
	}

	/*
	 * Even with WDCS_ERR, the device should accept a command packet
	 * Limit length to what can be stuffed into the cylinder register
	 * (16 bits).  Some CD-ROMs seem to interpret '0' as 65536,
	 * but not all devices do that and it's not obvious from the
	 * ATAPI spec that that behaviour should be expected.  If more
	 * data is necessary, multiple data transfer phases will be done.
	 */

	wdccommand(chp, 0, ATAPI_PKT_CMD,
	    xfer->c_bcount <= 0xffff ? xfer->c_bcount : 0xffff, 0, 0, 0,
	    (xfer->c_flags & C_DMA) ? ATAPI_PKT_CMD_FTRE_DMA : 0);

	/*
	 * If there is no interrupt for CMD input, busy-wait for it (done in
	 * the interrupt routine. If it is a polled command, call the interrupt
	 * routine until command is done.
	 */
	if ((sc_xfer->xs_periph->periph_cap & ATAPI_CFG_DRQ_MASK) !=
	    ATAPI_CFG_IRQ_DRQ || (sc_xfer->xs_control & XS_CTL_POLL)) {
		/* Wait for at last 400ns for status bit to be valid */
		DELAY(1);
		mvsata_atapi_intr(chp, xfer, 0);
	} else
		chp->ch_flags |= ATACH_IRQ_WAIT;
	if (sc_xfer->xs_control & XS_CTL_POLL) {
		if (chp->ch_flags & ATACH_DMA_WAIT) {
			wdc_dmawait(chp, xfer, sc_xfer->timeout);
			chp->ch_flags &= ~ATACH_DMA_WAIT;
		}
		while ((sc_xfer->xs_status & XS_STS_DONE) == 0) {
			/* Wait for at last 400ns for status bit to be valid */
			DELAY(1);
			mvsata_atapi_intr(chp, xfer, 0);
		}
	}
	return;

timeout:
	aprint_error_dev(atac->atac_dev, "channel %d drive %d: %s timed out\n",
	    chp->ch_channel, xfer->c_drive, errstring);
	sc_xfer->error = XS_TIMEOUT;
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	delay(10);	/* some drives need a little delay here */
	mvsata_atapi_reset(chp, xfer);
	return;

error:
	aprint_error_dev(atac->atac_dev,
	    "channel %d drive %d: %s error (0x%x)\n",
	    chp->ch_channel, xfer->c_drive, errstring, chp->ch_error);
	sc_xfer->error = XS_SHORTSENSE;
	sc_xfer->sense.atapi_sense = chp->ch_error;
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	delay(10);	/* some drives need a little delay here */
	mvsata_atapi_reset(chp, xfer);
	return;
}

/*
 * mvsata_atapi_intr:
 *	Interrupt/poll handler for an ATAPI transfer.  Decodes the current
 *	bus phase from the interrupt-reason and status registers and acts
 *	on it.  (Definition continues beyond this chunk.)
 */
static int
mvsata_atapi_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	int len, phase, ire, error, retries=0, i;
	void *cmd;

	DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_intr\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));

	/* Is it not a transfer, but a control operation? */
	if (drvp->state < READY) {
		aprint_error_dev(atac->atac_dev,
		    "channel %d drive %d: bad state %d\n",
		    chp->ch_channel, xfer->c_drive, drvp->state);
		panic("mvsata_atapi_intr: bad state");
	}
	/*
	 * If we missed an interrupt in a PIO transfer, reset and restart.
	 * Don't try to continue transfer, we may have missed cycles.
	 */
	if ((xfer->c_flags & (C_TIMEOU | C_DMA)) == C_TIMEOU) {
		sc_xfer->error = XS_TIMEOUT;
		mvsata_atapi_reset(chp, xfer);
		return 1;
	}

	/* Ack interrupt done in wdc_wait_for_unbusy */
	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	if (wdc_wait_for_unbusy(chp,
	    (irq == 0) ? sc_xfer->timeout : 0, AT_POLL) == WDCWAIT_TOUT) {
		if (irq && (xfer->c_flags & C_TIMEOU) == 0)
			return 0;	/* IRQ was not for us */
		aprint_error_dev(atac->atac_dev,
		    "channel %d: device timeout, c_bcount=%d, c_skip=%d\n",
		    chp->ch_channel, xfer->c_bcount, xfer->c_skip);
		if (xfer->c_flags & C_DMA)
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ?
AT_POLL : 0); 2077 sc_xfer->error = XS_TIMEOUT; 2078 mvsata_atapi_reset(chp, xfer); 2079 return 1; 2080 } 2081 2082 /* 2083 * If we missed an IRQ and were using DMA, flag it as a DMA error 2084 * and reset device. 2085 */ 2086 if ((xfer->c_flags & C_TIMEOU) && (xfer->c_flags & C_DMA)) { 2087 ata_dmaerr(drvp, (xfer->c_flags & C_POLL) ? AT_POLL : 0); 2088 sc_xfer->error = XS_RESET; 2089 mvsata_atapi_reset(chp, xfer); 2090 return (1); 2091 } 2092 /* 2093 * if the request sense command was aborted, report the short sense 2094 * previously recorded, else continue normal processing 2095 */ 2096 2097 again: 2098 len = MVSATA_WDC_READ_1(mvport, SRB_LBAM) + 2099 256 * MVSATA_WDC_READ_1(mvport, SRB_LBAH); 2100 ire = MVSATA_WDC_READ_1(mvport, SRB_SC); 2101 phase = (ire & (WDCI_CMD | WDCI_IN)) | (chp->ch_status & WDCS_DRQ); 2102 DPRINTF(( 2103 "mvsata_atapi_intr: c_bcount %d len %d st 0x%x err 0x%x ire 0x%x :", 2104 xfer->c_bcount, len, chp->ch_status, chp->ch_error, ire)); 2105 2106 switch (phase) { 2107 case PHASE_CMDOUT: 2108 cmd = sc_xfer->cmd; 2109 DPRINTF(("PHASE_CMDOUT\n")); 2110 /* Init the DMA channel if necessary */ 2111 if (xfer->c_flags & C_DMA) { 2112 error = mvsata_bdma_init(mvport, sc_xfer, 2113 (char *)xfer->c_databuf + xfer->c_skip); 2114 if (error) { 2115 if (error == EINVAL) { 2116 /* 2117 * We can't do DMA on this transfer 2118 * for some reason. Fall back to PIO. 2119 */ 2120 xfer->c_flags &= ~C_DMA; 2121 error = 0; 2122 } else { 2123 sc_xfer->error = XS_DRIVER_STUFFUP; 2124 break; 2125 } 2126 } 2127 } 2128 2129 /* send packet command */ 2130 /* Commands are 12 or 16 bytes long. 
It's 32-bit aligned */ 2131 wdc->dataout_pio(chp, drvp->drive_flags, cmd, sc_xfer->cmdlen); 2132 2133 /* Start the DMA channel if necessary */ 2134 if (xfer->c_flags & C_DMA) { 2135 mvsata_bdma_start(mvport); 2136 chp->ch_flags |= ATACH_DMA_WAIT; 2137 } 2138 2139 if ((sc_xfer->xs_control & XS_CTL_POLL) == 0) 2140 chp->ch_flags |= ATACH_IRQ_WAIT; 2141 return 1; 2142 2143 case PHASE_DATAOUT: 2144 /* write data */ 2145 DPRINTF(("PHASE_DATAOUT\n")); 2146 if ((sc_xfer->xs_control & XS_CTL_DATA_OUT) == 0 || 2147 (xfer->c_flags & C_DMA) != 0) { 2148 aprint_error_dev(atac->atac_dev, 2149 "channel %d drive %d: bad data phase DATAOUT\n", 2150 chp->ch_channel, xfer->c_drive); 2151 if (xfer->c_flags & C_DMA) 2152 ata_dmaerr(drvp, 2153 (xfer->c_flags & C_POLL) ? AT_POLL : 0); 2154 sc_xfer->error = XS_TIMEOUT; 2155 mvsata_atapi_reset(chp, xfer); 2156 return 1; 2157 } 2158 xfer->c_lenoff = len - xfer->c_bcount; 2159 if (xfer->c_bcount < len) { 2160 aprint_error_dev(atac->atac_dev, "channel %d drive %d:" 2161 " warning: write only %d of %d requested bytes\n", 2162 chp->ch_channel, xfer->c_drive, xfer->c_bcount, 2163 len); 2164 len = xfer->c_bcount; 2165 } 2166 2167 wdc->dataout_pio(chp, drvp->drive_flags, 2168 (char *)xfer->c_databuf + xfer->c_skip, len); 2169 2170 for (i = xfer->c_lenoff; i > 0; i -= 2) 2171 MVSATA_WDC_WRITE_2(mvport, SRB_PIOD, 0); 2172 2173 xfer->c_skip += len; 2174 xfer->c_bcount -= len; 2175 if ((sc_xfer->xs_control & XS_CTL_POLL) == 0) 2176 chp->ch_flags |= ATACH_IRQ_WAIT; 2177 return 1; 2178 2179 case PHASE_DATAIN: 2180 /* Read data */ 2181 DPRINTF(("PHASE_DATAIN\n")); 2182 if ((sc_xfer->xs_control & XS_CTL_DATA_IN) == 0 || 2183 (xfer->c_flags & C_DMA) != 0) { 2184 aprint_error_dev(atac->atac_dev, 2185 "channel %d drive %d: bad data phase DATAIN\n", 2186 chp->ch_channel, xfer->c_drive); 2187 if (xfer->c_flags & C_DMA) 2188 ata_dmaerr(drvp, 2189 (xfer->c_flags & C_POLL) ? 
AT_POLL : 0); 2190 sc_xfer->error = XS_TIMEOUT; 2191 mvsata_atapi_reset(chp, xfer); 2192 return 1; 2193 } 2194 xfer->c_lenoff = len - xfer->c_bcount; 2195 if (xfer->c_bcount < len) { 2196 aprint_error_dev(atac->atac_dev, "channel %d drive %d:" 2197 " warning: reading only %d of %d bytes\n", 2198 chp->ch_channel, xfer->c_drive, xfer->c_bcount, 2199 len); 2200 len = xfer->c_bcount; 2201 } 2202 2203 wdc->datain_pio(chp, drvp->drive_flags, 2204 (char *)xfer->c_databuf + xfer->c_skip, len); 2205 2206 if (xfer->c_lenoff > 0) 2207 wdcbit_bucket(chp, len - xfer->c_bcount); 2208 2209 xfer->c_skip += len; 2210 xfer->c_bcount -= len; 2211 if ((sc_xfer->xs_control & XS_CTL_POLL) == 0) 2212 chp->ch_flags |= ATACH_IRQ_WAIT; 2213 return 1; 2214 2215 case PHASE_ABORTED: 2216 case PHASE_COMPLETED: 2217 DPRINTF(("PHASE_COMPLETED\n")); 2218 if (xfer->c_flags & C_DMA) 2219 xfer->c_bcount -= sc_xfer->datalen; 2220 sc_xfer->resid = xfer->c_bcount; 2221 mvsata_atapi_phase_complete(xfer); 2222 return 1; 2223 2224 default: 2225 if (++retries<500) { 2226 DELAY(100); 2227 chp->ch_status = MVSATA_WDC_READ_1(mvport, SRB_CS); 2228 chp->ch_error = MVSATA_WDC_READ_1(mvport, SRB_FE); 2229 goto again; 2230 } 2231 aprint_error_dev(atac->atac_dev, 2232 "channel %d drive %d: unknown phase 0x%x\n", 2233 chp->ch_channel, xfer->c_drive, phase); 2234 if (chp->ch_status & WDCS_ERR) { 2235 sc_xfer->error = XS_SHORTSENSE; 2236 sc_xfer->sense.atapi_sense = chp->ch_error; 2237 } else { 2238 if (xfer->c_flags & C_DMA) 2239 ata_dmaerr(drvp, 2240 (xfer->c_flags & C_POLL) ? 
AT_POLL : 0); 2241 sc_xfer->error = XS_RESET; 2242 mvsata_atapi_reset(chp, xfer); 2243 return (1); 2244 } 2245 } 2246 DPRINTF(("mvsata_atapi_intr: mvsata_atapi_done() (end), error 0x%x " 2247 "sense 0x%x\n", sc_xfer->error, sc_xfer->sense.atapi_sense)); 2248 mvsata_atapi_done(chp, xfer); 2249 return 1; 2250 } 2251 2252 static void 2253 mvsata_atapi_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, 2254 int reason) 2255 { 2256 struct mvsata_port *mvport = (struct mvsata_port *)chp; 2257 struct scsipi_xfer *sc_xfer = xfer->c_cmd; 2258 2259 /* remove this command from xfer queue */ 2260 switch (reason) { 2261 case KILL_GONE: 2262 sc_xfer->error = XS_DRIVER_STUFFUP; 2263 break; 2264 2265 case KILL_RESET: 2266 sc_xfer->error = XS_RESET; 2267 break; 2268 2269 default: 2270 aprint_error_dev(MVSATA_DEV2(mvport), 2271 "mvsata_atapi_kill_xfer: unknown reason %d\n", reason); 2272 panic("mvsata_atapi_kill_xfer"); 2273 } 2274 ata_free_xfer(chp, xfer); 2275 scsipi_done(sc_xfer); 2276 } 2277 2278 static void 2279 mvsata_atapi_reset(struct ata_channel *chp, struct ata_xfer *xfer) 2280 { 2281 struct atac_softc *atac = chp->ch_atac; 2282 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive]; 2283 struct scsipi_xfer *sc_xfer = xfer->c_cmd; 2284 2285 mvsata_pmp_select(mvport, xfer->c_drive); 2286 2287 wdccommandshort(chp, 0, ATAPI_SOFT_RESET); 2288 drvp->state = 0; 2289 if (wdc_wait_for_unbusy(chp, WDC_RESET_WAIT, AT_POLL) != 0) { 2290 printf("%s:%d:%d: reset failed\n", device_xname(atac->atac_dev), 2291 chp->ch_channel, xfer->c_drive); 2292 sc_xfer->error = XS_SELTIMEOUT; 2293 } 2294 mvsata_atapi_done(chp, xfer); 2295 return; 2296 } 2297 2298 static void 2299 mvsata_atapi_phase_complete(struct ata_xfer *xfer) 2300 { 2301 struct ata_channel *chp = xfer->c_chp; 2302 struct atac_softc *atac = chp->ch_atac; 2303 struct wdc_softc *wdc = CHAN_TO_WDC(chp); 2304 struct scsipi_xfer *sc_xfer = xfer->c_cmd; 2305 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive]; 2306 
2307 /* wait for DSC if needed */ 2308 if (drvp->drive_flags & ATA_DRIVE_ATAPIDSCW) { 2309 DPRINTFN(1, 2310 ("%s:%d:%d: mvsata_atapi_phase_complete: polldsc %d\n", 2311 device_xname(atac->atac_dev), chp->ch_channel, 2312 xfer->c_drive, xfer->c_dscpoll)); 2313 if (cold) 2314 panic("mvsata_atapi_phase_complete: cold"); 2315 2316 if (wdcwait(chp, WDCS_DSC, WDCS_DSC, 10, AT_POLL) == 2317 WDCWAIT_TOUT) { 2318 /* 10ms not enough, try again in 1 tick */ 2319 if (xfer->c_dscpoll++ > mstohz(sc_xfer->timeout)) { 2320 aprint_error_dev(atac->atac_dev, 2321 "channel %d: wait_for_dsc failed\n", 2322 chp->ch_channel); 2323 sc_xfer->error = XS_TIMEOUT; 2324 mvsata_atapi_reset(chp, xfer); 2325 return; 2326 } else 2327 callout_reset(&chp->ch_callout, 1, 2328 mvsata_atapi_polldsc, xfer); 2329 return; 2330 } 2331 } 2332 2333 /* 2334 * Some drive occasionally set WDCS_ERR with 2335 * "ATA illegal length indication" in the error 2336 * register. If we read some data the sense is valid 2337 * anyway, so don't report the error. 2338 */ 2339 if (chp->ch_status & WDCS_ERR && 2340 ((sc_xfer->xs_control & XS_CTL_REQSENSE) == 0 || 2341 sc_xfer->resid == sc_xfer->datalen)) { 2342 /* save the short sense */ 2343 sc_xfer->error = XS_SHORTSENSE; 2344 sc_xfer->sense.atapi_sense = chp->ch_error; 2345 if ((sc_xfer->xs_periph->periph_quirks & PQUIRK_NOSENSE) == 0) { 2346 /* ask scsipi to send a REQUEST_SENSE */ 2347 sc_xfer->error = XS_BUSY; 2348 sc_xfer->status = SCSI_CHECK; 2349 } else 2350 if (wdc->dma_status & (WDC_DMAST_NOIRQ | WDC_DMAST_ERR)) { 2351 ata_dmaerr(drvp, 2352 (xfer->c_flags & C_POLL) ? 
AT_POLL : 0); 2353 sc_xfer->error = XS_RESET; 2354 mvsata_atapi_reset(chp, xfer); 2355 return; 2356 } 2357 } 2358 if (xfer->c_bcount != 0) 2359 DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_intr:" 2360 " bcount value is %d after io\n", 2361 device_xname(atac->atac_dev), chp->ch_channel, 2362 xfer->c_drive, xfer->c_bcount)); 2363 #ifdef DIAGNOSTIC 2364 if (xfer->c_bcount < 0) 2365 aprint_error_dev(atac->atac_dev, 2366 "channel %d drive %d: mvsata_atapi_intr:" 2367 " warning: bcount value is %d after io\n", 2368 chp->ch_channel, xfer->c_drive, xfer->c_bcount); 2369 #endif 2370 2371 DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_phase_complete:" 2372 " mvsata_atapi_done(), error 0x%x sense 0x%x\n", 2373 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive, 2374 sc_xfer->error, sc_xfer->sense.atapi_sense)); 2375 mvsata_atapi_done(chp, xfer); 2376 } 2377 2378 static void 2379 mvsata_atapi_done(struct ata_channel *chp, struct ata_xfer *xfer) 2380 { 2381 struct atac_softc *atac = chp->ch_atac; 2382 struct scsipi_xfer *sc_xfer = xfer->c_cmd; 2383 int drive = xfer->c_drive; 2384 2385 DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_done: flags 0x%x\n", 2386 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive, 2387 (u_int)xfer->c_flags)); 2388 callout_stop(&chp->ch_callout); 2389 /* mark controller inactive and free the command */ 2390 chp->ch_queue->active_xfer = NULL; 2391 ata_free_xfer(chp, xfer); 2392 2393 if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) { 2394 sc_xfer->error = XS_DRIVER_STUFFUP; 2395 chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN; 2396 wakeup(&chp->ch_queue->active_xfer); 2397 } 2398 2399 DPRINTFN(1, ("%s:%d: mvsata_atapi_done: scsipi_done\n", 2400 device_xname(atac->atac_dev), chp->ch_channel)); 2401 scsipi_done(sc_xfer); 2402 DPRINTFN(1, ("%s:%d: atastart from wdc_atapi_done, flags 0x%x\n", 2403 device_xname(atac->atac_dev), chp->ch_channel, chp->ch_flags)); 2404 atastart(chp); 2405 } 2406 2407 static void 2408 mvsata_atapi_polldsc(void *arg) 
2409 { 2410 2411 mvsata_atapi_phase_complete(arg); 2412 } 2413 #endif /* NATAPIBUS > 0 */ 2414 2415 2416 /* 2417 * XXXX: Shall we need lock for race condition in mvsata_edma_enqueue{,_gen2}(), 2418 * if supported queuing command by atabus? The race condition will not happen 2419 * if this is called only to the thread of atabus. 2420 */ 2421 static int 2422 mvsata_edma_enqueue(struct mvsata_port *mvport, struct ata_bio *ata_bio, 2423 void *databuf) 2424 { 2425 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 2426 struct ata_channel *chp = &mvport->port_ata_channel; 2427 struct eprd *eprd; 2428 bus_addr_t crqb_base_addr; 2429 bus_dmamap_t data_dmamap; 2430 uint32_t reg; 2431 int quetag, erqqip, erqqop, next, rv, i; 2432 2433 DPRINTFN(2, ("%s:%d:%d: mvsata_edma_enqueue:" 2434 " blkno=0x%" PRIx64 ", nbytes=%d, flags=0x%x\n", 2435 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc, 2436 mvport->port, ata_bio->blkno, ata_bio->nbytes, ata_bio->flags)); 2437 2438 reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQOP); 2439 erqqop = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT; 2440 reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP); 2441 erqqip = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT; 2442 next = erqqip; 2443 MVSATA_EDMAQ_INC(next); 2444 if (next == erqqop) 2445 /* queue full */ 2446 return EBUSY; 2447 if ((quetag = mvsata_quetag_get(mvport)) == -1) 2448 /* tag nothing */ 2449 return EBUSY; 2450 DPRINTFN(2, (" erqqip=%d, quetag=%d\n", erqqip, quetag)); 2451 2452 rv = mvsata_dma_bufload(mvport, quetag, databuf, ata_bio->nbytes, 2453 ata_bio->flags); 2454 if (rv != 0) 2455 return rv; 2456 2457 KASSERT(mvport->port_reqtbl[quetag].xfer == NULL); 2458 KASSERT(chp->ch_queue->active_xfer != NULL); 2459 mvport->port_reqtbl[quetag].xfer = chp->ch_queue->active_xfer; 2460 2461 /* setup EDMA Physical Region Descriptors (ePRD) Table Data */ 2462 data_dmamap = mvport->port_reqtbl[quetag].data_dmamap; 2463 eprd = mvport->port_reqtbl[quetag].eprd; 2464 
for (i = 0; i < data_dmamap->dm_nsegs; i++) { 2465 bus_addr_t ds_addr = data_dmamap->dm_segs[i].ds_addr; 2466 bus_size_t ds_len = data_dmamap->dm_segs[i].ds_len; 2467 2468 eprd->prdbal = htole32(ds_addr & EPRD_PRDBAL_MASK); 2469 eprd->bytecount = htole32(EPRD_BYTECOUNT(ds_len)); 2470 eprd->eot = htole16(0); 2471 eprd->prdbah = htole32((ds_addr >> 16) >> 16); 2472 eprd++; 2473 } 2474 (eprd - 1)->eot |= htole16(EPRD_EOT); 2475 #ifdef MVSATA_DEBUG 2476 if (mvsata_debug >= 3) 2477 mvsata_print_eprd(mvport, quetag); 2478 #endif 2479 bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap, 2480 mvport->port_reqtbl[quetag].eprd_offset, MVSATA_EPRD_MAX_SIZE, 2481 BUS_DMASYNC_PREWRITE); 2482 2483 /* setup EDMA Command Request Block (CRQB) Data */ 2484 sc->sc_edma_setup_crqb(mvport, erqqip, quetag, ata_bio); 2485 #ifdef MVSATA_DEBUG 2486 if (mvsata_debug >= 3) 2487 mvsata_print_crqb(mvport, erqqip); 2488 #endif 2489 bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 2490 erqqip * sizeof(union mvsata_crqb), 2491 sizeof(union mvsata_crqb), BUS_DMASYNC_PREWRITE); 2492 2493 MVSATA_EDMAQ_INC(erqqip); 2494 2495 crqb_base_addr = mvport->port_crqb_dmamap->dm_segs[0].ds_addr & 2496 (EDMA_REQQP_ERQQBAP_MASK | EDMA_REQQP_ERQQBA_MASK); 2497 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, (crqb_base_addr >> 16) >> 16); 2498 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 2499 crqb_base_addr | (erqqip << EDMA_REQQP_ERQQP_SHIFT)); 2500 2501 return 0; 2502 } 2503 2504 static int 2505 mvsata_edma_handle(struct mvsata_port *mvport, struct ata_xfer *xfer1) 2506 { 2507 struct ata_channel *chp = &mvport->port_ata_channel; 2508 struct crpb *crpb; 2509 struct ata_bio *ata_bio; 2510 struct ata_xfer *xfer; 2511 uint32_t reg; 2512 int erqqip, erqqop, erpqip, erpqop, prev_erpqop, quetag, handled = 0, n; 2513 2514 /* First, Sync for Request Queue buffer */ 2515 reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQOP); 2516 erqqop = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT; 2517 if 
(mvport->port_prev_erqqop != erqqop) { 2518 const int s = sizeof(union mvsata_crqb); 2519 2520 if (mvport->port_prev_erqqop < erqqop) 2521 n = erqqop - mvport->port_prev_erqqop; 2522 else { 2523 if (erqqop > 0) 2524 bus_dmamap_sync(mvport->port_dmat, 2525 mvport->port_crqb_dmamap, 0, erqqop * s, 2526 BUS_DMASYNC_POSTWRITE); 2527 n = MVSATA_EDMAQ_LEN - mvport->port_prev_erqqop; 2528 } 2529 if (n > 0) 2530 bus_dmamap_sync(mvport->port_dmat, 2531 mvport->port_crqb_dmamap, 2532 mvport->port_prev_erqqop * s, n * s, 2533 BUS_DMASYNC_POSTWRITE); 2534 mvport->port_prev_erqqop = erqqop; 2535 } 2536 2537 reg = MVSATA_EDMA_READ_4(mvport, EDMA_RESQIP); 2538 erpqip = (reg & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT; 2539 reg = MVSATA_EDMA_READ_4(mvport, EDMA_RESQOP); 2540 erpqop = (reg & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT; 2541 2542 DPRINTFN(3, ("%s:%d:%d: mvsata_edma_handle: erpqip=%d, erpqop=%d\n", 2543 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc, 2544 mvport->port, erpqip, erpqop)); 2545 2546 if (erpqop == erpqip) 2547 return 0; 2548 2549 if (erpqop < erpqip) 2550 n = erpqip - erpqop; 2551 else { 2552 if (erpqip > 0) 2553 bus_dmamap_sync(mvport->port_dmat, 2554 mvport->port_crpb_dmamap, 2555 0, erpqip * sizeof(struct crpb), 2556 BUS_DMASYNC_POSTREAD); 2557 n = MVSATA_EDMAQ_LEN - erpqop; 2558 } 2559 if (n > 0) 2560 bus_dmamap_sync(mvport->port_dmat, mvport->port_crpb_dmamap, 2561 erpqop * sizeof(struct crpb), 2562 n * sizeof(struct crpb), BUS_DMASYNC_POSTREAD); 2563 2564 prev_erpqop = erpqop; 2565 while (erpqop != erpqip) { 2566 #ifdef MVSATA_DEBUG 2567 if (mvsata_debug >= 3) 2568 mvsata_print_crpb(mvport, erpqop); 2569 #endif 2570 crpb = mvport->port_crpb + erpqop; 2571 quetag = CRPB_CHOSTQUETAG(le16toh(crpb->id)); 2572 KASSERT(chp->ch_queue->active_xfer != NULL); 2573 xfer = chp->ch_queue->active_xfer; 2574 KASSERT(xfer == mvport->port_reqtbl[quetag].xfer); 2575 #ifdef DIAGNOSTIC 2576 if (xfer == NULL) 2577 panic("unknown response received: 
%s:%d:%d: tag 0x%x\n", 2578 device_xname(MVSATA_DEV2(mvport)), 2579 mvport->port_hc->hc, mvport->port, quetag); 2580 #endif 2581 2582 bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap, 2583 mvport->port_reqtbl[quetag].eprd_offset, 2584 MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_POSTWRITE); 2585 2586 chp->ch_status = CRPB_CDEVSTS(le16toh(crpb->rspflg)); 2587 chp->ch_error = CRPB_CEDMASTS(le16toh(crpb->rspflg)); 2588 ata_bio = xfer->c_cmd; 2589 ata_bio->error = NOERROR; 2590 ata_bio->r_error = 0; 2591 if (chp->ch_status & WDCS_ERR) 2592 ata_bio->error = ERROR; 2593 if (chp->ch_status & WDCS_BSY) 2594 ata_bio->error = TIMEOUT; 2595 if (chp->ch_error) 2596 ata_bio->error = ERR_DMA; 2597 2598 mvsata_dma_bufunload(mvport, quetag, ata_bio->flags); 2599 mvport->port_reqtbl[quetag].xfer = NULL; 2600 mvsata_quetag_put(mvport, quetag); 2601 MVSATA_EDMAQ_INC(erpqop); 2602 2603 #if 1 /* XXXX: flags clears here, because necessary the atabus layer. */ 2604 erqqip = (MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP) & 2605 EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT; 2606 if (erpqop == erqqip) 2607 chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_IRQ_WAIT); 2608 #endif 2609 mvsata_bio_intr(chp, xfer, 1); 2610 if (xfer1 == NULL) 2611 handled++; 2612 else if (xfer == xfer1) { 2613 handled = 1; 2614 break; 2615 } 2616 } 2617 if (prev_erpqop < erpqop) 2618 n = erpqop - prev_erpqop; 2619 else { 2620 if (erpqop > 0) 2621 bus_dmamap_sync(mvport->port_dmat, 2622 mvport->port_crpb_dmamap, 0, 2623 erpqop * sizeof(struct crpb), BUS_DMASYNC_PREREAD); 2624 n = MVSATA_EDMAQ_LEN - prev_erpqop; 2625 } 2626 if (n > 0) 2627 bus_dmamap_sync(mvport->port_dmat, mvport->port_crpb_dmamap, 2628 prev_erpqop * sizeof(struct crpb), 2629 n * sizeof(struct crpb), BUS_DMASYNC_PREREAD); 2630 2631 reg &= ~EDMA_RESQP_ERPQP_MASK; 2632 reg |= (erpqop << EDMA_RESQP_ERPQP_SHIFT); 2633 MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, reg); 2634 2635 #if 0 /* already cleared ago? 
*/ 2636 erqqip = (MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP) & 2637 EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT; 2638 if (erpqop == erqqip) 2639 chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_IRQ_WAIT); 2640 #endif 2641 2642 return handled; 2643 } 2644 2645 static int 2646 mvsata_edma_wait(struct mvsata_port *mvport, struct ata_xfer *xfer, int timeout) 2647 { 2648 struct ata_bio *ata_bio = xfer->c_cmd; 2649 int xtime; 2650 2651 for (xtime = 0; xtime < timeout / 10; xtime++) { 2652 if (mvsata_edma_handle(mvport, xfer)) 2653 return 0; 2654 if (ata_bio->flags & ATA_NOSLEEP) 2655 delay(10000); 2656 else 2657 tsleep(&xfer, PRIBIO, "mvsataipl", mstohz(10)); 2658 } 2659 2660 DPRINTF(("mvsata_edma_wait: timeout: %p\n", xfer)); 2661 mvsata_edma_rqq_remove(mvport, xfer); 2662 xfer->c_flags |= C_TIMEOU; 2663 return 1; 2664 } 2665 2666 static void 2667 mvsata_edma_timeout(void *arg) 2668 { 2669 struct ata_xfer *xfer = (struct ata_xfer *)arg; 2670 struct ata_channel *chp = xfer->c_chp; 2671 struct mvsata_port *mvport = (struct mvsata_port *)chp; 2672 int s; 2673 2674 s = splbio(); 2675 DPRINTF(("mvsata_edma_timeout: %p\n", xfer)); 2676 if ((chp->ch_flags & ATACH_IRQ_WAIT) != 0) { 2677 mvsata_edma_rqq_remove(mvport, xfer); 2678 xfer->c_flags |= C_TIMEOU; 2679 mvsata_bio_intr(chp, xfer, 1); 2680 } 2681 splx(s); 2682 } 2683 2684 static void 2685 mvsata_edma_rqq_remove(struct mvsata_port *mvport, struct ata_xfer *xfer) 2686 { 2687 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 2688 struct ata_bio *ata_bio; 2689 bus_addr_t crqb_base_addr; 2690 int erqqip, i; 2691 2692 /* First, hardware reset, stop EDMA */ 2693 mvsata_hreset_port(mvport); 2694 2695 /* cleanup completed EDMA safely */ 2696 mvsata_edma_handle(mvport, NULL); 2697 2698 bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 0, 2699 sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN, BUS_DMASYNC_PREWRITE); 2700 for (i = 0, erqqip = 0; i < MVSATA_EDMAQ_LEN; i++) { 2701 if (mvport->port_reqtbl[i].xfer == NULL) 
2702 continue; 2703 2704 ata_bio = mvport->port_reqtbl[i].xfer->c_cmd; 2705 if (mvport->port_reqtbl[i].xfer == xfer) { 2706 /* remove xfer from EDMA request queue */ 2707 bus_dmamap_sync(mvport->port_dmat, 2708 mvport->port_eprd_dmamap, 2709 mvport->port_reqtbl[i].eprd_offset, 2710 MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_POSTWRITE); 2711 mvsata_dma_bufunload(mvport, i, ata_bio->flags); 2712 mvport->port_reqtbl[i].xfer = NULL; 2713 mvsata_quetag_put(mvport, i); 2714 continue; 2715 } 2716 2717 sc->sc_edma_setup_crqb(mvport, erqqip, i, ata_bio); 2718 erqqip++; 2719 } 2720 bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 0, 2721 sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN, 2722 BUS_DMASYNC_POSTWRITE); 2723 2724 mvsata_edma_config(mvport, mvport->port_edmamode); 2725 mvsata_edma_reset_qptr(mvport); 2726 mvsata_edma_enable(mvport); 2727 2728 crqb_base_addr = mvport->port_crqb_dmamap->dm_segs[0].ds_addr & 2729 (EDMA_REQQP_ERQQBAP_MASK | EDMA_REQQP_ERQQBA_MASK); 2730 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, (crqb_base_addr >> 16) >> 16); 2731 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 2732 crqb_base_addr | (erqqip << EDMA_REQQP_ERQQP_SHIFT)); 2733 } 2734 2735 #if NATAPIBUS > 0 2736 static int 2737 mvsata_bdma_init(struct mvsata_port *mvport, struct scsipi_xfer *sc_xfer, 2738 void *databuf) 2739 { 2740 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 2741 struct eprd *eprd; 2742 bus_dmamap_t data_dmamap; 2743 bus_addr_t eprd_addr; 2744 int quetag, rv; 2745 2746 DPRINTFN(2, 2747 ("%s:%d:%d: mvsata_bdma_init: datalen=%d, xs_control=0x%x\n", 2748 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc, 2749 mvport->port, sc_xfer->datalen, sc_xfer->xs_control)); 2750 2751 if ((quetag = mvsata_quetag_get(mvport)) == -1) 2752 /* tag nothing */ 2753 return EBUSY; 2754 DPRINTFN(2, (" quetag=%d\n", quetag)); 2755 2756 rv = mvsata_dma_bufload(mvport, quetag, databuf, sc_xfer->datalen, 2757 sc_xfer->xs_control & XS_CTL_DATA_IN ? 
ATA_READ : 0); 2758 if (rv != 0) 2759 return rv; 2760 2761 KASSERT(chp->ch_queue->active_xfer != NULL); 2762 KASSERT(mvport->port_reqtbl[quetag].xfer == NULL); 2763 mvport->port_reqtbl[quetag].xfer = chp->ch_queue->active_xfer; 2764 2765 /* setup EDMA Physical Region Descriptors (ePRD) Table Data */ 2766 data_dmamap = mvport->port_reqtbl[quetag].data_dmamap; 2767 eprd = mvport->port_reqtbl[quetag].eprd; 2768 for (i = 0; i < data_dmamap->dm_nsegs; i++) { 2769 bus_addr_t ds_addr = data_dmamap->dm_segs[i].ds_addr; 2770 bus_size_t ds_len = data_dmamap->dm_segs[i].ds_len; 2771 2772 eprd->prdbal = htole32(ds_addr & EPRD_PRDBAL_MASK); 2773 eprd->bytecount = htole32(EPRD_BYTECOUNT(ds_len)); 2774 eprd->eot = htole16(0); 2775 eprd->prdbah = htole32((ds_addr >> 16) >> 16); 2776 eprd++; 2777 } 2778 (eprd - 1)->eot |= htole16(EPRD_EOT); 2779 #ifdef MVSATA_DEBUG 2780 if (mvsata_debug >= 3) 2781 mvsata_print_eprd(mvport, quetag); 2782 #endif 2783 bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap, 2784 mvport->port_reqtbl[quetag].eprd_offset, MVSATA_EPRD_MAX_SIZE, 2785 BUS_DMASYNC_PREWRITE); 2786 eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr + 2787 mvport->port_reqtbl[quetag].eprd_offset; 2788 2789 MVSATA_EDMA_WRITE_4(mvport, DMA_DTLBA, eprd_addr & DMA_DTLBA_MASK); 2790 MVSATA_EDMA_WRITE_4(mvport, DMA_DTHBA, (eprd_addr >> 16) >> 16); 2791 2792 if (sc_xfer->xs_control & XS_CTL_DATA_IN) 2793 MVSATA_EDMA_WRITE_4(mvport, DMA_C, DMA_C_READ); 2794 else 2795 MVSATA_EDMA_WRITE_4(mvport, DMA_C, 0); 2796 2797 return 0; 2798 } 2799 2800 static void 2801 mvsata_bdma_start(struct mvsata_port *mvport) 2802 { 2803 2804 #ifdef MVSATA_DEBUG 2805 if (mvsata_debug >= 3) 2806 mvsata_print_eprd(mvport, 0); 2807 #endif 2808 2809 MVSATA_EDMA_WRITE_4(mvport, DMA_C, 2810 MVSATA_EDMA_READ_4(mvport, DMA_C) | DMA_C_START); 2811 } 2812 #endif 2813 #endif 2814 2815 2816 static int 2817 mvsata_port_init(struct mvsata_hc *mvhc, int port) 2818 { 2819 struct mvsata_softc *sc = mvhc->hc_sc; 
2820 struct mvsata_port *mvport; 2821 struct ata_channel *chp; 2822 int channel, rv, i; 2823 const int crqbq_size = sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN; 2824 const int crpbq_size = sizeof(struct crpb) * MVSATA_EDMAQ_LEN; 2825 const int eprd_buf_size = MVSATA_EPRD_MAX_SIZE * MVSATA_EDMAQ_LEN; 2826 2827 mvport = malloc(sizeof(struct mvsata_port), M_DEVBUF, 2828 M_ZERO | M_NOWAIT); 2829 if (mvport == NULL) { 2830 aprint_error("%s:%d: can't allocate memory for port %d\n", 2831 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 2832 return ENOMEM; 2833 } 2834 2835 mvport->port = port; 2836 mvport->port_hc = mvhc; 2837 mvport->port_edmamode = nodma; 2838 2839 rv = bus_space_subregion(mvhc->hc_iot, mvhc->hc_ioh, 2840 EDMA_REGISTERS_OFFSET + port * EDMA_REGISTERS_SIZE, 2841 EDMA_REGISTERS_SIZE, &mvport->port_ioh); 2842 if (rv != 0) { 2843 aprint_error("%s:%d: can't subregion EDMA %d registers\n", 2844 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 2845 goto fail0; 2846 } 2847 mvport->port_iot = mvhc->hc_iot; 2848 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, SATA_SS, 4, 2849 &mvport->port_sata_sstatus); 2850 if (rv != 0) { 2851 aprint_error("%s:%d:%d: couldn't subregion sstatus regs\n", 2852 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 2853 goto fail0; 2854 } 2855 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, SATA_SE, 4, 2856 &mvport->port_sata_serror); 2857 if (rv != 0) { 2858 aprint_error("%s:%d:%d: couldn't subregion serror regs\n", 2859 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 2860 goto fail0; 2861 } 2862 if (sc->sc_rev == gen1) 2863 rv = bus_space_subregion(mvhc->hc_iot, mvhc->hc_ioh, 2864 SATAHC_I_R02(port), 4, &mvport->port_sata_scontrol); 2865 else 2866 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, 2867 SATA_SC, 4, &mvport->port_sata_scontrol); 2868 if (rv != 0) { 2869 aprint_error("%s:%d:%d: couldn't subregion scontrol regs\n", 2870 device_xname(MVSATA_DEV(sc)), mvhc->hc, port); 2871 goto fail0; 2872 } 2873 
mvport->port_dmat = sc->sc_dmat; 2874 #ifndef MVSATA_WITHOUTDMA 2875 mvsata_quetag_init(mvport); 2876 #endif 2877 mvhc->hc_ports[port] = mvport; 2878 2879 channel = mvhc->hc * sc->sc_port + port; 2880 chp = &mvport->port_ata_channel; 2881 chp->ch_channel = channel; 2882 chp->ch_atac = &sc->sc_wdcdev.sc_atac; 2883 chp->ch_queue = &mvport->port_ata_queue; 2884 sc->sc_ata_channels[channel] = chp; 2885 2886 rv = mvsata_wdc_reg_init(mvport, sc->sc_wdcdev.regs + channel); 2887 if (rv != 0) 2888 goto fail0; 2889 2890 rv = bus_dmamap_create(mvport->port_dmat, crqbq_size, 1, crqbq_size, 0, 2891 BUS_DMA_NOWAIT, &mvport->port_crqb_dmamap); 2892 if (rv != 0) { 2893 aprint_error( 2894 "%s:%d:%d: EDMA CRQB map create failed: error=%d\n", 2895 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv); 2896 goto fail0; 2897 } 2898 rv = bus_dmamap_create(mvport->port_dmat, crpbq_size, 1, crpbq_size, 0, 2899 BUS_DMA_NOWAIT, &mvport->port_crpb_dmamap); 2900 if (rv != 0) { 2901 aprint_error( 2902 "%s:%d:%d: EDMA CRPB map create failed: error=%d\n", 2903 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv); 2904 goto fail1; 2905 } 2906 rv = bus_dmamap_create(mvport->port_dmat, eprd_buf_size, 1, 2907 eprd_buf_size, 0, BUS_DMA_NOWAIT, &mvport->port_eprd_dmamap); 2908 if (rv != 0) { 2909 aprint_error( 2910 "%s:%d:%d: EDMA ePRD buffer map create failed: error=%d\n", 2911 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv); 2912 goto fail2; 2913 } 2914 for (i = 0; i < MVSATA_EDMAQ_LEN; i++) { 2915 rv = bus_dmamap_create(mvport->port_dmat, MAXPHYS, 2916 MAXPHYS / PAGE_SIZE, MAXPHYS, 0, BUS_DMA_NOWAIT, 2917 &mvport->port_reqtbl[i].data_dmamap); 2918 if (rv != 0) { 2919 aprint_error("%s:%d:%d:" 2920 " EDMA data map(%d) create failed: error=%d\n", 2921 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, i, 2922 rv); 2923 goto fail3; 2924 } 2925 } 2926 2927 return 0; 2928 2929 fail3: 2930 for (i--; i >= 0; i--) 2931 bus_dmamap_destroy(mvport->port_dmat, 2932 mvport->port_reqtbl[i].data_dmamap); 2933 
bus_dmamap_destroy(mvport->port_dmat, mvport->port_eprd_dmamap); 2934 fail2: 2935 bus_dmamap_destroy(mvport->port_dmat, mvport->port_crpb_dmamap); 2936 fail1: 2937 bus_dmamap_destroy(mvport->port_dmat, mvport->port_crqb_dmamap); 2938 fail0: 2939 return rv; 2940 } 2941 2942 static int 2943 mvsata_wdc_reg_init(struct mvsata_port *mvport, struct wdc_regs *wdr) 2944 { 2945 int hc, port, rv, i; 2946 2947 hc = mvport->port_hc->hc; 2948 port = mvport->port; 2949 2950 /* Create subregion for Shadow Registers Map */ 2951 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, 2952 SHADOW_REG_BLOCK_OFFSET, SHADOW_REG_BLOCK_SIZE, &wdr->cmd_baseioh); 2953 if (rv != 0) { 2954 aprint_error("%s:%d:%d: couldn't subregion shadow block regs\n", 2955 device_xname(MVSATA_DEV2(mvport)), hc, port); 2956 return rv; 2957 } 2958 wdr->cmd_iot = mvport->port_iot; 2959 2960 /* Once create subregion for each command registers */ 2961 for (i = 0; i < WDC_NREG; i++) { 2962 rv = bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, 2963 i * 4, sizeof(uint32_t), &wdr->cmd_iohs[i]); 2964 if (rv != 0) { 2965 aprint_error("%s:%d:%d: couldn't subregion cmd regs\n", 2966 device_xname(MVSATA_DEV2(mvport)), hc, port); 2967 return rv; 2968 } 2969 } 2970 /* Create subregion for Alternate Status register */ 2971 rv = bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, 2972 i * 4, sizeof(uint32_t), &wdr->ctl_ioh); 2973 if (rv != 0) { 2974 aprint_error("%s:%d:%d: couldn't subregion cmd regs\n", 2975 device_xname(MVSATA_DEV2(mvport)), hc, port); 2976 return rv; 2977 } 2978 wdr->ctl_iot = mvport->port_iot; 2979 2980 wdc_init_shadow_regs(&mvport->port_ata_channel); 2981 2982 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, 2983 SATA_SS, sizeof(uint32_t) * 3, &wdr->sata_baseioh); 2984 if (rv != 0) { 2985 aprint_error("%s:%d:%d: couldn't subregion SATA regs\n", 2986 device_xname(MVSATA_DEV2(mvport)), hc, port); 2987 return rv; 2988 } 2989 wdr->sata_iot = mvport->port_iot; 2990 rv = 
bus_space_subregion(mvport->port_iot, mvport->port_ioh, 2991 SATA_SC, sizeof(uint32_t), &wdr->sata_control); 2992 if (rv != 0) { 2993 aprint_error("%s:%d:%d: couldn't subregion SControl\n", 2994 device_xname(MVSATA_DEV2(mvport)), hc, port); 2995 return rv; 2996 } 2997 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, 2998 SATA_SS, sizeof(uint32_t), &wdr->sata_status); 2999 if (rv != 0) { 3000 aprint_error("%s:%d:%d: couldn't subregion SStatus\n", 3001 device_xname(MVSATA_DEV2(mvport)), hc, port); 3002 return rv; 3003 } 3004 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, 3005 SATA_SE, sizeof(uint32_t), &wdr->sata_error); 3006 if (rv != 0) { 3007 aprint_error("%s:%d:%d: couldn't subregion SError\n", 3008 device_xname(MVSATA_DEV2(mvport)), hc, port); 3009 return rv; 3010 } 3011 3012 return 0; 3013 } 3014 3015 3016 #ifndef MVSATA_WITHOUTDMA 3017 /* 3018 * There are functions to determine Host Queue Tag. 3019 * XXXX: We hope to rotate Tag to facilitate debugging. 3020 */ 3021 3022 static inline void 3023 mvsata_quetag_init(struct mvsata_port *mvport) 3024 { 3025 3026 mvport->port_quetagidx = 0; 3027 } 3028 3029 static inline int 3030 mvsata_quetag_get(struct mvsata_port *mvport) 3031 { 3032 int begin = mvport->port_quetagidx; 3033 3034 do { 3035 if (mvport->port_reqtbl[mvport->port_quetagidx].xfer == NULL) { 3036 MVSATA_EDMAQ_INC(mvport->port_quetagidx); 3037 return mvport->port_quetagidx; 3038 } 3039 MVSATA_EDMAQ_INC(mvport->port_quetagidx); 3040 } while (mvport->port_quetagidx != begin); 3041 3042 return -1; 3043 } 3044 3045 static inline void 3046 mvsata_quetag_put(struct mvsata_port *mvport, int quetag) 3047 { 3048 3049 /* nothing */ 3050 } 3051 3052 static void * 3053 mvsata_edma_resource_prepare(struct mvsata_port *mvport, bus_dma_tag_t dmat, 3054 bus_dmamap_t *dmamap, size_t size, int write) 3055 { 3056 bus_dma_segment_t seg; 3057 int nseg, rv; 3058 void *kva; 3059 3060 rv = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, &seg, 1, &nseg, 3061 
BUS_DMA_NOWAIT); 3062 if (rv != 0) { 3063 aprint_error("%s:%d:%d: DMA memory alloc failed: error=%d\n", 3064 device_xname(MVSATA_DEV2(mvport)), 3065 mvport->port_hc->hc, mvport->port, rv); 3066 goto fail; 3067 } 3068 3069 rv = bus_dmamem_map(dmat, &seg, nseg, size, &kva, BUS_DMA_NOWAIT); 3070 if (rv != 0) { 3071 aprint_error("%s:%d:%d: DMA memory map failed: error=%d\n", 3072 device_xname(MVSATA_DEV2(mvport)), 3073 mvport->port_hc->hc, mvport->port, rv); 3074 goto free; 3075 } 3076 3077 rv = bus_dmamap_load(dmat, *dmamap, kva, size, NULL, 3078 BUS_DMA_NOWAIT | (write ? BUS_DMA_WRITE : BUS_DMA_READ)); 3079 if (rv != 0) { 3080 aprint_error("%s:%d:%d: DMA map load failed: error=%d\n", 3081 device_xname(MVSATA_DEV2(mvport)), 3082 mvport->port_hc->hc, mvport->port, rv); 3083 goto unmap; 3084 } 3085 3086 if (!write) 3087 bus_dmamap_sync(dmat, *dmamap, 0, size, BUS_DMASYNC_PREREAD); 3088 3089 return kva; 3090 3091 unmap: 3092 bus_dmamem_unmap(dmat, kva, size); 3093 free: 3094 bus_dmamem_free(dmat, &seg, nseg); 3095 fail: 3096 return NULL; 3097 } 3098 3099 /* ARGSUSED */ 3100 static void 3101 mvsata_edma_resource_purge(struct mvsata_port *mvport, bus_dma_tag_t dmat, 3102 bus_dmamap_t dmamap, void *kva) 3103 { 3104 3105 bus_dmamap_unload(dmat, dmamap); 3106 bus_dmamem_unmap(dmat, kva, dmamap->dm_mapsize); 3107 bus_dmamem_free(dmat, dmamap->dm_segs, dmamap->dm_nsegs); 3108 } 3109 3110 static int 3111 mvsata_dma_bufload(struct mvsata_port *mvport, int index, void *databuf, 3112 size_t datalen, int flags) 3113 { 3114 int rv, lop, sop; 3115 bus_dmamap_t data_dmamap = mvport->port_reqtbl[index].data_dmamap; 3116 3117 lop = (flags & ATA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE; 3118 sop = (flags & ATA_READ) ? 
BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE; 3119 3120 rv = bus_dmamap_load(mvport->port_dmat, data_dmamap, databuf, datalen, 3121 NULL, BUS_DMA_NOWAIT | lop); 3122 if (rv) { 3123 aprint_error("%s:%d:%d: buffer load failed: error=%d", 3124 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc, 3125 mvport->port, rv); 3126 return rv; 3127 } 3128 bus_dmamap_sync(mvport->port_dmat, data_dmamap, 0, 3129 data_dmamap->dm_mapsize, sop); 3130 3131 return 0; 3132 } 3133 3134 static inline void 3135 mvsata_dma_bufunload(struct mvsata_port *mvport, int index, int flags) 3136 { 3137 bus_dmamap_t data_dmamap = mvport->port_reqtbl[index].data_dmamap; 3138 3139 bus_dmamap_sync(mvport->port_dmat, data_dmamap, 0, 3140 data_dmamap->dm_mapsize, 3141 (flags & ATA_READ) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 3142 bus_dmamap_unload(mvport->port_dmat, data_dmamap); 3143 } 3144 #endif 3145 3146 static void 3147 mvsata_hreset_port(struct mvsata_port *mvport) 3148 { 3149 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 3150 3151 MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EATARST); 3152 3153 delay(25); /* allow reset propagation */ 3154 3155 MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, 0); 3156 3157 mvport->_fix_phy_param._fix_phy(mvport); 3158 3159 if (sc->sc_gen == gen1) 3160 delay(1000); 3161 } 3162 3163 static void 3164 mvsata_reset_port(struct mvsata_port *mvport) 3165 { 3166 device_t parent = device_parent(MVSATA_DEV2(mvport)); 3167 3168 MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EDSEDMA); 3169 3170 mvsata_hreset_port(mvport); 3171 3172 if (device_is_a(parent, "pci")) 3173 MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG, 3174 EDMA_CFG_RESERVED | EDMA_CFG_ERDBSZ); 3175 else /* SoC */ 3176 MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG, 3177 EDMA_CFG_RESERVED | EDMA_CFG_RESERVED2); 3178 MVSATA_EDMA_WRITE_4(mvport, EDMA_T, 0); 3179 MVSATA_EDMA_WRITE_4(mvport, SATA_SEIM, 0x019c0000); 3180 MVSATA_EDMA_WRITE_4(mvport, SATA_SE, ~0); 3181 MVSATA_EDMA_WRITE_4(mvport, SATA_FISIC, 0); 3182 
MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, 0); 3183 MVSATA_EDMA_WRITE_4(mvport, EDMA_IEM, 0); 3184 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, 0); 3185 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 0); 3186 MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQOP, 0); 3187 MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQBAH, 0); 3188 MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQIP, 0); 3189 MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, 0); 3190 MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, 0); 3191 MVSATA_EDMA_WRITE_4(mvport, EDMA_TC, 0); 3192 MVSATA_EDMA_WRITE_4(mvport, EDMA_IORT, 0xbc); 3193 } 3194 3195 static void 3196 mvsata_reset_hc(struct mvsata_hc *mvhc) 3197 { 3198 #if 0 3199 uint32_t val; 3200 #endif 3201 3202 MVSATA_HC_WRITE_4(mvhc, SATAHC_ICT, 0); 3203 MVSATA_HC_WRITE_4(mvhc, SATAHC_ITT, 0); 3204 MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, 0); 3205 3206 #if 0 /* XXXX needs? */ 3207 MVSATA_HC_WRITE_4(mvhc, 0x01c, 0); 3208 3209 /* 3210 * Keep the SS during power on and the reference clock bits (reset 3211 * sample) 3212 */ 3213 val = MVSATA_HC_READ_4(mvhc, 0x020); 3214 val &= 0x1c1c1c1c; 3215 val |= 0x03030303; 3216 MVSATA_HC_READ_4(mvhc, 0x020, 0); 3217 #endif 3218 } 3219 3220 #define WDCDELAY 100 /* 100 microseconds */ 3221 #define WDCNDELAY_RST (WDC_RESET_WAIT * 1000 / WDCDELAY) 3222 3223 static uint32_t 3224 mvsata_softreset(struct mvsata_port *mvport, int waitok) 3225 { 3226 uint32_t sig0 = ~0; 3227 int timeout, nloop; 3228 uint8_t st0; 3229 3230 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_RST | WDCTL_IDS | WDCTL_4BIT); 3231 delay(10); 3232 (void) MVSATA_WDC_READ_1(mvport, SRB_FE); 3233 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_IDS | WDCTL_4BIT); 3234 delay(10); 3235 3236 if (!waitok) 3237 nloop = WDCNDELAY_RST; 3238 else 3239 nloop = WDC_RESET_WAIT * hz / 1000; 3240 3241 /* wait for BSY to deassert */ 3242 for (timeout = 0; timeout < nloop; timeout++) { 3243 st0 = MVSATA_WDC_READ_1(mvport, SRB_CS); 3244 3245 if ((st0 & WDCS_BSY) == 0) { 3246 sig0 = MVSATA_WDC_READ_1(mvport, SRB_SC) << 0; 3247 sig0 |= 
MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 8;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 16;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 24;
			goto out;
		}
		if (!waitok)
			delay(WDCDELAY);
		else
			tsleep(&nloop, PRIBIO, "atarst", 1);
	}

out:
	/* Deassert SRST, leaving interrupts enabled again. */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	return sig0;
}

#ifndef MVSATA_WITHOUTDMA
/*
 * Reset the EDMA request/response queue pointers.  The response queue
 * base is taken from the loaded CRPB DMA map; the request queue base is
 * (re)programmed elsewhere, so it is simply zeroed here.
 */
static void
mvsata_edma_reset_qptr(struct mvsata_port *mvport)
{
	const bus_addr_t crpb_addr =
	    mvport->port_crpb_dmamap->dm_segs[0].ds_addr;
	const uint32_t crpb_addr_mask =
	    EDMA_RESQP_ERPQBAP_MASK | EDMA_RESQP_ERPQBA_MASK;

	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQOP, 0);
	/* High 32 bits of the CRPB base ((x >> 16) >> 16 avoids UB on LP32) */
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQBAH, (crpb_addr >> 16) >> 16);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, (crpb_addr & crpb_addr_mask));
}

/* Enable the EDMA engine on this port. */
static inline void
mvsata_edma_enable(struct mvsata_port *mvport)
{

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EENEDMA);
}

/*
 * Disable the EDMA engine: wait for it to go idle, then request disable
 * and wait for the enable bit to clear.  `timeout' is in milliseconds
 * (each poll iteration waits ~1 ms, via delay(1000) or mstohz(1) tick
 * sleep depending on `waitok').  Returns 0 on success or EBUSY if the
 * engine could not be stopped within the timeout.
 */
static int
mvsata_edma_disable(struct mvsata_port *mvport, int timeout, int waitok)
{
	uint32_t status, command;
	int ms;

	if (MVSATA_EDMA_READ_4(mvport, EDMA_CMD) & EDMA_CMD_EENEDMA) {
		/* First wait for the engine to become idle. */
		for (ms = 0; ms < timeout; ms++) {
			status = MVSATA_EDMA_READ_4(mvport, EDMA_S);
			if (status & EDMA_S_EDMAIDLE)
				break;
			if (waitok)
				tsleep(&waitok, PRIBIO, "mvsata_edma1",
				    mstohz(1));
			else
				delay(1000);
		}
		if (ms == timeout) {
			aprint_error("%s:%d:%d: unable to stop EDMA\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port);
			return EBUSY;
		}

		/* The disable bit (eDsEDMA) is self negated. */
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EDSEDMA);

		/*
		 * Continue counting from `ms' so both loops together
		 * respect the caller's total timeout budget.
		 */
		for ( ; ms < timeout; ms++) {
			command = MVSATA_EDMA_READ_4(mvport, EDMA_CMD);
			if (!(command & EDMA_CMD_EENEDMA))
				break;
			if (waitok)
				tsleep(&waitok, PRIBIO, "mvsata_edma2",
				    mstohz(1));
			else
				delay(1000);
		}
		if (ms == timeout) {
			aprint_error("%s:%d:%d: unable to stop EDMA\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port);
			return EBUSY;
		}
	}
	return 0;
}

/*
 * Set EDMA registers according to mode.
 * ex. NCQ/TCQ(queued)/non queued.
 */
static void
mvsata_edma_config(struct mvsata_port *mvport, int mode)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	uint32_t reg;

	reg = MVSATA_EDMA_READ_4(mvport, EDMA_CFG);
	reg |= EDMA_CFG_RESERVED;

	if (mode == ncq) {
		if (sc->sc_gen == gen1) {
			aprint_error_dev(MVSATA_DEV2(mvport),
			    "GenI not support NCQ\n");
			return;
		} else if (sc->sc_gen == gen2)
			reg |= EDMA_CFG_EDEVERR;
		reg |= EDMA_CFG_ESATANATVCMDQUE;
	} else if (mode == queued) {
		reg &= ~EDMA_CFG_ESATANATVCMDQUE;
		reg |= EDMA_CFG_EQUE;
	} else
		reg &= ~(EDMA_CFG_ESATANATVCMDQUE | EDMA_CFG_EQUE);

	/* Generation-specific burst/buffer configuration. */
	if (sc->sc_gen == gen1)
		reg |= EDMA_CFG_ERDBSZ;
	else if (sc->sc_gen == gen2)
		reg |= (EDMA_CFG_ERDBSZEXT | EDMA_CFG_EWRBUFFERLEN);
	else if (sc->sc_gen == gen2e) {
		device_t parent = device_parent(MVSATA_DEV(sc));

		reg |= (EDMA_CFG_EMASKRXPM | EDMA_CFG_EHOSTQUEUECACHEEN);
		reg &= ~(EDMA_CFG_EEDMAFBS | EDMA_CFG_EEDMAQUELEN);

		if (device_is_a(parent, "pci"))
			reg |= (
#if NATAPIBUS > 0
			    EDMA_CFG_EEARLYCOMPLETIONEN |
#endif
			    EDMA_CFG_ECUTTHROUGHEN |
			    EDMA_CFG_EWRBUFFERLEN |
			    EDMA_CFG_ERDBSZEXT);
	}
	MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG, reg);

	reg = (
EDMA_IE_EIORDYERR | 3384 EDMA_IE_ETRANSINT | 3385 EDMA_IE_EDEVCON | 3386 EDMA_IE_EDEVDIS); 3387 if (sc->sc_gen != gen1) 3388 reg |= ( 3389 EDMA_IE_TRANSPROTERR | 3390 EDMA_IE_LINKDATATXERR(EDMA_IE_LINKTXERR_FISTXABORTED) | 3391 EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_OTHERERRORS) | 3392 EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) | 3393 EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_INTERNALFIFO) | 3394 EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_SATACRC) | 3395 EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_OTHERERRORS) | 3396 EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) | 3397 EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_INTERNALFIFO) | 3398 EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_OTHERERRORS) | 3399 EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) | 3400 EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_INTERNALFIFO) | 3401 EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_SATACRC) | 3402 EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_OTHERERRORS) | 3403 EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) | 3404 EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_INTERNALFIFO) | 3405 EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_SATACRC) | 3406 EDMA_IE_ESELFDIS); 3407 3408 if (mode == ncq) 3409 reg |= EDMA_IE_EDEVERR; 3410 MVSATA_EDMA_WRITE_4(mvport, EDMA_IEM, reg); 3411 reg = MVSATA_EDMA_READ_4(mvport, EDMA_HC); 3412 reg &= ~EDMA_IE_EDEVERR; 3413 if (mode != ncq) 3414 reg |= EDMA_IE_EDEVERR; 3415 MVSATA_EDMA_WRITE_4(mvport, EDMA_HC, reg); 3416 if (sc->sc_gen == gen2e) { 3417 /* 3418 * Clear FISWait4HostRdyEn[0] and [2]. 3419 * [0]: Device to Host FIS with <ERR> or <DF> bit set to 1. 3420 * [2]: SDB FIS is received with <ERR> bit set to 1. 
3421 */ 3422 reg = MVSATA_EDMA_READ_4(mvport, SATA_FISC); 3423 reg &= ~(SATA_FISC_FISWAIT4HOSTRDYEN_B0 | 3424 SATA_FISC_FISWAIT4HOSTRDYEN_B2); 3425 MVSATA_EDMA_WRITE_4(mvport, SATA_FISC, reg); 3426 } 3427 3428 mvport->port_edmamode = mode; 3429 } 3430 3431 3432 /* 3433 * Generation dependent functions 3434 */ 3435 3436 static void 3437 mvsata_edma_setup_crqb(struct mvsata_port *mvport, int erqqip, int quetag, 3438 struct ata_bio *ata_bio) 3439 { 3440 struct crqb *crqb; 3441 bus_addr_t eprd_addr; 3442 daddr_t blkno; 3443 uint32_t rw; 3444 uint8_t cmd, head; 3445 int i; 3446 const int drive = 3447 mvport->port_ata_channel.ch_queue->active_xfer->c_drive; 3448 3449 eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr + 3450 mvport->port_reqtbl[quetag].eprd_offset; 3451 rw = (ata_bio->flags & ATA_READ) ? CRQB_CDIR_READ : CRQB_CDIR_WRITE; 3452 cmd = (ata_bio->flags & ATA_READ) ? WDCC_READDMA : WDCC_WRITEDMA; 3453 if (ata_bio->flags & (ATA_LBA|ATA_LBA48)) { 3454 head = WDSD_LBA; 3455 } else { 3456 head = 0; 3457 } 3458 blkno = ata_bio->blkno; 3459 if (ata_bio->flags & ATA_LBA48) 3460 cmd = atacmd_to48(cmd); 3461 else { 3462 head |= ((ata_bio->blkno >> 24) & 0xf); 3463 blkno &= 0xffffff; 3464 } 3465 crqb = &mvport->port_crqb->crqb + erqqip; 3466 crqb->cprdbl = htole32(eprd_addr & CRQB_CRQBL_EPRD_MASK); 3467 crqb->cprdbh = htole32((eprd_addr >> 16) >> 16); 3468 crqb->ctrlflg = 3469 htole16(rw | CRQB_CHOSTQUETAG(quetag) | CRQB_CPMPORT(drive)); 3470 i = 0; 3471 if (mvport->port_edmamode == dma) { 3472 if (ata_bio->flags & ATA_LBA48) 3473 crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND( 3474 CRQB_ATACOMMAND_SECTORCOUNT, ata_bio->nblks >> 8)); 3475 crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND( 3476 CRQB_ATACOMMAND_SECTORCOUNT, ata_bio->nblks)); 3477 } else { /* ncq/queued */ 3478 3479 /* 3480 * XXXX: Oops, ata command is not correct. And, atabus layer 3481 * has not been supported yet now. 3482 * Queued DMA read/write. 3483 * read/write FPDMAQueued. 
3484 */ 3485 3486 if (ata_bio->flags & ATA_LBA48) 3487 crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND( 3488 CRQB_ATACOMMAND_FEATURES, ata_bio->nblks >> 8)); 3489 crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND( 3490 CRQB_ATACOMMAND_FEATURES, ata_bio->nblks)); 3491 crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND( 3492 CRQB_ATACOMMAND_SECTORCOUNT, quetag << 3)); 3493 } 3494 if (ata_bio->flags & ATA_LBA48) { 3495 crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND( 3496 CRQB_ATACOMMAND_LBALOW, blkno >> 24)); 3497 crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND( 3498 CRQB_ATACOMMAND_LBAMID, blkno >> 32)); 3499 crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND( 3500 CRQB_ATACOMMAND_LBAHIGH, blkno >> 40)); 3501 } 3502 crqb->atacommand[i++] = 3503 htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBALOW, blkno)); 3504 crqb->atacommand[i++] = 3505 htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBAMID, blkno >> 8)); 3506 crqb->atacommand[i++] = 3507 htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBAHIGH, blkno >> 16)); 3508 crqb->atacommand[i++] = 3509 htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_DEVICE, head)); 3510 crqb->atacommand[i++] = htole16( 3511 CRQB_ATACOMMAND(CRQB_ATACOMMAND_COMMAND, cmd) | 3512 CRQB_ATACOMMAND_LAST); 3513 } 3514 #endif 3515 3516 static uint32_t 3517 mvsata_read_preamps_gen1(struct mvsata_port *mvport) 3518 { 3519 struct mvsata_hc *hc = mvport->port_hc; 3520 uint32_t reg; 3521 3522 reg = MVSATA_HC_READ_4(hc, SATAHC_I_PHYMODE(mvport->port)); 3523 /* 3524 * [12:11] : pre 3525 * [7:5] : amps 3526 */ 3527 return reg & 0x000018e0; 3528 } 3529 3530 static void 3531 mvsata_fix_phy_gen1(struct mvsata_port *mvport) 3532 { 3533 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 3534 struct mvsata_hc *mvhc = mvport->port_hc; 3535 uint32_t reg; 3536 int port = mvport->port, fix_apm_sq = 0; 3537 3538 if (sc->sc_model == PCI_PRODUCT_MARVELL_88SX5080) { 3539 if (sc->sc_rev == 0x01) 3540 fix_apm_sq = 1; 3541 } else { 3542 if (sc->sc_rev == 0x00) 3543 fix_apm_sq = 1; 3544 } 3545 3546 
if (fix_apm_sq) { 3547 /* 3548 * Disable auto-power management 3549 * 88SX50xx FEr SATA#12 3550 */ 3551 reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_LTMODE(port)); 3552 reg |= (1 << 19); 3553 MVSATA_HC_WRITE_4(mvhc, SATAHC_I_LTMODE(port), reg); 3554 3555 /* 3556 * Fix squelch threshold 3557 * 88SX50xx FEr SATA#9 3558 */ 3559 reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_PHYCONTROL(port)); 3560 reg &= ~0x3; 3561 reg |= 0x1; 3562 MVSATA_HC_WRITE_4(mvhc, SATAHC_I_PHYCONTROL(port), reg); 3563 } 3564 3565 /* Revert values of pre-emphasis and signal amps to the saved ones */ 3566 reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_PHYMODE(port)); 3567 reg &= ~0x000018e0; /* pre and amps mask */ 3568 reg |= mvport->_fix_phy_param.pre_amps; 3569 MVSATA_HC_WRITE_4(mvhc, SATAHC_I_PHYMODE(port), reg); 3570 } 3571 3572 static void 3573 mvsata_devconn_gen1(struct mvsata_port *mvport) 3574 { 3575 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 3576 3577 /* Fix for 88SX50xx FEr SATA#2 */ 3578 mvport->_fix_phy_param._fix_phy(mvport); 3579 3580 /* If disk is connected, then enable the activity LED */ 3581 if (sc->sc_rev == 0x03) { 3582 /* XXXXX */ 3583 } 3584 } 3585 3586 static uint32_t 3587 mvsata_read_preamps_gen2(struct mvsata_port *mvport) 3588 { 3589 uint32_t reg; 3590 3591 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2); 3592 /* 3593 * [10:8] : amps 3594 * [7:5] : pre 3595 */ 3596 return reg & 0x000007e0; 3597 } 3598 3599 static void 3600 mvsata_fix_phy_gen2(struct mvsata_port *mvport) 3601 { 3602 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport)); 3603 uint32_t reg; 3604 3605 if ((sc->sc_gen == gen2 && sc->sc_rev == 0x07) || 3606 sc->sc_gen == gen2e) { 3607 /* 3608 * Fix for 3609 * 88SX60X1 FEr SATA #23 3610 * 88SX6042/88SX7042 FEr SATA #23 3611 * 88F5182 FEr #SATA-S13 3612 * 88F5082 FEr #SATA-S13 3613 */ 3614 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2); 3615 reg &= ~(1 << 16); 3616 reg |= (1 << 31); 3617 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg); 3618 3619 delay(200); 3620 
3621 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2); 3622 reg &= ~((1 << 16) | (1 << 31)); 3623 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg); 3624 3625 delay(200); 3626 } 3627 3628 /* Fix values in PHY Mode 3 Register.*/ 3629 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM3); 3630 reg &= ~0x7F900000; 3631 reg |= 0x2A800000; 3632 /* Implement Guidline 88F5182, 88F5082, 88F6082 (GL# SATA-S11) */ 3633 if (sc->sc_model == PCI_PRODUCT_MARVELL_88F5082 || 3634 sc->sc_model == PCI_PRODUCT_MARVELL_88F5182 || 3635 sc->sc_model == PCI_PRODUCT_MARVELL_88F6082) 3636 reg &= ~0x0000001c; 3637 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM3, reg); 3638 3639 /* 3640 * Fix values in PHY Mode 4 Register. 3641 * 88SX60x1 FEr SATA#10 3642 * 88F5182 GL #SATA-S10 3643 * 88F5082 GL #SATA-S10 3644 */ 3645 if ((sc->sc_gen == gen2 && sc->sc_rev == 0x07) || 3646 sc->sc_gen == gen2e) { 3647 uint32_t tmp = 0; 3648 3649 /* 88SX60x1 FEr SATA #13 */ 3650 if (sc->sc_gen == 2 && sc->sc_rev == 0x07) 3651 tmp = MVSATA_EDMA_READ_4(mvport, SATA_PHYM3); 3652 3653 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM4); 3654 reg |= (1 << 0); 3655 reg &= ~(1 << 1); 3656 /* PHY Mode 4 Register of Gen IIE has some restriction */ 3657 if (sc->sc_gen == gen2e) { 3658 reg &= ~0x5de3fffc; 3659 reg |= (1 << 2); 3660 } 3661 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM4, reg); 3662 3663 /* 88SX60x1 FEr SATA #13 */ 3664 if (sc->sc_gen == 2 && sc->sc_rev == 0x07) 3665 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM3, tmp); 3666 } 3667 3668 /* Revert values of pre-emphasis and signal amps to the saved ones */ 3669 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2); 3670 reg &= ~0x000007e0; /* pre and amps mask */ 3671 reg |= mvport->_fix_phy_param.pre_amps; 3672 reg &= ~(1 << 16); 3673 if (sc->sc_gen == gen2e) { 3674 /* 3675 * according to mvSata 3.6.1, some IIE values are fixed. 3676 * some reserved fields must be written with fixed values. 
3677 */ 3678 reg &= ~0xC30FF01F; 3679 reg |= 0x0000900F; 3680 } 3681 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg); 3682 } 3683 3684 #ifndef MVSATA_WITHOUTDMA 3685 static void 3686 mvsata_edma_setup_crqb_gen2e(struct mvsata_port *mvport, int erqqip, int quetag, 3687 struct ata_bio *ata_bio) 3688 { 3689 struct crqb_gen2e *crqb; 3690 bus_addr_t eprd_addr; 3691 daddr_t blkno; 3692 uint32_t ctrlflg, rw; 3693 uint8_t cmd, head; 3694 const int drive = 3695 mvport->port_ata_channel.ch_queue->active_xfer->c_drive; 3696 3697 eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr + 3698 mvport->port_reqtbl[quetag].eprd_offset; 3699 rw = (ata_bio->flags & ATA_READ) ? CRQB_CDIR_READ : CRQB_CDIR_WRITE; 3700 ctrlflg = (rw | CRQB_CDEVICEQUETAG(0) | CRQB_CPMPORT(drive) | 3701 CRQB_CPRDMODE_EPRD | CRQB_CHOSTQUETAG_GEN2(quetag)); 3702 cmd = (ata_bio->flags & ATA_READ) ? WDCC_READDMA : WDCC_WRITEDMA; 3703 if (ata_bio->flags & (ATA_LBA|ATA_LBA48)) { 3704 head = WDSD_LBA; 3705 } else { 3706 head = 0; 3707 } 3708 blkno = ata_bio->blkno; 3709 if (ata_bio->flags & ATA_LBA48) 3710 cmd = atacmd_to48(cmd); 3711 else { 3712 head |= ((ata_bio->blkno >> 24) & 0xf); 3713 blkno &= 0xffffff; 3714 } 3715 crqb = &mvport->port_crqb->crqb_gen2e + erqqip; 3716 crqb->cprdbl = htole32(eprd_addr & CRQB_CRQBL_EPRD_MASK); 3717 crqb->cprdbh = htole32((eprd_addr >> 16) >> 16); 3718 crqb->ctrlflg = htole32(ctrlflg); 3719 if (mvport->port_edmamode == dma) { 3720 crqb->atacommand[0] = htole32(cmd << 16); 3721 crqb->atacommand[1] = htole32((blkno & 0xffffff) | head << 24); 3722 crqb->atacommand[2] = htole32(((blkno >> 24) & 0xffffff)); 3723 crqb->atacommand[3] = htole32(ata_bio->nblks & 0xffff); 3724 } else { /* ncq/queued */ 3725 3726 /* 3727 * XXXX: Oops, ata command is not correct. And, atabus layer 3728 * has not been supported yet now. 3729 * Queued DMA read/write. 3730 * read/write FPDMAQueued. 
3731 */ 3732 3733 crqb->atacommand[0] = htole32( 3734 (cmd << 16) | ((ata_bio->nblks & 0xff) << 24)); 3735 crqb->atacommand[1] = htole32((blkno & 0xffffff) | head << 24); 3736 crqb->atacommand[2] = htole32(((blkno >> 24) & 0xffffff) | 3737 ((ata_bio->nblks >> 8) & 0xff)); 3738 crqb->atacommand[3] = htole32(ata_bio->nblks & 0xffff); 3739 crqb->atacommand[3] = htole32(quetag << 3); 3740 } 3741 } 3742 3743 3744 #ifdef MVSATA_DEBUG 3745 #define MVSATA_DEBUG_PRINT(type, size, n, p) \ 3746 do { \ 3747 int _i; \ 3748 u_char *_p = (p); \ 3749 \ 3750 printf(#type "(%d)", (n)); \ 3751 for (_i = 0; _i < (size); _i++, _p++) { \ 3752 if (_i % 16 == 0) \ 3753 printf("\n "); \ 3754 printf(" %02x", *_p); \ 3755 } \ 3756 printf("\n"); \ 3757 } while (0 /* CONSTCOND */) 3758 3759 static void 3760 mvsata_print_crqb(struct mvsata_port *mvport, int n) 3761 { 3762 3763 MVSATA_DEBUG_PRINT(crqb, sizeof(union mvsata_crqb), 3764 n, (u_char *)(mvport->port_crqb + n)); 3765 } 3766 3767 static void 3768 mvsata_print_crpb(struct mvsata_port *mvport, int n) 3769 { 3770 3771 MVSATA_DEBUG_PRINT(crpb, sizeof(struct crpb), 3772 n, (u_char *)(mvport->port_crpb + n)); 3773 } 3774 3775 static void 3776 mvsata_print_eprd(struct mvsata_port *mvport, int n) 3777 { 3778 struct eprd *eprd; 3779 int i = 0; 3780 3781 eprd = mvport->port_reqtbl[n].eprd; 3782 while (1 /*CONSTCOND*/) { 3783 MVSATA_DEBUG_PRINT(eprd, sizeof(struct eprd), 3784 i, (u_char *)eprd); 3785 if (eprd->eot & EPRD_EOT) 3786 break; 3787 eprd++; 3788 i++; 3789 } 3790 } 3791 #endif 3792 #endif 3793