/*	$NetBSD: ata.c,v 1.149 2019/05/25 16:30:18 christos Exp $	*/

/*
 * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ata.c,v 1.149 2019/05/25 16:30:18 christos Exp $");

#include "opt_ata.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/errno.h>
#include <sys/ataio.h>
#include <sys/kmem.h>
#include <sys/intr.h>
#include <sys/bus.h>
#include <sys/once.h>
#include <sys/bitops.h>

#define ATABUS_PRIVATE

#include <dev/ata/ataconf.h>
#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>	/* for PIOBM */

#include "ioconf.h"
#include "locators.h"

#include "atapibus.h"
#include "ataraid.h"
#include "sata_pmp.h"

#if NATARAID > 0
#include <dev/ata/ata_raidvar.h>
#endif
#if NSATA_PMP > 0
#include <dev/ata/satapmpvar.h>
#endif
#include <dev/ata/satapmpreg.h>

#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10
#define DEBUG_DETACH	0x20
#define DEBUG_XFERS	0x40
#ifdef ATADEBUG
#ifndef ATADEBUG_MASK
#define ATADEBUG_MASK 0
#endif
int atadebug_mask = ATADEBUG_MASK;
#define ATADEBUG_PRINT(args, level) \
	if (atadebug_mask & (level)) \
		printf args
#else
#define ATADEBUG_PRINT(args, level)
#endif
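/*
 * Usage sketch (illustrative only, not part of the driver): in a kernel
 * built with ATADEBUG, the debug classes above are selected through
 * atadebug_mask, either at build time via the ATADEBUG_MASK option or by
 * patching the variable at run time, and are then used like:
 *
 *	ATADEBUG_PRINT(("%s: probing channel %d\n", __func__,
 *	    chp->ch_channel), DEBUG_PROBE);
 */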
static ONCE_DECL(ata_init_ctrl);
static struct pool ata_xfer_pool;

/*
 * A queue of atabus instances, used to ensure the same bus probe order
 * for a given hardware configuration at each boot.  The kthread probes
 * the devices on an atabus; only one probe runs at a time.
 */
static TAILQ_HEAD(, atabus_initq) atabus_initq_head;
static kmutex_t atabus_qlock;
static kcondvar_t atabus_qcv;
static lwp_t *atabus_cfg_lwp;

/*****************************************************************************
 * ATA bus layer.
 *
 * ATA controllers attach an atabus instance, which handles probing the bus
 * for drives, etc.
 *****************************************************************************/

dev_type_open(atabusopen);
dev_type_close(atabusclose);
dev_type_ioctl(atabusioctl);

const struct cdevsw atabus_cdevsw = {
	.d_open = atabusopen,
	.d_close = atabusclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = atabusioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

static void atabus_childdetached(device_t, device_t);
static int atabus_rescan(device_t, const char *, const int *);
static bool atabus_resume(device_t, const pmf_qual_t *);
static bool atabus_suspend(device_t, const pmf_qual_t *);
static void atabusconfig_thread(void *);

static void ata_channel_idle(struct ata_channel *);
static void ata_activate_xfer_locked(struct ata_channel *, struct ata_xfer *);
static void ata_channel_freeze_locked(struct ata_channel *);
static void ata_thread_wake_locked(struct ata_channel *);

/*
 * atabus_init:
 *
 *	Initialize ATA subsystem structures.
 */
static int
atabus_init(void)
{

	pool_init(&ata_xfer_pool, sizeof(struct ata_xfer), 0, 0, 0,
	    "ataspl", NULL, IPL_BIO);
	TAILQ_INIT(&atabus_initq_head);
	mutex_init(&atabus_qlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&atabus_qcv, "atainitq");
	return 0;
}

/*
 * atabusprint:
 *
 *	Autoconfiguration print routine used by ATA controllers when
 *	attaching an atabus instance.
 */
int
atabusprint(void *aux, const char *pnp)
{
	struct ata_channel *chan = aux;

	if (pnp)
		aprint_normal("atabus at %s", pnp);
	aprint_normal(" channel %d", chan->ch_channel);

	return (UNCONF);
}

/*
 * ataprint:
 *
 *	Autoconfiguration print routine.
 */
int
ataprint(void *aux, const char *pnp)
{
	struct ata_device *adev = aux;

	if (pnp)
		aprint_normal("wd at %s", pnp);
	aprint_normal(" drive %d", adev->adev_drv_data->drive);

	return (UNCONF);
}

/*
 * ata_channel_attach:
 *
 *	Common parts of attaching an atabus to an ATA controller channel.
 */
void
ata_channel_attach(struct ata_channel *chp)
{
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	ata_channel_init(chp);

	KASSERT(chp->ch_queue != NULL);

	chp->atabus = config_found_ia(chp->ch_atac->atac_dev, "ata", chp,
	    atabusprint);
}
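/*
 * Attachment sketch (hypothetical controller softc "sc" embedding an
 * atac_softc and one ata_channel per channel; names are assumptions, not
 * from this file): a controller driver fills in the channel and calls the
 * function above from its own attach routine, roughly:
 *
 *	sc->sc_chan.ch_channel = 0;
 *	sc->sc_chan.ch_atac = &sc->sc_atac;
 *	ata_channel_attach(&sc->sc_chan);
 *
 * which config_found()s an atabus instance on the "ata" interface
 * attribute, printed via atabusprint().
 */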
/*
 * ata_channel_detach:
 *
 *	Common parts of detaching an atabus from an ATA controller channel.
 */
void
ata_channel_detach(struct ata_channel *chp)
{
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	ata_channel_destroy(chp);

	chp->ch_flags |= ATACH_DETACHED;
}

static void
atabusconfig(struct atabus_softc *atabus_sc)
{
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, error;

	/* we are in the atabus's thread context */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RUN;
	ata_channel_unlock(chp);

	/*
	 * Probe for the drives attached to the controller, unless a PMP
	 * is already known.
	 */
	/* XXX for SATA devices we will power up all drives at once */
	if (chp->ch_satapmp_nports == 0)
		(*atac->atac_probe)(chp);

	if (chp->ch_ndrives >= 2) {
		ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n",
		    chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type),
		    DEBUG_PROBE);
	}

	/* the next operations will occur in a separate thread */
	ata_channel_lock(chp);
	chp->ch_flags &= ~ATACH_TH_RUN;
	ata_channel_unlock(chp);

	/* Make sure the devices probe in atabus order to avoid jitter. */
	mutex_enter(&atabus_qlock);
	for (;;) {
		atabus_initq = TAILQ_FIRST(&atabus_initq_head);
		if (atabus_initq->atabus_sc == atabus_sc)
			break;
		cv_wait(&atabus_qcv, &atabus_qlock);
	}
	mutex_exit(&atabus_qlock);

	ata_channel_lock(chp);

	/* If no drives, abort here */
	if (chp->ch_drive == NULL)
		goto out;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++)
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE)
			break;
	if (i == chp->ch_ndrives)
		goto out;

	/* Shortcut in case we've been shutdown */
	if (chp->ch_flags & ATACH_SHUTDOWN)
		goto out;

	ata_channel_unlock(chp);

	if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread,
	    atabus_sc, &atabus_cfg_lwp,
	    "%scnf", device_xname(atac->atac_dev))) != 0)
		aprint_error_dev(atac->atac_dev,
		    "unable to create config thread: error %d\n", error);
	return;

 out:
	ata_channel_unlock(chp);

	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	kmem_free(atabus_initq, sizeof(*atabus_initq));

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
}
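/*
 * Ordering note (summary of the mechanism above): atabus_attach() queues
 * each atabus on atabus_initq_head in attach order; atabusconfig() then
 * waits on atabus_qcv until its own entry reaches the head of the queue.
 * Drive attachment therefore always happens in the same order for a given
 * hardware configuration, independent of probe thread scheduling.
 */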
/*
 * atabusconfig_thread: finish attaching the atabus's children, in a
 * separate kernel thread.
 */
static void
atabusconfig_thread(void *arg)
{
	struct atabus_softc *atabus_sc = arg;
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, s;

	/* XXX seems wrong */
	mutex_enter(&atabus_qlock);
	atabus_initq = TAILQ_FIRST(&atabus_initq_head);
	KASSERT(atabus_initq->atabus_sc == atabus_sc);
	mutex_exit(&atabus_qlock);

	/*
	 * First look for a port multiplier.
	 */
	if (chp->ch_ndrives == PMP_MAX_DRIVES &&
	    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
#if NSATA_PMP > 0
		satapmp_attach(chp);
#else
		aprint_error_dev(atabus_sc->sc_dev,
		    "SATA port multiplier not supported\n");
		/* no problems going on, all drives are ATA_DRIVET_NONE */
#endif
	}

	/*
	 * Attach an ATAPI bus, if needed.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) {
#if NATAPIBUS > 0
			(*atac->atac_atapibus_attach)(atabus_sc);
#else
			/*
			 * Fake the autoconfig "not configured" message
			 */
			aprint_normal("atapibus at %s not configured\n",
			    device_xname(atac->atac_dev));
			chp->atapibus = NULL;
			s = splbio();
			for (i = 0; i < chp->ch_ndrives; i++) {
				if (chp->ch_drive[i].drive_type ==
				    ATA_DRIVET_ATAPI)
					chp->ch_drive[i].drive_type =
					    ATA_DRIVET_NONE;
			}
			splx(s);
#endif
			break;
		}
	}

	for (i = 0; i < chp->ch_ndrives; i++) {
		struct ata_device adev;
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA &&
		    chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) {
			continue;
		}
		if (chp->ch_drive[i].drv_softc != NULL)
			continue;
		memset(&adev, 0, sizeof(struct ata_device));
		adev.adev_bustype = atac->atac_bustype_ata;
		adev.adev_channel = chp->ch_channel;
		adev.adev_drv_data = &chp->ch_drive[i];
		chp->ch_drive[i].drv_softc = config_found_ia(atabus_sc->sc_dev,
		    "ata_hl", &adev, ataprint);
		if (chp->ch_drive[i].drv_softc != NULL) {
			ata_probe_caps(&chp->ch_drive[i]);
		} else {
			s = splbio();
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	}

	/* now that we know the drives, the controller can set its modes */
	if (atac->atac_set_modes) {
		(*atac->atac_set_modes)(chp);
		ata_print_modes(chp);
	}
#if NATARAID > 0
	if (atac->atac_cap & ATAC_CAP_RAID) {
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) {
				ata_raid_check_component(
				    chp->ch_drive[i].drv_softc);
			}
		}
	}
#endif /* NATARAID > 0 */

	/*
	 * Reset drive_flags for unattached devices, reset state for attached
	 * ones.
	 */
	s = splbio();
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			continue;
		if (chp->ch_drive[i].drv_softc == NULL) {
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		} else
			chp->ch_drive[i].state = 0;
	}
	splx(s);

	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	kmem_free(atabus_initq, sizeof(*atabus_initq));

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
	kthread_exit(0);
}
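/*
 * For orientation (standard autoconfiguration output, illustrative): the
 * config_found_ia(..., "ata_hl", ...) calls in atabusconfig_thread() are
 * what produce attachment lines such as
 *
 *	wd0 at atabus0 drive 0
 *
 * with ataprint() above supplying the " drive %d" part.
 */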
/*
 * atabus_thread:
 *
 *	Worker thread for the ATA bus.
 */
static void
atabus_thread(void *arg)
{
	struct atabus_softc *sc = arg;
	struct ata_channel *chp = sc->sc_chan;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer;
	int i, rv;

	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RUN;

	/*
	 * Probe the drives.  Reset the type to indicate to controllers
	 * that can re-probe that all drives must be probed.
	 *
	 * Note: ch_ndrives may be changed during the probe.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		chp->ch_drive[i].drive_flags = 0;
		chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
	}
	ata_channel_unlock(chp);

	atabusconfig(sc);

	ata_channel_lock(chp);
	for (;;) {
		if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_TH_DRIVE_RESET
		    | ATACH_TH_RECOVERY | ATACH_SHUTDOWN)) == 0 &&
		    (chq->queue_active == 0 || chq->queue_freeze == 0)) {
			chp->ch_flags &= ~ATACH_TH_RUN;
			cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
			chp->ch_flags |= ATACH_TH_RUN;
		}
		if (chp->ch_flags & ATACH_SHUTDOWN) {
			break;
		}
		if (chp->ch_flags & ATACH_TH_RESCAN) {
			chp->ch_flags &= ~ATACH_TH_RESCAN;
			ata_channel_unlock(chp);
			atabusconfig(sc);
			ata_channel_lock(chp);
		}
		if (chp->ch_flags & ATACH_TH_RESET) {
			/* this will unfreeze the channel */
			ata_thread_run(chp, AT_WAIT,
			    ATACH_TH_RESET, ATACH_NODRIVE);
		} else if (chp->ch_flags & ATACH_TH_DRIVE_RESET) {
			/* this will unfreeze the channel */
			for (i = 0; i < chp->ch_ndrives; i++) {
				struct ata_drive_datas *drvp;

				drvp = &chp->ch_drive[i];

				if (drvp->drive_flags & ATA_DRIVE_TH_RESET) {
					ata_thread_run(chp,
					    AT_WAIT, ATACH_TH_DRIVE_RESET, i);
				}
			}
			chp->ch_flags &= ~ATACH_TH_DRIVE_RESET;
		} else if (chp->ch_flags & ATACH_TH_RECOVERY) {
			/*
			 * This will unfreeze the channel; it drops locks
			 * during the run, so it must be wrapped in
			 * splbio()/splx() to avoid spurious interrupts.
			 * XXX MPSAFE
			 */
			int s = splbio();
			ata_thread_run(chp, AT_WAIT, ATACH_TH_RECOVERY,
			    chp->recovery_tfd);
			splx(s);
		} else if (chq->queue_active > 0 && chq->queue_freeze == 1) {
			/*
			 * Caller has bumped queue_freeze, decrease it.  This
			 * flow shall never be executed for NCQ commands.
			 */
			KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
			KASSERT(chq->queue_active == 1);

			ata_channel_thaw_locked(chp);
			xfer = ata_queue_get_active_xfer_locked(chp);

			KASSERT(xfer != NULL);
			KASSERT((xfer->c_flags & C_POLL) == 0);

			switch ((rv = ata_xfer_start(xfer))) {
			case ATASTART_STARTED:
			case ATASTART_POLL:
			case ATASTART_ABORT:
				break;
			case ATASTART_TH:
			default:
				panic("%s: ata_xfer_start() unexpected rv %d",
				    __func__, rv);
				/* NOTREACHED */
			}
		} else if (chq->queue_freeze > 1)
			panic("%s: queue_freeze", __func__);

		/* Try to run down the queue once the channel is unfrozen */
		if (chq->queue_freeze == 0) {
			ata_channel_unlock(chp);
			atastart(chp);
			ata_channel_lock(chp);
		}
	}
	chp->ch_thread = NULL;
	cv_signal(&chp->ch_thr_idle);
	ata_channel_unlock(chp);
	kthread_exit(0);
}

static void
ata_thread_wake_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
	ata_channel_freeze_locked(chp);
	cv_signal(&chp->ch_thr_idle);
}

/*
 * atabus_match:
 *
 *	Autoconfiguration match routine.
 */
static int
atabus_match(device_t parent, cfdata_t cf, void *aux)
{
	struct ata_channel *chp = aux;

	if (chp == NULL)
		return (0);

	if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel &&
	    cf->cf_loc[ATACF_CHANNEL] != ATACF_CHANNEL_DEFAULT)
		return (0);

	return (1);
}
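/*
 * Illustrative kernel config lines matched by atabus_match() (standard
 * config(5) syntax, not from this file):
 *
 *	atabus* at ata? channel ?
 *	atabus0 at ata0 channel 0
 *
 * The wildcarded "channel ?" case corresponds to ATACF_CHANNEL_DEFAULT.
 */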
/*
 * atabus_attach:
 *
 *	Autoconfiguration attach routine.
 */
static void
atabus_attach(device_t parent, device_t self, void *aux)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = aux;
	struct atabus_initq *initq;
	int error;

	sc->sc_chan = chp;

	aprint_normal("\n");
	aprint_naive("\n");

	sc->sc_dev = self;

	if (ata_addref(chp))
		return;

	RUN_ONCE(&ata_init_ctrl, atabus_init);

	initq = kmem_zalloc(sizeof(*initq), KM_SLEEP);
	initq->atabus_sc = sc;
	mutex_enter(&atabus_qlock);
	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
	mutex_exit(&atabus_qlock);
	config_pending_incr(sc->sc_dev);

	/* XXX MPSAFE - no KTHREAD_MPSAFE, so protected by KERNEL_LOCK() */
	if ((error = kthread_create(PRI_NONE, 0, NULL, atabus_thread, sc,
	    &chp->ch_thread, "%s", device_xname(self))) != 0)
		aprint_error_dev(self,
		    "unable to create kernel thread: error %d\n", error);

	if (!pmf_device_register(self, atabus_suspend, atabus_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

/*
 * atabus_detach:
 *
 *	Autoconfiguration detach routine.
 */
static int
atabus_detach(device_t self, int flags)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	device_t dev = NULL;
	int i, error = 0;

	/*
	 * Detach atapibus and its children.
	 */
	if ((dev = chp->atapibus) != NULL) {
		ATADEBUG_PRINT(("atabus_detach: %s: detaching %s\n",
		    device_xname(self), device_xname(dev)), DEBUG_DETACH);

		error = config_detach(dev, flags);
		if (error)
			goto out;
		KASSERT(chp->atapibus == NULL);
	}

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
			ATADEBUG_PRINT(("%s.%d: %s: detaching %s\n", __func__,
			    __LINE__, device_xname(self), device_xname(dev)),
			    DEBUG_DETACH);
			error = config_detach(dev, flags);
			if (error)
				goto out;
			KASSERT(chp->ch_drive[i].drv_softc == NULL);
			KASSERT(chp->ch_drive[i].drive_type == 0);
		}
	}

	/* Shutdown the channel. */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_SHUTDOWN;
	while (chp->ch_thread != NULL) {
		cv_signal(&chp->ch_thr_idle);
		cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
	}
	ata_channel_unlock(chp);

	atabus_free_drives(chp);

 out:
#ifdef ATADEBUG
	if (dev != NULL && error != 0)
		ATADEBUG_PRINT(("%s: %s: error %d detaching %s\n", __func__,
		    device_xname(self), error, device_xname(dev)),
		    DEBUG_DETACH);
#endif /* ATADEBUG */

	return (error);
}
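/*
 * Userland sketch (illustrative): atabus_detach() is what eventually runs
 * when an atabus instance is detached through the autoconfiguration
 * framework, e.g. via drvctl(8):
 *
 *	drvctl -d atabus0
 */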
void
atabus_childdetached(device_t self, device_t child)
{
	bool found = false;
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	int i;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	/*
	 * atapibus detached.
	 */
	if (child == chp->atapibus) {
		chp->atapibus = NULL;
		found = true;
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI)
				continue;
			KASSERT(chp->ch_drive[i].drv_softc != NULL);
			chp->ch_drive[i].drv_softc = NULL;
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		}
	}

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		if (child == chp->ch_drive[i].drv_softc) {
			chp->ch_drive[i].drv_softc = NULL;
			chp->ch_drive[i].drive_flags = 0;
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
				chp->ch_satapmp_nports = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			found = true;
		}
	}

	if (!found)
		panic("%s: unknown child %p", device_xname(self),
		    (const void *)child);
}

CFATTACH_DECL3_NEW(atabus, sizeof(struct atabus_softc),
    atabus_match, atabus_attach, atabus_detach, NULL, atabus_rescan,
    atabus_childdetached, DVF_DETACH_SHUTDOWN);

/*****************************************************************************
 * Common ATA bus operations.
 *****************************************************************************/

/* allocate/free the channel's ch_drive[] array */
int
atabus_alloc_drives(struct ata_channel *chp, int ndrives)
{
	int i;
	if (chp->ch_ndrives != ndrives)
		atabus_free_drives(chp);
	if (chp->ch_drive == NULL) {
		chp->ch_drive = kmem_zalloc(
		    sizeof(struct ata_drive_datas) * ndrives, KM_NOSLEEP);
	}
	if (chp->ch_drive == NULL) {
		aprint_error_dev(chp->ch_atac->atac_dev,
		    "can't alloc drive array\n");
		chp->ch_ndrives = 0;
		return ENOMEM;
	}
	for (i = 0; i < ndrives; i++) {
		chp->ch_drive[i].chnl_softc = chp;
		chp->ch_drive[i].drive = i;
	}
	chp->ch_ndrives = ndrives;
	return 0;
}

void
atabus_free_drives(struct ata_channel *chp)
{
#ifdef DIAGNOSTIC
	int i;
	int dopanic = 0;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) {
			printf("%s: ch_drive[%d] type %d != ATA_DRIVET_NONE\n",
			    device_xname(chp->atabus), i,
			    chp->ch_drive[i].drive_type);
			dopanic = 1;
		}
		if (chp->ch_drive[i].drv_softc != NULL) {
			printf("%s: ch_drive[%d] attached to %s\n",
			    device_xname(chp->atabus), i,
			    device_xname(chp->ch_drive[i].drv_softc));
			dopanic = 1;
		}
	}
	if (dopanic)
		panic("atabus_free_drives");
#endif

	if (chp->ch_drive == NULL)
		return;
	kmem_free(chp->ch_drive,
	    sizeof(struct ata_drive_datas) * chp->ch_ndrives);
	chp->ch_ndrives = 0;
	chp->ch_drive = NULL;
}
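/*
 * Usage sketch (illustrative): a controller probe hook supporting the two
 * conventional master/slave positions would allocate the array before
 * filling in the drive types it found:
 *
 *	if (atabus_alloc_drives(chp, 2) != 0)
 *		return;
 *	chp->ch_drive[0].drive_type = ATA_DRIVET_ATA;
 */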
/* Get the disk's parameters */
int
ata_get_params(struct ata_drive_datas *drvp, uint8_t flags,
    struct ataparams *prms)
{
	struct ata_xfer *xfer;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	char *tb;
	int i, rv;
	uint16_t *p;

	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp, false);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	tb = kmem_zalloc(ATA_BSIZE, KM_SLEEP);
	memset(prms, 0, sizeof(struct ataparams));

	if (drvp->drive_type == ATA_DRIVET_ATA) {
		xfer->c_ata_c.r_command = WDCC_IDENTIFY;
		xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 3000; /* 3s */
	} else if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		xfer->c_ata_c.r_command = ATAPI_IDENTIFY_DEVICE;
		xfer->c_ata_c.r_st_bmask = 0;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 10000; /* 10s */
	} else {
		ATADEBUG_PRINT(("ata_get_parms: no disks\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	xfer->c_ata_c.flags = AT_READ | flags;
	xfer->c_ata_c.data = tb;
	xfer->c_ata_c.bcount = ATA_BSIZE;
	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
	    xfer) != ATACMD_COMPLETE) {
		ATADEBUG_PRINT(("ata_get_parms: wdc_exec_command failed\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_AGAIN;
		goto out;
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		ATADEBUG_PRINT(("ata_get_parms: ata_c.flags=0x%x\n",
		    xfer->c_ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	/* if we didn't read any data something is wrong */
	if ((xfer->c_ata_c.flags & AT_XFDONE) == 0) {
		rv = CMD_ERR;
		goto out;
	}

	/* Read in parameter block. */
	memcpy(prms, tb, sizeof(struct ataparams));

	/*
	 * Shuffle string byte order.
	 * ATAPI NEC, Mitsumi and Pioneer drives and
	 * old ATA TDK CompactFlash cards
	 * have different byte order.
	 */
#if BYTE_ORDER == BIG_ENDIAN
# define M(n)	prms->atap_model[(n) ^ 1]
#else
# define M(n)	prms->atap_model[n]
#endif
	if (
#if BYTE_ORDER == BIG_ENDIAN
	    !
#endif
	    ((drvp->drive_type == ATA_DRIVET_ATAPI) ?
	     ((M(0) == 'N' && M(1) == 'E') ||
	      (M(0) == 'F' && M(1) == 'X') ||
	      (M(0) == 'P' && M(1) == 'i')) :
	     ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) {
		rv = CMD_OK;
		goto out;
	}
#undef M
	for (i = 0; i < sizeof(prms->atap_model); i += 2) {
		p = (uint16_t *)(prms->atap_model + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_serial); i += 2) {
		p = (uint16_t *)(prms->atap_serial + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_revision); i += 2) {
		p = (uint16_t *)(prms->atap_revision + i);
		*p = bswap16(*p);
	}

	rv = CMD_OK;
 out:
	kmem_free(tb, ATA_BSIZE);
	ata_free_xfer(chp, xfer);
	return rv;
}

int
ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags)
{
	struct ata_xfer *xfer;
	int rv;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;

	ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp, false);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	xfer->c_ata_c.r_command = SET_FEATURES;
	xfer->c_ata_c.r_st_bmask = 0;
	xfer->c_ata_c.r_st_pmask = 0;
	xfer->c_ata_c.r_features = WDSF_SET_MODE;
	xfer->c_ata_c.r_count = mode;
	xfer->c_ata_c.flags = flags;
	xfer->c_ata_c.timeout = 1000; /* 1s */
	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
	    xfer) != ATACMD_COMPLETE) {
		rv = CMD_AGAIN;
		goto out;
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		rv = CMD_ERR;
		goto out;
	}

	rv = CMD_OK;

 out:
	ata_free_xfer(chp, xfer);
	return rv;
}
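/*
 * The mode argument of ata_set_mode() is the SET FEATURES transfer-mode
 * subcode.  The encodings actually used by ata_probe_caps() below are
 * (variable names here are illustrative only):
 *
 *	ata_set_mode(drvp, 0x08 | pio_mode, AT_WAIT);	PIO mode
 *	ata_set_mode(drvp, 0x20 | mwdma_mode, AT_WAIT);	multiword DMA mode
 *	ata_set_mode(drvp, 0x40 | udma_mode, AT_WAIT);	Ultra-DMA mode
 */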
#if NATA_DMA
void
ata_dmaerr(struct ata_drive_datas *drvp, int flags)
{
	ata_channel_lock_owned(drvp->chnl_softc);

	/*
	 * Downgrade decision: if we get NERRS_MAX errors within NXFER
	 * transfers.  We start with n_dmaerrs set to NERRS_MAX-1 so that
	 * the first error within the first NXFER ops will immediately
	 * trigger a downgrade.
	 * If we got an error and n_xfers is bigger than NXFER, reset the
	 * counters.
	 */
	drvp->n_dmaerrs++;
	if (drvp->n_dmaerrs >= NERRS_MAX && drvp->n_xfers <= NXFER) {
		ata_downgrade_mode(drvp, flags);
		drvp->n_dmaerrs = NERRS_MAX-1;
		drvp->n_xfers = 0;
		return;
	}
	if (drvp->n_xfers > NXFER) {
		drvp->n_dmaerrs = 1; /* just got an error */
		drvp->n_xfers = 1; /* restart counting from this error */
	}
}
#endif	/* NATA_DMA */

/*
 * Freeze the queue and wait for the controller to be idle.  The caller
 * has to unfreeze/restart the queue.
 */
static void
ata_channel_idle(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_freeze_locked(chp);
	while (chp->ch_queue->queue_active > 0) {
		chp->ch_queue->queue_flags |= QF_IDLE_WAIT;
		cv_timedwait(&chp->ch_queue->queue_idle, &chp->ch_lock, 1);
	}
	ata_channel_unlock(chp);
}

/*
 * Add a command to the queue and start the controller.
 *
 * MUST BE CALLED AT splbio()!
 */
void
ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{

	ATADEBUG_PRINT(("ata_exec_xfer %p channel %d drive %d\n", xfer,
	    chp->ch_channel, xfer->c_drive), DEBUG_XFERS);

	/* complete xfer setup */
	xfer->c_chp = chp;

	ata_channel_lock(chp);

	/*
	 * Standard commands are added to the end of the command list, but
	 * recovery commands must be run immediately.
	 */
	if ((xfer->c_flags & C_SKIP_QUEUE) == 0)
		SIMPLEQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer,
		    c_xferchain);
	else
		SIMPLEQ_INSERT_HEAD(&chp->ch_queue->queue_xfer, xfer,
		    c_xferchain);

	/*
	 * If polling and able to sleep, wait for the xfer to be at the
	 * head of the queue.
	 */
	if ((xfer->c_flags & (C_POLL | C_WAIT)) == (C_POLL | C_WAIT)) {
		while (chp->ch_queue->queue_active > 0 ||
		    SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) {
			xfer->c_flags |= C_WAITACT;
			cv_wait(&chp->ch_queue->c_active, &chp->ch_lock);
			xfer->c_flags &= ~C_WAITACT;
		}

		/*
		 * Free the xfer now if there was an attempt to free it
		 * while we were waiting.
		 */
		if ((xfer->c_flags & (C_FREE|C_WAITTIMO)) == C_FREE) {
			ata_channel_unlock(chp);

			ata_free_xfer(chp, xfer);
			return;
		}
	}

	ata_channel_unlock(chp);

	ATADEBUG_PRINT(("atastart from ata_exec_xfer, flags 0x%x\n",
	    chp->ch_flags), DEBUG_XFERS);
	atastart(chp);
}
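/*
 * Submission pattern (for orientation; ata_get_params() above is a
 * concrete example): a caller allocates an xfer with ata_get_xfer(),
 * fills in c_ata_c, and hands it to the bustype's ata_exec_command hook;
 * the controller-specific hook is what ultimately queues the xfer here
 * through ata_exec_xfer().
 */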
/*
 * Start I/O on a controller, for the given channel.
 * The first xfer may not be for our channel if the channel queues
 * are shared.
 *
 * MUST BE CALLED AT splbio()!
 *
 * XXX FIS-based switching with PMP
 * Currently atastart() never schedules concurrent NCQ transfers to more
 * than one drive, even when the channel has several SATA drives attached
 * via PMP.  To support concurrent transfers to different drives with PMP,
 * it would be necessary to implement FIS-based switching support in the
 * controller driver, and then adjust error handling and recovery to stop
 * assuming at most one active drive.
 */
void
atastart(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer, *axfer;
	bool skipq;

#ifdef ATADEBUG
	int spl1, spl2;

	spl1 = splbio();
	spl2 = splbio();
	if (spl2 != spl1) {
		printf("atastart: not at splbio()\n");
		panic("atastart");
	}
	splx(spl2);
	splx(spl1);
#endif /* ATADEBUG */

	ata_channel_lock(chp);

again:
	/* is there a xfer ? */
	if ((xfer = SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL) {
		ATADEBUG_PRINT(("%s(chp=%p): channel %d queue_xfer is empty\n",
		    __func__, chp, chp->ch_channel), DEBUG_XFERS);
		goto out;
	}

	/*
	 * If someone is waiting for the command to be active, wake it up
	 * and let it process the command.
	 */
	if (__predict_false(xfer->c_flags & C_WAITACT)) {
		ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d "
		    "wait active\n", xfer, chp->ch_channel, xfer->c_drive),
		    DEBUG_XFERS);
		cv_broadcast(&chp->ch_queue->c_active);
		goto out;
	}

	skipq = ISSET(xfer->c_flags, C_SKIP_QUEUE);

	/* is the queue frozen? */
	if (__predict_false(!skipq && chq->queue_freeze > 0)) {
		if (chq->queue_flags & QF_IDLE_WAIT) {
			chq->queue_flags &= ~QF_IDLE_WAIT;
			cv_signal(&chp->ch_queue->queue_idle);
		}
		ATADEBUG_PRINT(("%s(chp=%p): channel %d drive %d "
		    "queue frozen: %d\n",
		    __func__, chp, chp->ch_channel, xfer->c_drive,
		    chq->queue_freeze),
		    DEBUG_XFERS);
		goto out;
	}

	/* all xfers on the same queue must belong to the same channel */
	KASSERT(xfer->c_chp == chp);

	/*
	 * Can only take the command if there are no currently active
	 * commands, or if the command is NCQ and the active commands are
	 * also NCQ.  If a PM is in use and the HBA driver doesn't
	 * support/use FIS-based switching, commands can only be sent to a
	 * single drive.
	 * Only the first xfer needs to be checked.
	 * XXX FIS-based switching - revisit
	 */
	if (!skipq && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) {
		if (!ISSET(xfer->c_flags, C_NCQ) ||
		    !ISSET(axfer->c_flags, C_NCQ) ||
		    xfer->c_drive != axfer->c_drive)
			goto out;
	}

	struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive];

	/*
	 * Are we at the limit of active xfers?  If the queue has more
	 * than 1 opening, we keep one slot reserved for recovery or dump.
	 */
	KASSERT(chq->queue_active <= chq->queue_openings);
	const uint8_t chq_openings = (!skipq && chq->queue_openings > 1)
	    ? (chq->queue_openings - 1) : chq->queue_openings;
	const uint8_t drv_openings = ISSET(xfer->c_flags, C_NCQ)
	    ? drvp->drv_openings : ATA_MAX_OPENINGS;
	if (chq->queue_active >= MIN(chq_openings, drv_openings)) {
		if (skipq) {
			panic("%s: channel %d busy, xfer not possible",
			    __func__, chp->ch_channel);
		}

		ATADEBUG_PRINT(("%s(chp=%p): channel %d completely busy\n",
		    __func__, chp, chp->ch_channel), DEBUG_XFERS);
		goto out;
	}

	/* Slot allocation can fail if drv_openings < ch_openings */
	if (!ata_queue_alloc_slot(chp, &xfer->c_slot, drv_openings))
		goto out;

	if (__predict_false(atac->atac_claim_hw)) {
		if (!atac->atac_claim_hw(chp, 0)) {
			ata_queue_free_slot(chp, xfer->c_slot);
			goto out;
		}
	}

	/* Now committed to start the xfer */

	ATADEBUG_PRINT(("%s(chp=%p): xfer %p channel %d drive %d\n",
	    __func__, chp, xfer, chp->ch_channel, xfer->c_drive),
	    DEBUG_XFERS);
	if (drvp->drive_flags & ATA_DRIVE_RESET) {
		drvp->drive_flags &= ~ATA_DRIVE_RESET;
		drvp->state = 0;
	}

	if (ISSET(xfer->c_flags, C_NCQ))
		SET(chp->ch_flags, ATACH_NCQ);
	else
		CLR(chp->ch_flags, ATACH_NCQ);

	SIMPLEQ_REMOVE_HEAD(&chq->queue_xfer, c_xferchain);

	ata_activate_xfer_locked(chp, xfer);

	if (atac->atac_cap & ATAC_CAP_NOIRQ)
		KASSERT(xfer->c_flags & C_POLL);

	switch (ata_xfer_start(xfer)) {
	case ATASTART_TH:
	case ATASTART_ABORT:
		/* don't start any further commands in this case */
		goto out;
	default:
		/* nothing to do */
		break;
	}

	/* Queue more commands if possible, but not during recovery or dump */
	if (!skipq && chq->queue_active < chq->queue_openings)
		goto again;

 out:
	ata_channel_unlock(chp);
}

int
ata_xfer_start(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	int rv;

	KASSERT(mutex_owned(&chp->ch_lock));

	rv = xfer->ops->c_start(chp, xfer);
	switch (rv) {
	case ATASTART_STARTED:
		/* nothing to do */
		break;
	case ATASTART_TH:
		/* postpone xfer to thread */
		ata_thread_wake_locked(chp);
		break;
	case ATASTART_POLL:
		/* can happen even in thread context for some ATAPI devices */
		ata_channel_unlock(chp);
		KASSERT(xfer->ops != NULL && xfer->ops->c_poll != NULL);
		xfer->ops->c_poll(chp, xfer);
		ata_channel_lock(chp);
		break;
	case ATASTART_ABORT:
		ata_channel_unlock(chp);
		KASSERT(xfer->ops != NULL && xfer->ops->c_abort != NULL);
		xfer->ops->c_abort(chp, xfer);
		ata_channel_lock(chp);
		break;
	}

	return rv;
}

static void
ata_activate_xfer_locked(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	KASSERT(mutex_owned(&chp->ch_lock));
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);

	if ((xfer->c_flags & C_SKIP_QUEUE) == 0)
		TAILQ_INSERT_TAIL(&chq->active_xfers, xfer, c_activechain);
	else {
		/*
		 * Must go to the head, so that ata_queue_get_active_xfer()
		 * returns the recovery command, and not some other random
		 * active transfer.
		 */
		TAILQ_INSERT_HEAD(&chq->active_xfers, xfer, c_activechain);
	}
	chq->active_xfers_used |= __BIT(xfer->c_slot);
	chq->queue_active++;
}
/*
 * Does its own locking, does not require splbio().
 * waitok - whether to block waiting for a free xfer
 */
struct ata_xfer *
ata_get_xfer(struct ata_channel *chp, bool waitok)
{
	return pool_get(&ata_xfer_pool,
	    PR_ZERO | (waitok ? PR_WAITOK : PR_NOWAIT));
}

/*
 * ata_deactivate_xfer() must always be called prior to ata_free_xfer()
 */
void
ata_free_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue *chq = chp->ch_queue;

	ata_channel_lock(chp);

	if (__predict_false(xfer->c_flags & (C_WAITACT|C_WAITTIMO))) {
		/* Someone is waiting for this xfer, so we can't free now */
		xfer->c_flags |= C_FREE;
		cv_broadcast(&chq->c_active);
		ata_channel_unlock(chp);
		return;
	}

	/* XXX move PIOBM and free_gw to deactivate? */
#if NATA_PIOBM /* XXX wdc dependent code */
	if (__predict_false(xfer->c_flags & C_PIOBM)) {
		struct wdc_softc *wdc = CHAN_TO_WDC(chp);

		/* finish the busmastering PIO */
		(*wdc->piobm_done)(wdc->dma_arg,
		    chp->ch_channel, xfer->c_drive);
		chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_PIOBM_WAIT |
		    ATACH_IRQ_WAIT);
	}
#endif

	if (__predict_false(chp->ch_atac->atac_free_hw))
		chp->ch_atac->atac_free_hw(chp);

	ata_channel_unlock(chp);

	if (__predict_true(!ISSET(xfer->c_flags, C_PRIVATE_ALLOC)))
		pool_put(&ata_xfer_pool, xfer);
}

void
ata_deactivate_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	ata_channel_lock(chp);

	KASSERT(chq->queue_active > 0);
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) != 0);

	/* Stop the timeout only when this is the last active xfer */
	if (chq->queue_active == 1)
		callout_stop(&chp->c_timo_callout);

	if (callout_invoking(&chp->c_timo_callout))
		xfer->c_flags |= C_WAITTIMO;

	TAILQ_REMOVE(&chq->active_xfers, xfer, c_activechain);
	chq->active_xfers_used &= ~__BIT(xfer->c_slot);
	chq->queue_active--;

	ata_queue_free_slot(chp, xfer->c_slot);

	if (xfer->c_flags & C_WAIT)
		cv_broadcast(&chq->c_cmd_finish);

	ata_channel_unlock(chp);
}
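/*
 * Lifecycle summary (descriptive only): an xfer normally goes through
 *
 *	ata_get_xfer() -> ata_exec_xfer() -> atastart()/ata_xfer_start()
 *	    -> completion -> ata_deactivate_xfer() -> ata_free_xfer()
 *
 * with ata_deactivate_xfer() always preceding ata_free_xfer(), as noted
 * above.
 */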
/*
 * Called in the c_intr hook.  Must be called before any deactivations are
 * done - if there is a drain pending, it calls the c_kill_xfer hook which
 * deactivates the xfer.
 * Calls c_kill_xfer with the channel lock free.
 * Returns true if the caller should just exit without further processing.
 * The caller must not further access any part of the xfer or any related
 * controller structures in that case; it should just return.
 */
bool
ata_waitdrain_xfer_check(struct ata_channel *chp, struct ata_xfer *xfer)
{
	int drive = xfer->c_drive;
	bool draining = false;

	ata_channel_lock(chp);

	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		ata_channel_unlock(chp);

		xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE);

		ata_channel_lock(chp);
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		cv_signal(&chp->ch_queue->queue_drain);
		draining = true;
	}

	ata_channel_unlock(chp);

	return draining;
}

/*
 * Check for a race of normal transfer handling vs. timeout.
 */
bool
ata_timo_xfer_check(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	ata_channel_lock(chp);

	if (xfer->c_flags & C_WAITTIMO) {
		xfer->c_flags &= ~C_WAITTIMO;

		/* Handle race vs. ata_free_xfer() */
		if (xfer->c_flags & C_FREE) {
			xfer->c_flags &= ~C_FREE;
			ata_channel_unlock(chp);

			device_printf(drvp->drv_softc,
			    "xfer %"PRIxPTR" freed while invoking timeout\n",
			    (intptr_t)xfer & PAGE_MASK);

			ata_free_xfer(chp, xfer);
			return true;
		}

		/* Race vs. callout_stop() in ata_deactivate_xfer() */
		ata_channel_unlock(chp);

		device_printf(drvp->drv_softc,
		    "xfer %"PRIxPTR" deactivated while invoking timeout\n",
		    (intptr_t)xfer & PAGE_MASK);
		return true;
	}

	ata_channel_unlock(chp);

	/* No race, proceed with timeout handling */
	return false;
}

/*
 * Kill off all active xfers for an ata_channel.
 *
 * Must be called with the channel lock held.
 */
void
ata_kill_active(struct ata_channel *chp, int reason, int flags)
{
	struct ata_queue * const chq = chp->ch_queue;
	struct ata_xfer *xfer, *xfernext;

	KASSERT(mutex_owned(&chp->ch_lock));

	TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, xfernext) {
		ata_channel_unlock(chp);
		xfer->ops->c_kill_xfer(xfer->c_chp, xfer, reason);
		ata_channel_lock(chp);
	}
}
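/*
 * The "reason" passed to the c_kill_xfer hook is one of the KILL_* codes,
 * e.g. KILL_GONE in ata_waitdrain_xfer_check() above, or
 * KILL_GONE_INACTIVE in ata_kill_pending() below for transfers that never
 * became active.
 */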
/*
 * Kill off all pending xfers for a drive.
 */
void
ata_kill_pending(struct ata_drive_datas *drvp)
{
	struct ata_channel * const chp = drvp->chnl_softc;
	struct ata_queue * const chq = chp->ch_queue;
	struct ata_xfer *xfer;

	ata_channel_lock(chp);

	/* Kill all pending transfers */
	while ((xfer = SIMPLEQ_FIRST(&chq->queue_xfer))) {
		KASSERT(xfer->c_chp == chp);

		if (xfer->c_drive != drvp->drive)
			continue;

		SIMPLEQ_REMOVE_HEAD(&chp->ch_queue->queue_xfer, c_xferchain);

		/*
		 * Keep the lock, so that we get a deadlock (and 'locking
		 * against myself' with LOCKDEBUG), instead of silent data
		 * corruption, if the hook tries to call back into the
		 * middle layer for an inactive xfer.
		 */
		xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE_INACTIVE);
	}

	/* Wait until all active transfers on the drive finish */
	while (chq->queue_active > 0) {
		bool drv_active = false;

		TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
			KASSERT(xfer->c_chp == chp);

			if (xfer->c_drive == drvp->drive) {
				drv_active = true;
				break;
			}
		}

		if (!drv_active) {
			/* all finished */
			break;
		}

		drvp->drive_flags |= ATA_DRIVE_WAITDRAIN;
		cv_wait(&chq->queue_drain, &chp->ch_lock);
	}

	ata_channel_unlock(chp);
}

static void
ata_channel_freeze_locked(struct ata_channel *chp)
{
	chp->ch_queue->queue_freeze++;

	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
}

void
ata_channel_freeze(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_freeze_locked(chp);
	ata_channel_unlock(chp);
}

void
ata_channel_thaw_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
	KASSERT(chp->ch_queue->queue_freeze > 0);

	chp->ch_queue->queue_freeze--;

	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
}
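/*
 * Pairing note: every ata_channel_freeze()/ata_channel_freeze_locked()
 * call must eventually be matched by an ata_channel_thaw_locked();
 * atastart() issues no new commands while queue_freeze > 0, and the
 * KASSERT above fires on an unbalanced thaw.
 */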
/*
 * ata_thread_run:
 *
 *	Reset an ATA channel.  The channel lock must be held.  arg is
 *	type-specific.
 */
void
ata_thread_run(struct ata_channel *chp, int flags, int type, int arg)
{
	struct atac_softc *atac = chp->ch_atac;
	bool threset = false;
	struct ata_drive_datas *drvp;

	ata_channel_lock_owned(chp);

	/*
	 * If we can poll or wait it's OK, otherwise wake up the
	 * kernel thread to do it for us.
	 */
	ATADEBUG_PRINT(("%s flags 0x%x ch_flags 0x%x\n",
	    __func__, flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
	if ((flags & (AT_POLL | AT_WAIT)) == 0) {
		switch (type) {
		case ATACH_TH_RESET:
			if (chp->ch_flags & ATACH_TH_RESET) {
				/* No need to schedule another reset */
				return;
			}
			break;
		case ATACH_TH_DRIVE_RESET:
		    {
			int drive = arg;

			KASSERT(drive <= chp->ch_ndrives);
			drvp = &chp->ch_drive[drive];

			if (drvp->drive_flags & ATA_DRIVE_TH_RESET) {
				/* No need to schedule another reset */
				return;
			}
			drvp->drive_flags |= ATA_DRIVE_TH_RESET;
			break;
		    }
		case ATACH_TH_RECOVERY:
		    {
			uint32_t tfd = (uint32_t)arg;

			KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
			chp->recovery_tfd = tfd;
			break;
		    }
		default:
			panic("%s: unknown type: %x", __func__, type);
			/* NOTREACHED */
		}

		/*
		 * Block execution of other commands while the reset is
		 * scheduled to a thread.
		 */
		ata_channel_freeze_locked(chp);
		chp->ch_flags |= type;

		cv_signal(&chp->ch_thr_idle);
		return;
	}

	/* Block execution of other commands during reset */
	ata_channel_freeze_locked(chp);

	/*
	 * If the reset has been scheduled to a thread, then clear the flag
	 * now so that the thread won't try to execute it if we happen to
	 * sleep, and thaw one more time after the reset.
	 */
	if (chp->ch_flags & type) {
		chp->ch_flags &= ~type;
		threset = true;
	}

	switch (type) {
	case ATACH_TH_RESET:
		(*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);

		KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
		for (int drive = 0; drive < chp->ch_ndrives; drive++)
			chp->ch_drive[drive].state = 0;
		break;

	case ATACH_TH_DRIVE_RESET:
	    {
		int drive = arg;

		KASSERT(drive <= chp->ch_ndrives);
		drvp = &chp->ch_drive[drive];
		(*atac->atac_bustype_ata->ata_reset_drive)(drvp, flags, NULL);
		drvp->state = 0;
		break;
	    }

	case ATACH_TH_RECOVERY:
	    {
		uint32_t tfd = (uint32_t)arg;

		KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
		KASSERT(atac->atac_bustype_ata->ata_recovery != NULL);

		SET(chp->ch_flags, ATACH_RECOVERING);
		(*atac->atac_bustype_ata->ata_recovery)(chp, flags, tfd);
		CLR(chp->ch_flags, ATACH_RECOVERING);
		break;
	    }

	default:
		panic("%s: unknown type: %x", __func__, type);
		/* NOTREACHED */
	}

	/*
	 * Thaw one extra time to clear the freeze done when the reset has
	 * been scheduled to the thread.
	 */
	if (threset)
		ata_channel_thaw_locked(chp);

	/* Allow commands to run again */
	ata_channel_thaw_locked(chp);

	/* Signal the thread in case there is an xfer to run */
	cv_signal(&chp->ch_thr_idle);
}
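/*
 * Usage sketch (patterns taken from this file): with the channel lock
 * held, a synchronous full-channel reset is requested as
 *
 *	ata_thread_run(chp, AT_WAIT, ATACH_TH_RESET, ATACH_NODRIVE);
 *
 * and a per-drive reset passes ATACH_TH_DRIVE_RESET with the drive index
 * as arg.  Without AT_WAIT/AT_POLL the request is only flagged and the
 * channel thread performs it later.
 */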
int
ata_addref(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 &&
	    adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(atac->atac_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

void
ata_delref(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 &&
	    adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(atac->atac_dev, 0);
	splx(s);
}

void
ata_print_modes(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	int drive;
	struct ata_drive_datas *drvp;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (drive = 0; drive < chp->ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];
		if (drvp->drive_type == ATA_DRIVET_NONE ||
		    drvp->drv_softc == NULL)
			continue;
		aprint_verbose("%s(%s:%d:%d): using PIO mode %d",
		    device_xname(drvp->drv_softc),
		    device_xname(atac->atac_dev),
		    chp->ch_channel, drvp->drive, drvp->PIO_mode);
#if NATA_DMA
		if (drvp->drive_flags & ATA_DRIVE_DMA)
			aprint_verbose(", DMA mode %d", drvp->DMA_mode);
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			aprint_verbose(", Ultra-DMA mode %d", drvp->UDMA_mode);
			if (drvp->UDMA_mode == 2)
				aprint_verbose(" (Ultra/33)");
			else if (drvp->UDMA_mode == 4)
				aprint_verbose(" (Ultra/66)");
			else if (drvp->UDMA_mode == 5)
				aprint_verbose(" (Ultra/100)");
			else if (drvp->UDMA_mode == 6)
				aprint_verbose(" (Ultra/133)");
		}
#endif	/* NATA_UDMA */
#endif	/* NATA_DMA */
#if NATA_DMA || NATA_PIOBM
		if (0
#if NATA_DMA
		    || (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA))
#endif
#if NATA_PIOBM
		    /* PIOBM capable controllers use DMA for PIO commands */
		    || (atac->atac_cap & ATAC_CAP_PIOBM)
#endif
		    )
			aprint_verbose(" (using DMA)");

		if (drvp->drive_flags & ATA_DRIVE_NCQ) {
			aprint_verbose(", NCQ (%d tags)%s",
			    ATA_REAL_OPENINGS(chp->ch_queue->queue_openings),
			    (drvp->drive_flags & ATA_DRIVE_NCQ_PRIO)
			    ? " w/PRIO" : "");
		} else if (drvp->drive_flags & ATA_DRIVE_WFUA)
			aprint_verbose(", WRITE DMA FUA EXT");

#endif	/* NATA_DMA || NATA_PIOBM */
		aprint_verbose("\n");
	}
}

#if NATA_DMA
/*
 * Downgrade the transfer mode of a drive after an error.  Return 1 if a
 * downgrade was possible, 0 otherwise.
 *
 * MUST BE CALLED AT splbio()!
 */
int
ata_downgrade_mode(struct ata_drive_datas *drvp, int flags)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int cf_flags = device_cfdata(drv_dev)->cf_flags;

	ata_channel_lock_owned(drvp->chnl_softc);

	/* if the drive or controller doesn't know its mode, we can't do much */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0 ||
	    (atac->atac_set_modes == NULL))
		return 0;
	/* the current drive mode was set by a config flag, leave it that way */
	if ((cf_flags & ATA_CONFIG_PIO_SET) ||
	    (cf_flags & ATA_CONFIG_DMA_SET) ||
	    (cf_flags & ATA_CONFIG_UDMA_SET))
		return 0;

#if NATA_UDMA
	/*
	 * If we were using Ultra-DMA mode, downgrade to the next lower mode.
	 */
	if ((drvp->drive_flags & ATA_DRIVE_UDMA) && drvp->UDMA_mode >= 2) {
		drvp->UDMA_mode--;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to Ultra-DMA mode %d\n",
		    drvp->UDMA_mode);
	}
#endif

	/*
	 * If we were using ultra-DMA, don't downgrade to multiword DMA.
	 */
	else if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
		drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
		drvp->PIO_mode = drvp->PIO_cap;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to PIO mode %d\n",
		    drvp->PIO_mode);
	} else /* already using PIO, can't downgrade */
		return 0;

	(*atac->atac_set_modes)(chp);
	ata_print_modes(chp);
	/* reset the channel, which will schedule all drives for setup */
	ata_thread_run(chp, flags, ATACH_TH_RESET, ATACH_NODRIVE);
	return 1;
}
#endif	/* NATA_DMA */
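/*
 * Example of the downgrade ladder above: a drive running Ultra-DMA mode 5
 * steps down one Ultra-DMA mode per call while UDMA_mode >= 2; below that
 * it drops DMA entirely and falls back to PIO at PIO_cap, after which no
 * further downgrade is possible.
 */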
/*
 * Probe a drive's capabilities, for use by the controller later.
 * Assumes drvp points to an existing drive.
 */
void
ata_probe_caps(struct ata_drive_datas *drvp)
{
	struct ataparams params, params2;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int i, printed = 0;
	const char *sep = "";
	int cf_flags;

	if (ata_get_params(drvp, AT_WAIT, &params) != CMD_OK) {
		/* IDENTIFY failed.  Can't tell more about the device */
		return;
	}
	if ((atac->atac_cap & (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) ==
	    (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) {
		/*
		 * Controller claims 16 and 32 bit transfers.
		 * Re-do an IDENTIFY with 32-bit transfers,
		 * and compare results.
		 */
		ata_channel_lock(chp);
		drvp->drive_flags |= ATA_DRIVE_CAP32;
		ata_channel_unlock(chp);
		ata_get_params(drvp, AT_WAIT, &params2);
		if (memcmp(&params, &params2, sizeof(struct ataparams)) != 0) {
			/* Not good.  fall back to 16bits */
			ata_channel_lock(chp);
			drvp->drive_flags &= ~ATA_DRIVE_CAP32;
			ata_channel_unlock(chp);
		} else {
			aprint_verbose_dev(drv_dev, "32-bit data port\n");
		}
	}
#if 0 /* Some ultra-DMA drives claim to only support ATA-3.  sigh */
	if (params.atap_ata_major > 0x01 &&
	    params.atap_ata_major != 0xffff) {
		for (i = 14; i > 0; i--) {
			if (params.atap_ata_major & (1 << i)) {
				aprint_verbose_dev(drv_dev,
				    "ATA version %d\n", i);
				drvp->ata_vers = i;
				break;
			}
		}
	}
#endif

	/* An ATAPI device is at least PIO mode 3 */
	if (drvp->drive_type == ATA_DRIVET_ATAPI)
		drvp->PIO_mode = 3;

	/*
	 * It's not in the specs, but it seems that some drives return
	 * 0xffff in atap_extensions when this field is invalid.
	 */
	if (params.atap_extensions != 0xffff &&
	    (params.atap_extensions & WDC_EXT_MODES)) {
		/*
		 * XXX some drives report something wrong here (they claim
		 * to support PIO mode 8!).  As the mode is coded on 3 bits
		 * in SET FEATURES, limit it to 7 (so limit i to 4).
		 * If a mode higher than 7 is found, abort.
		 */
		for (i = 7; i >= 0; i--) {
			if ((params.atap_piomode_supp & (1 << i)) == 0)
				continue;
			if (i > 4)
				return;
			/*
			 * See if the mode is accepted.
			 * If the controller can't set its PIO mode,
			 * assume the defaults are good, so don't try
			 * to set it.
			 */
			if (atac->atac_set_modes)
				/*
				 * It's OK to poll here, it's fast enough
				 * to not bother waiting for interrupt
				 */
				if (ata_set_mode(drvp, 0x08 | (i + 3),
				    AT_WAIT) != CMD_OK)
					continue;
			if (!printed) {
				aprint_verbose_dev(drv_dev,
				    "drive supports PIO mode %d", i + 3);
				sep = ",";
				printed = 1;
			}
			/*
			 * If the controller's driver can't set its PIO
			 * mode, use the highest one reported for the drive.
			 */
			if (atac->atac_set_modes == NULL ||
			    atac->atac_pio_cap >= i + 3) {
				drvp->PIO_mode = i + 3;
				drvp->PIO_cap = i + 3;
				break;
			}
		}
		if (!printed) {
			/*
			 * We didn't find a valid PIO mode.
			 * Assume the values returned for DMA are buggy too.
			 */
			return;
		}
		ata_channel_lock(chp);
		drvp->drive_flags |= ATA_DRIVE_MODE;
		ata_channel_unlock(chp);
		printed = 0;
		for (i = 7; i >= 0; i--) {
			if ((params.atap_dmamode_supp & (1 << i)) == 0)
				continue;
#if NATA_DMA
			if ((atac->atac_cap & ATAC_CAP_DMA) &&
			    atac->atac_set_modes != NULL)
				if (ata_set_mode(drvp, 0x20 | i, AT_WAIT)
				    != CMD_OK)
					continue;
#endif
			if (!printed) {
				aprint_verbose("%s DMA mode %d", sep, i);
				sep = ",";
				printed = 1;
			}
#if NATA_DMA
			if (atac->atac_cap & ATAC_CAP_DMA) {
				if (atac->atac_set_modes != NULL &&
				    atac->atac_dma_cap < i)
					continue;
				drvp->DMA_mode = i;
				drvp->DMA_cap = i;
				ata_channel_lock(chp);
				drvp->drive_flags |= ATA_DRIVE_DMA;
				ata_channel_unlock(chp);
			}
#endif
			break;
		}
		if (params.atap_extensions & WDC_EXT_UDMA_MODES) {
			printed = 0;
			for (i = 7; i >= 0; i--) {
				if ((params.atap_udmamode_supp & (1 << i))
				    == 0)
					continue;
#if NATA_UDMA
				if (atac->atac_set_modes != NULL &&
				    (atac->atac_cap & ATAC_CAP_UDMA))
					if (ata_set_mode(drvp, 0x40 | i,
					    AT_WAIT) != CMD_OK)
						continue;
#endif
				if (!printed) {
					aprint_verbose("%s Ultra-DMA mode %d",
					    sep, i);
					if (i == 2)
						aprint_verbose(" (Ultra/33)");
					else if (i == 4)
						aprint_verbose(" (Ultra/66)");
					else if (i == 5)
						aprint_verbose(" (Ultra/100)");
					else if (i == 6)
						aprint_verbose(" (Ultra/133)");
					sep = ",";
					printed = 1;
				}
#if NATA_UDMA
				if (atac->atac_cap & ATAC_CAP_UDMA) {
					if (atac->atac_set_modes != NULL &&
					    atac->atac_udma_cap < i)
						continue;
					drvp->UDMA_mode = i;
					drvp->UDMA_cap = i;
					ata_channel_lock(chp);
					drvp->drive_flags |= ATA_DRIVE_UDMA;
					ata_channel_unlock(chp);
				}
#endif
				break;
			}
		}
	}

	ata_channel_lock(chp);
	drvp->drive_flags &= ~ATA_DRIVE_NOSTREAM;
	if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		if (atac->atac_cap & ATAC_CAP_ATAPI_NOSTREAM)
			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
	} else {
		if (atac->atac_cap & ATAC_CAP_ATA_NOSTREAM)
			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
	}
	ata_channel_unlock(chp);

	/* Try to guess the ATA version here, if it didn't get reported */
	if (drvp->ata_vers == 0) {
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA)
			drvp->ata_vers = 4; /* should be at least ATA-4 */
		else
#endif
		if (drvp->PIO_cap > 2)
			drvp->ata_vers = 2; /* should be at least ATA-2 */
	}
	cf_flags = device_cfdata(drv_dev)->cf_flags;
	if (cf_flags & ATA_CONFIG_PIO_SET) {
		ata_channel_lock(chp);
		drvp->PIO_mode =
		    (cf_flags & ATA_CONFIG_PIO_MODES) >> ATA_CONFIG_PIO_OFF;
		drvp->drive_flags |= ATA_DRIVE_MODE;
		ata_channel_unlock(chp);
	}
#if NATA_DMA
	if ((atac->atac_cap & ATAC_CAP_DMA) == 0) {
		/* don't care about DMA modes */
		if (*sep != '\0')
			aprint_verbose("\n");
		return;
	}
	if (cf_flags & ATA_CONFIG_DMA_SET) {
		ata_channel_lock(chp);
		if ((cf_flags & ATA_CONFIG_DMA_MODES) ==
		    ATA_CONFIG_DMA_DISABLE) {
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
		} else {
			drvp->DMA_mode = (cf_flags & ATA_CONFIG_DMA_MODES) >>
			    ATA_CONFIG_DMA_OFF;
			drvp->drive_flags |= ATA_DRIVE_DMA | ATA_DRIVE_MODE;
		}
		ata_channel_unlock(chp);
	}

	/*
	 * Probe WRITE DMA FUA EXT.  Support is mandatory for devices
	 * supporting LBA48, but nevertheless confirm with the feature flag.
	 */
	if (drvp->drive_flags & ATA_DRIVE_DMA) {
		if ((params.atap_cmd2_en & ATA_CMD2_LBA48) != 0
		    && (params.atap_cmd_def & ATA_CMDE_WFE)) {
			drvp->drive_flags |= ATA_DRIVE_WFUA;
			aprint_verbose("%s WRITE DMA FUA", sep);
			sep = ",";
		}
	}

	/* Probe NCQ support - READ/WRITE FPDMA QUEUED command support */
	ata_channel_lock(chp);
	drvp->drv_openings = 1;
	if (params.atap_sata_caps & SATA_NATIVE_CMDQ) {
		if (atac->atac_cap & ATAC_CAP_NCQ)
			drvp->drive_flags |= ATA_DRIVE_NCQ;
		drvp->drv_openings =
		    (params.atap_queuedepth & WDC_QUEUE_DEPTH_MASK) + 1;
		aprint_verbose("%s NCQ (%d tags)", sep, drvp->drv_openings);
		sep = ",";

		if (params.atap_sata_caps & SATA_NCQ_PRIO) {
			drvp->drive_flags |= ATA_DRIVE_NCQ_PRIO;
			aprint_verbose(" w/PRIO");
		}
	}
	ata_channel_unlock(chp);

	if (*sep != '\0')
		aprint_verbose("\n");

#if NATA_UDMA
	if ((atac->atac_cap & ATAC_CAP_UDMA) == 0) {
		/* don't care about UDMA modes */
		return;
	}
	if (cf_flags & ATA_CONFIG_UDMA_SET) {
		ata_channel_lock(chp);
		if ((cf_flags & ATA_CONFIG_UDMA_MODES) ==
		    ATA_CONFIG_UDMA_DISABLE) {
			drvp->drive_flags &= ~ATA_DRIVE_UDMA;
		} else {
			drvp->UDMA_mode = (cf_flags & ATA_CONFIG_UDMA_MODES) >>
			    ATA_CONFIG_UDMA_OFF;
			drvp->drive_flags |= ATA_DRIVE_UDMA | ATA_DRIVE_MODE;
		}
		ata_channel_unlock(chp);
	}
#endif /* NATA_UDMA */
#endif /* NATA_DMA */
}

/* management of the /dev/atabus* devices */
int
atabusopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct atabus_softc *sc;
	int error;

	sc = device_lookup_private(&atabus_cd, minor(dev));
	if (sc == NULL)
		return (ENXIO);

	if (sc->sc_flags & ATABUSCF_OPEN)
		return (EBUSY);

	if ((error = ata_addref(sc->sc_chan)) != 0)
		return (error);

	sc->sc_flags |= ATABUSCF_OPEN;

	return (0);
}

int
atabusclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct atabus_softc *sc =
	    device_lookup_private(&atabus_cd, minor(dev));

	ata_delref(sc->sc_chan);

	sc->sc_flags &= ~ATABUSCF_OPEN;

	return (0);
}

int
atabusioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct atabus_softc *sc =
	    device_lookup_private(&atabus_cd, minor(dev));
	struct ata_channel *chp = sc->sc_chan;
	int min_drive, max_drive, drive;
	int error;

	/*
	 * Enforce write permission for ioctls that change the
	 * state of the bus.  Host adapter specific ioctls must
	 * be checked by the adapter driver.
	 */
	switch (cmd) {
	case ATABUSIOSCAN:
	case ATABUSIODETACH:
	case ATABUSIORESET:
		if ((flag & FWRITE) == 0)
			return (EBADF);
	}

	switch (cmd) {
	case ATABUSIORESET:
		ata_channel_lock(chp);
		ata_thread_run(sc->sc_chan, AT_WAIT | AT_POLL,
		    ATACH_TH_RESET, ATACH_NODRIVE);
		ata_channel_unlock(chp);
		return 0;
	case ATABUSIOSCAN:
	{
#if 0
		struct atabusioscan_args *a =
		    (struct atabusioscan_args *)addr;
#endif
		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
			return (EOPNOTSUPP);
		return (EOPNOTSUPP);
	}
	case ATABUSIODETACH:
	{
		struct atabusiodetach_args *a =
		    (struct atabusiodetach_args *)addr;
		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
			return (EOPNOTSUPP);
		switch (a->at_dev) {
		case -1:
			min_drive = 0;
			max_drive = 1;
			break;
		case 0:
		case 1:
			min_drive = max_drive = a->at_dev;
			break;
		default:
			return (EINVAL);
		}
		for (drive = min_drive; drive <= max_drive; drive++) {
			if (chp->ch_drive[drive].drv_softc != NULL) {
				error = config_detach(
				    chp->ch_drive[drive].drv_softc, 0);
				if (error)
					return (error);
				KASSERT(chp->ch_drive[drive].drv_softc ==
				    NULL);
			}
		}
		return 0;
	}
	default:
		return ENOTTY;
	}
}

static bool
atabus_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct atabus_softc *sc = device_private(dv);
	struct ata_channel *chp = sc->sc_chan;

	ata_channel_idle(chp);

	return true;
}

static bool
atabus_resume(device_t dv, const pmf_qual_t *qual)
{
	struct atabus_softc *sc = device_private(dv);
	struct ata_channel *chp = sc->sc_chan;

	/*
	 * XXX joerg: with wdc, the first channel unfreezes the controller.
	 * Move the reset and queue idling into wdc.
	 */
	ata_channel_lock(chp);
	if (chp->ch_queue->queue_freeze == 0) {
		ata_channel_unlock(chp);
		goto out;
	}

	/* unfreeze the queue and reset drives */
	ata_channel_thaw_locked(chp);

	/* reset the channel only if there are drives attached */
	if (chp->ch_ndrives > 0)
		ata_thread_run(chp, AT_WAIT, ATACH_TH_RESET, ATACH_NODRIVE);

	ata_channel_unlock(chp);

out:
	return true;
}

static int
atabus_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	struct atabus_initq *initq;
	int i;

	/*
	 * We can rescan a port multiplier atabus, even if some devices
	 * are still attached.
	 */
	if (chp->ch_satapmp_nports == 0) {
		if (chp->atapibus != NULL) {
			return EBUSY;
		}

		KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drv_softc != NULL) {
				return EBUSY;
			}
		}
	}

	initq = kmem_zalloc(sizeof(*initq), KM_SLEEP);
	initq->atabus_sc = sc;
	mutex_enter(&atabus_qlock);
	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
	mutex_exit(&atabus_qlock);
	config_pending_incr(sc->sc_dev);

	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RESCAN;
	cv_signal(&chp->ch_thr_idle);
	ata_channel_unlock(chp);

	return 0;
}

void
ata_delay(struct ata_channel *chp, int ms, const char *msg, int flags)
{
	KASSERT(mutex_owned(&chp->ch_lock));

	if ((flags & (AT_WAIT | AT_POLL)) == AT_POLL) {
		/*
		 * Can't use kpause(), we may be in interrupt context
		 * or taking a crash dump.
		 */
		delay(ms * 1000);
	} else {
		int pause = mstohz(ms);

		kpause(msg, false, pause > 0 ? pause : 1, &chp->ch_lock);
	}
}

void
atacmd_toncq(struct ata_xfer *xfer, uint8_t *cmd, uint16_t *count,
    uint16_t *features, uint8_t *device)
{
	if ((xfer->c_flags & C_NCQ) == 0) {
		/* FUA handling for non-NCQ drives */
		if ((xfer->c_bio.flags & ATA_FUA)
		    && *cmd == WDCC_WRITEDMA_EXT)
			*cmd = WDCC_WRITEDMA_FUA_EXT;

		return;
	}

	*cmd = (xfer->c_bio.flags & ATA_READ) ?
	    WDCC_READ_FPDMA_QUEUED : WDCC_WRITE_FPDMA_QUEUED;

	/* for FPDMA the block count is in features */
	*features = *count;

	/* NCQ tag */
	*count = (xfer->c_slot << 3);

	if (xfer->c_bio.flags & ATA_PRIO_HIGH)
		*count |= WDSC_PRIO_HIGH;

	/* other device flags */
	if (xfer->c_bio.flags & ATA_FUA)
		*device |= WDSD_FUA;
}

void
ata_wait_cmd(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue *chq = chp->ch_queue;
	struct ata_command *ata_c = &xfer->c_ata_c;

	ata_channel_lock(chp);

	while ((ata_c->flags & AT_DONE) == 0)
		cv_wait(&chq->c_cmd_finish, &chp->ch_lock);

	ata_channel_unlock(chp);

	KASSERT((ata_c->flags & AT_DONE) != 0);
}
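
/*
 * Example of the taskfile rewrite performed by atacmd_toncq() above for
 * an NCQ write with FUA.  The values are illustrative only (slot 5,
 * block count 8, C_NCQ set on the xfer, ATA_PRIO_HIGH clear); in real
 * use they come from the queued xfer:
 *
 *	uint8_t  cmd      = WDCC_WRITEDMA_EXT;
 *	uint16_t count    = 8;
 *	uint16_t features = 0;
 *	uint8_t  device   = 0;
 *
 *	atacmd_toncq(xfer, &cmd, &count, &features, &device);
 *	// cmd      == WDCC_WRITE_FPDMA_QUEUED
 *	// features == 8       (block count moved into features)
 *	// count    == 5 << 3  (NCQ tag carried in bits 7:3 of count)
 *	// device   has WDSD_FUA set (FUA via the device register)
 */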