1 /* $NetBSD: ata.c,v 1.153 2019/10/21 18:58:57 christos Exp $ */ 2 3 /* 4 * Copyright (c) 1998, 2001 Manuel Bouyer. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 */ 26 27 #include <sys/cdefs.h> 28 __KERNEL_RCSID(0, "$NetBSD: ata.c,v 1.153 2019/10/21 18:58:57 christos Exp $"); 29 30 #include "opt_ata.h" 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/device.h> 36 #include <sys/conf.h> 37 #include <sys/fcntl.h> 38 #include <sys/proc.h> 39 #include <sys/kthread.h> 40 #include <sys/errno.h> 41 #include <sys/ataio.h> 42 #include <sys/kmem.h> 43 #include <sys/intr.h> 44 #include <sys/bus.h> 45 #include <sys/once.h> 46 #include <sys/bitops.h> 47 48 #define ATABUS_PRIVATE 49 50 #include <dev/ata/ataconf.h> 51 #include <dev/ata/atareg.h> 52 #include <dev/ata/atavar.h> 53 #include <dev/ic/wdcvar.h> /* for PIOBM */ 54 55 #include "ioconf.h" 56 #include "locators.h" 57 58 #include "atapibus.h" 59 #include "ataraid.h" 60 #include "sata_pmp.h" 61 62 #if NATARAID > 0 63 #include <dev/ata/ata_raidvar.h> 64 #endif 65 #if NSATA_PMP > 0 66 #include <dev/ata/satapmpvar.h> 67 #endif 68 #include <dev/ata/satapmpreg.h> 69 70 #define DEBUG_FUNCS 0x08 71 #define DEBUG_PROBE 0x10 72 #define DEBUG_DETACH 0x20 73 #define DEBUG_XFERS 0x40 74 #ifdef ATADEBUG 75 #ifndef ATADEBUG_MASK 76 #define ATADEBUG_MASK 0 77 #endif 78 int atadebug_mask = ATADEBUG_MASK; 79 #define ATADEBUG_PRINT(args, level) \ 80 if (atadebug_mask & (level)) \ 81 printf args 82 #else 83 #define ATADEBUG_PRINT(args, level) 84 #endif 85 86 static ONCE_DECL(ata_init_ctrl); 87 static struct pool ata_xfer_pool; 88 89 /* 90 * A queue of atabus instances, used to ensure the same bus probe order 91 * for a given hardware configuration at each boot. Kthread probing 92 * devices on a atabus. Only one probing at once. 93 */ 94 static TAILQ_HEAD(, atabus_initq) atabus_initq_head; 95 static kmutex_t atabus_qlock; 96 static kcondvar_t atabus_qcv; 97 static lwp_t * atabus_cfg_lwp; 98 99 /***************************************************************************** 100 * ATA bus layer. 
 *
 * ATA controllers attach an atabus instance, which handles probing the bus
 * for drives, etc.
 *****************************************************************************/

dev_type_open(atabusopen);
dev_type_close(atabusclose);
dev_type_ioctl(atabusioctl);

/* Character-device switch for the atabus control device (/dev/atabusN). */
const struct cdevsw atabus_cdevsw = {
	.d_open = atabusopen,
	.d_close = atabusclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = atabusioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

static void atabus_childdetached(device_t, device_t);
static int atabus_rescan(device_t, const char *, const int *);
static bool atabus_resume(device_t, const pmf_qual_t *);
static bool atabus_suspend(device_t, const pmf_qual_t *);
static void atabusconfig_thread(void *);

static void ata_channel_idle(struct ata_channel *);
static void ata_activate_xfer_locked(struct ata_channel *, struct ata_xfer *);
static void ata_channel_freeze_locked(struct ata_channel *);
static void ata_thread_wake_locked(struct ata_channel *);

/*
 * atabus_init:
 *
 *	Initialize ATA subsystem structures: the xfer pool, the queue of
 *	atabus instances being configured, and its lock/condvar.  Run
 *	exactly once via RUN_ONCE() from atabus_attach().
 */
static int
atabus_init(void)
{

	pool_init(&ata_xfer_pool, sizeof(struct ata_xfer), 0, 0, 0,
	    "ataspl", NULL, IPL_BIO);
	TAILQ_INIT(&atabus_initq_head);
	mutex_init(&atabus_qlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&atabus_qcv, "atainitq");
	return 0;
}

/*
 * atabusprint:
 *
 *	Autoconfiguration print routine used by ATA controllers when
 *	attaching an atabus instance.
 */
int
atabusprint(void *aux, const char *pnp)
{
	struct ata_channel *chan = aux;

	if (pnp)
		aprint_normal("atabus at %s", pnp);
	aprint_normal(" channel %d", chan->ch_channel);

	return (UNCONF);
}

/*
 * ataprint:
 *
 *	Autoconfiguration print routine.
 */
int
ataprint(void *aux, const char *pnp)
{
	struct ata_device *adev = aux;

	if (pnp)
		aprint_normal("wd at %s", pnp);
	aprint_normal(" drive %d", adev->adev_drv_data->drive);

	return (UNCONF);
}

/*
 * ata_channel_attach:
 *
 *	Common parts of attaching an atabus to an ATA controller channel.
 */
void
ata_channel_attach(struct ata_channel *chp)
{
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	ata_channel_init(chp);

	KASSERT(chp->ch_queue != NULL);

	chp->atabus = config_found_ia(chp->ch_atac->atac_dev, "ata", chp,
	    atabusprint);
}

/*
 * ata_channel_detach:
 *
 *	Common parts of detaching an atabus to an ATA controller channel.
 */
void
ata_channel_detach(struct ata_channel *chp)
{
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	ata_channel_destroy(chp);

	chp->ch_flags |= ATACH_DETACHED;
}

/*
 * atabusconfig:
 *
 *	Probe the drives on the channel and, if any were found, hand the
 *	rest of child configuration off to atabusconfig_thread().  Called
 *	from the channel's worker thread (atabus_thread).
 */
static void
atabusconfig(struct atabus_softc *atabus_sc)
{
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, error;

	/* we are in the atabus's thread context */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RUN;
	ata_channel_unlock(chp);

	/*
	 * Probe for the drives attached to controller, unless a PMP
	 * is already known
	 */
	/* XXX for SATA devices we will power up all drives at once */
	if (chp->ch_satapmp_nports == 0)
		(*atac->atac_probe)(chp);

	if (chp->ch_ndrives >= 2) {
		ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n",
		    chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type),
		    DEBUG_PROBE);
	}

	/* next operations will occur in a separate thread */
	ata_channel_lock(chp);
	chp->ch_flags &= ~ATACH_TH_RUN;
	ata_channel_unlock(chp);

	/* Make sure the devices probe in atabus order to avoid jitter.
	 */
	mutex_enter(&atabus_qlock);
	for (;;) {
		atabus_initq = TAILQ_FIRST(&atabus_initq_head);
		if (atabus_initq->atabus_sc == atabus_sc)
			break;
		/* not our turn yet; wait for the instance ahead of us */
		cv_wait(&atabus_qcv, &atabus_qlock);
	}
	mutex_exit(&atabus_qlock);

	ata_channel_lock(chp);

	/* If no drives, abort here */
	if (chp->ch_drive == NULL)
		goto out;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++)
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE)
			break;
	if (i == chp->ch_ndrives)
		goto out;

	/* Shortcut in case we've been shutdown */
	if (chp->ch_flags & ATACH_SHUTDOWN)
		goto out;

	ata_channel_unlock(chp);

	/* Child attach continues asynchronously in atabusconfig_thread(). */
	if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread,
	    atabus_sc, &atabus_cfg_lwp,
	    "%scnf", device_xname(atac->atac_dev))) != 0)
		aprint_error_dev(atac->atac_dev,
		    "unable to create config thread: error %d\n", error);
	return;

 out:
	ata_channel_unlock(chp);

	/* Nothing to configure: dequeue ourselves and wake the next bus. */
	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	kmem_free(atabus_initq, sizeof(*atabus_initq));

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
}

/*
 * atabus_configthread: finish attach of atabus's children, in a separate
 * kernel thread.
 */
static void
atabusconfig_thread(void *arg)
{
	struct atabus_softc *atabus_sc = arg;
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, s;

	/* XXX seems wrong */
	mutex_enter(&atabus_qlock);
	atabus_initq = TAILQ_FIRST(&atabus_initq_head);
	KASSERT(atabus_initq->atabus_sc == atabus_sc);
	mutex_exit(&atabus_qlock);

	/*
	 * First look for a port multiplier
	 */
	if (chp->ch_ndrives == PMP_MAX_DRIVES &&
	    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
#if NSATA_PMP > 0
		satapmp_attach(chp);
#else
		aprint_error_dev(atabus_sc->sc_dev,
		    "SATA port multiplier not supported\n");
		/* no problems going on, all drives are ATA_DRIVET_NONE */
#endif
	}

	/*
	 * Attach an ATAPI bus, if needed.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) {
#if NATAPIBUS > 0
			(*atac->atac_atapibus_attach)(atabus_sc);
#else
			/*
			 * Fake the autoconfig "not configured" message
			 */
			aprint_normal("atapibus at %s not configured\n",
			    device_xname(atac->atac_dev));
			chp->atapibus = NULL;
			s = splbio();
			for (i = 0; i < chp->ch_ndrives; i++) {
				if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
					chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			}
			splx(s);
#endif
			break;
		}
	}

	/* Attach the ATA (wd) children that were probed. */
	for (i = 0; i < chp->ch_ndrives; i++) {
		struct ata_device adev;
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA &&
		    chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) {
			continue;
		}
		if (chp->ch_drive[i].drv_softc != NULL)
			continue;
		memset(&adev, 0, sizeof(struct ata_device));
		adev.adev_bustype = atac->atac_bustype_ata;
		adev.adev_channel = chp->ch_channel;
		adev.adev_drv_data =
		    &chp->ch_drive[i];
		chp->ch_drive[i].drv_softc = config_found_ia(atabus_sc->sc_dev,
		    "ata_hl", &adev, ataprint);
		if (chp->ch_drive[i].drv_softc != NULL) {
			ata_probe_caps(&chp->ch_drive[i]);
		} else {
			s = splbio();
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	}

	/* now that we know the drives, the controller can set its modes */
	if (atac->atac_set_modes) {
		(*atac->atac_set_modes)(chp);
		ata_print_modes(chp);
	}
#if NATARAID > 0
	if (atac->atac_cap & ATAC_CAP_RAID) {
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) {
				ata_raid_check_component(
				    chp->ch_drive[i].drv_softc);
			}
		}
	}
#endif /* NATARAID > 0 */

	/*
	 * reset drive_flags for unattached devices, reset state for attached
	 * ones
	 */
	s = splbio();
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			continue;
		if (chp->ch_drive[i].drv_softc == NULL) {
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		} else
			chp->ch_drive[i].state = 0;
	}
	splx(s);

	/* Done: dequeue ourselves and let the next atabus configure. */
	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	kmem_free(atabus_initq, sizeof(*atabus_initq));

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
	kthread_exit(0);
}

/*
 * atabus_thread:
 *
 *	Worker thread for the ATA bus.
 */
static void
atabus_thread(void *arg)
{
	struct atabus_softc *sc = arg;
	struct ata_channel *chp = sc->sc_chan;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer;
	int i, rv;

	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RUN;

	/*
	 * Probe the drives.  Reset type to indicate to controllers
	 * that can re-probe that all drives must be probed.
	 *
	 * Note: ch_ndrives may be changed during the probe.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		chp->ch_drive[i].drive_flags = 0;
		chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
	}
	ata_channel_unlock(chp);

	atabusconfig(sc);

	/* Event loop: sleep until there is thread work, then service it. */
	ata_channel_lock(chp);
	for (;;) {
		if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_TH_DRIVE_RESET
		    | ATACH_TH_RECOVERY | ATACH_SHUTDOWN)) == 0 &&
		    (chq->queue_active == 0 || chq->queue_freeze == 0)) {
			chp->ch_flags &= ~ATACH_TH_RUN;
			cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
			chp->ch_flags |= ATACH_TH_RUN;
		}
		if (chp->ch_flags & ATACH_SHUTDOWN) {
			break;
		}
		if (chp->ch_flags & ATACH_TH_RESCAN) {
			chp->ch_flags &= ~ATACH_TH_RESCAN;
			ata_channel_unlock(chp);
			atabusconfig(sc);
			ata_channel_lock(chp);
		}
		if (chp->ch_flags & ATACH_TH_RESET) {
			/* this will unfreeze the channel */
			ata_thread_run(chp, AT_WAIT,
			    ATACH_TH_RESET, ATACH_NODRIVE);
		} else if (chp->ch_flags & ATACH_TH_DRIVE_RESET) {
			/* this will unfreeze the channel */
			for (i = 0; i < chp->ch_ndrives; i++) {
				struct ata_drive_datas *drvp;

				drvp = &chp->ch_drive[i];

				if (drvp->drive_flags & ATA_DRIVE_TH_RESET) {
					ata_thread_run(chp,
					    AT_WAIT, ATACH_TH_DRIVE_RESET, i);
				}
			}
			chp->ch_flags &= ~ATACH_TH_DRIVE_RESET;
		} else if (chp->ch_flags & ATACH_TH_RECOVERY) {
			/*
			 * This will unfreeze the channel; drops locks during
			 * run, so must wrap in splbio()/splx() to avoid
			 * spurious interrupts. XXX MPSAFE
			 */
			int s = splbio();
			ata_thread_run(chp, AT_WAIT, ATACH_TH_RECOVERY,
			    chp->recovery_tfd);
			splx(s);
		} else if (chq->queue_active > 0 && chq->queue_freeze == 1) {
			/*
			 * Caller has bumped queue_freeze, decrease it. This
			 * flow shall never be executed for NCQ commands.
			 */
			KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
			KASSERT(chq->queue_active == 1);

			ata_channel_thaw_locked(chp);
			xfer = ata_queue_get_active_xfer_locked(chp);

			KASSERT(xfer != NULL);
			KASSERT((xfer->c_flags & C_POLL) == 0);

			switch ((rv = ata_xfer_start(xfer))) {
			case ATASTART_STARTED:
			case ATASTART_POLL:
			case ATASTART_ABORT:
				break;
			case ATASTART_TH:
			default:
				panic("%s: ata_xfer_start() unexpected rv %d",
				    __func__, rv);
				/* NOTREACHED */
			}
		} else if (chq->queue_freeze > 1)
			panic("%s: queue_freeze", __func__);

		/* Try to run down the queue once channel is unfrozen */
		if (chq->queue_freeze == 0) {
			ata_channel_unlock(chp);
			atastart(chp);
			ata_channel_lock(chp);
		}
	}
	/* ATACH_SHUTDOWN: announce our exit to atabus_detach() and leave. */
	chp->ch_thread = NULL;
	cv_signal(&chp->ch_thr_idle);
	ata_channel_unlock(chp);
	kthread_exit(0);
}

/*
 * ata_thread_wake_locked:
 *
 *	Freeze the channel and wake its worker thread.  Must be called
 *	with the channel lock held; the thread is expected to thaw the
 *	channel once it has handled the pending work.
 */
static void
ata_thread_wake_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
	ata_channel_freeze_locked(chp);
	cv_signal(&chp->ch_thr_idle);
}

/*
 * atabus_match:
 *
 *	Autoconfiguration match routine.
 */
static int
atabus_match(device_t parent, cfdata_t cf, void *aux)
{
	struct ata_channel *chp = aux;

	if (chp == NULL)
		return (0);

	if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel &&
	    cf->cf_loc[ATACF_CHANNEL] != ATACF_CHANNEL_DEFAULT)
		return (0);

	return (1);
}

/*
 * atabus_attach:
 *
 *	Autoconfiguration attach routine.
 */
static void
atabus_attach(device_t parent, device_t self, void *aux)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = aux;
	struct atabus_initq *initq;
	int error;

	sc->sc_chan = chp;

	aprint_normal("\n");
	aprint_naive("\n");

	sc->sc_dev = self;

	if (ata_addref(chp))
		return;

	RUN_ONCE(&ata_init_ctrl, atabus_init);

	/* Queue ourselves so buses configure in attach order. */
	initq = kmem_zalloc(sizeof(*initq), KM_SLEEP);
	initq->atabus_sc = sc;
	mutex_enter(&atabus_qlock);
	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
	mutex_exit(&atabus_qlock);
	config_pending_incr(sc->sc_dev);

	/* XXX MPSAFE - no KTHREAD_MPSAFE, so protected by KERNEL_LOCK() */
	if ((error = kthread_create(PRI_NONE, 0, NULL, atabus_thread, sc,
	    &chp->ch_thread, "%s", device_xname(self))) != 0)
		aprint_error_dev(self,
		    "unable to create kernel thread: error %d\n", error);

	if (!pmf_device_register(self, atabus_suspend, atabus_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

/*
 * atabus_detach:
 *
 *	Autoconfiguration detach routine.
 */
static int
atabus_detach(device_t self, int flags)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	device_t dev = NULL;
	int i, error = 0;

	/*
	 * Detach atapibus and its children.
	 */
	if ((dev = chp->atapibus) != NULL) {
		ATADEBUG_PRINT(("atabus_detach: %s: detaching %s\n",
		    device_xname(self), device_xname(dev)), DEBUG_DETACH);

		error = config_detach(dev, flags);
		if (error)
			goto out;
		KASSERT(chp->atapibus == NULL);
	}

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
			ATADEBUG_PRINT(("%s.%d: %s: detaching %s\n", __func__,
			    __LINE__, device_xname(self), device_xname(dev)),
			    DEBUG_DETACH);
			error = config_detach(dev, flags);
			if (error)
				goto out;
			/* both reset by atabus_childdetached() */
			KASSERT(chp->ch_drive[i].drv_softc == NULL);
			KASSERT(chp->ch_drive[i].drive_type == 0);
		}
	}

	/* Shutdown the channel. */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_SHUTDOWN;
	/* wait for the worker thread to see ATACH_SHUTDOWN and exit */
	while (chp->ch_thread != NULL) {
		cv_signal(&chp->ch_thr_idle);
		cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
	}
	ata_channel_unlock(chp);

	atabus_free_drives(chp);

 out:
#ifdef ATADEBUG
	if (dev != NULL && error != 0)
		ATADEBUG_PRINT(("%s: %s: error %d detaching %s\n", __func__,
		    device_xname(self), error, device_xname(dev)),
		    DEBUG_DETACH);
#endif /* ATADEBUG */

	return (error);
}

/*
 * atabus_childdetached:
 *
 *	Bookkeeping when a child (atapibus or a drive) has detached:
 *	clear the matching drv_softc/drive slots.  Panics if the child
 *	is not one of ours.
 */
void
atabus_childdetached(device_t self, device_t child)
{
	bool found = false;
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	int i;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	/*
	 * atapibus detached.
	 */
	if (child == chp->atapibus) {
		chp->atapibus = NULL;
		found = true;
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI)
				continue;
			KASSERT(chp->ch_drive[i].drv_softc != NULL);
			chp->ch_drive[i].drv_softc = NULL;
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		}
	}

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		if (child == chp->ch_drive[i].drv_softc) {
			chp->ch_drive[i].drv_softc = NULL;
			chp->ch_drive[i].drive_flags = 0;
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
				chp->ch_satapmp_nports = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			found = true;
		}
	}

	if (!found)
		panic("%s: unknown child %p", device_xname(self),
		    (const void *)child);
}

CFATTACH_DECL3_NEW(atabus, sizeof(struct atabus_softc),
    atabus_match, atabus_attach, atabus_detach, NULL, atabus_rescan,
    atabus_childdetached, DVF_DETACH_SHUTDOWN);

/*****************************************************************************
 * Common ATA bus operations.
 *****************************************************************************/

/* allocate/free the channel's ch_drive[] array */
int
atabus_alloc_drives(struct ata_channel *chp, int ndrives)
{
	int i;
	if (chp->ch_ndrives != ndrives)
		atabus_free_drives(chp);
	if (chp->ch_drive == NULL) {
		void *drv;

		/* drop the channel lock while sleeping for memory */
		ata_channel_unlock(chp);
		drv = kmem_zalloc(sizeof(*chp->ch_drive) * ndrives, KM_SLEEP);
		ata_channel_lock(chp);

		if (chp->ch_drive != NULL) {
			/* lost the race */
			kmem_free(drv, sizeof(*chp->ch_drive) * ndrives);
			return 0;
		}
		chp->ch_drive = drv;
	}
	for (i = 0; i < ndrives; i++) {
		chp->ch_drive[i].chnl_softc = chp;
		chp->ch_drive[i].drive = i;
	}
	chp->ch_ndrives = ndrives;
	return 0;
}

void
atabus_free_drives(struct ata_channel *chp)
{
#ifdef DIAGNOSTIC
	int i;
	int dopanic = 0;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) {
			printf("%s: ch_drive[%d] type %d != ATA_DRIVET_NONE\n",
			    device_xname(chp->atabus), i,
			    chp->ch_drive[i].drive_type);
			dopanic = 1;
		}
		if (chp->ch_drive[i].drv_softc != NULL) {
			printf("%s: ch_drive[%d] attached to %s\n",
			    device_xname(chp->atabus), i,
			    device_xname(chp->ch_drive[i].drv_softc));
			dopanic = 1;
		}
	}
	if (dopanic)
		panic("atabus_free_drives");
#endif

	if (chp->ch_drive == NULL)
		return;
	kmem_free(chp->ch_drive,
	    sizeof(struct ata_drive_datas) * chp->ch_ndrives);
	chp->ch_ndrives = 0;
	chp->ch_drive = NULL;
}

/* Get the disk's parameters */
int
ata_get_params(struct ata_drive_datas *drvp, uint8_t flags,
    struct ataparams *prms)
{
	struct ata_xfer *xfer;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	char *tb;
	int i, rv;
	uint16_t *p;

	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp, false);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	tb = kmem_zalloc(ATA_BSIZE, KM_SLEEP);
	memset(prms, 0, sizeof(struct ataparams));

	/* Build an IDENTIFY (ATA) or IDENTIFY PACKET (ATAPI) command. */
	if (drvp->drive_type == ATA_DRIVET_ATA) {
		xfer->c_ata_c.r_command = WDCC_IDENTIFY;
		xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 3000; /* 3s */
	} else if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		xfer->c_ata_c.r_command = ATAPI_IDENTIFY_DEVICE;
		xfer->c_ata_c.r_st_bmask = 0;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 10000; /* 10s */
	} else {
		ATADEBUG_PRINT(("ata_get_parms: no disks\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	xfer->c_ata_c.flags = AT_READ | flags;
	xfer->c_ata_c.data = tb;
	xfer->c_ata_c.bcount = ATA_BSIZE;
	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
	    xfer) != ATACMD_COMPLETE) {
		ATADEBUG_PRINT(("ata_get_parms: wdc_exec_command failed\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_AGAIN;
		goto out;
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		ATADEBUG_PRINT(("ata_get_parms: ata_c.flags=0x%x\n",
		    xfer->c_ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	/* if we didn't read any data something is wrong */
	if ((xfer->c_ata_c.flags & AT_XFDONE) == 0) {
		rv = CMD_ERR;
		goto out;
	}

	/* Read in parameter block. */
	memcpy(prms, tb, sizeof(struct ataparams));

	/*
	 * Shuffle string byte order.
	 * ATAPI NEC, Mitsumi and Pioneer drives and
	 * old ATA TDK CompactFlash cards
	 * have different byte order.
	 */
#if BYTE_ORDER == BIG_ENDIAN
# define M(n)	prms->atap_model[(n) ^ 1]
#else
# define M(n)	prms->atap_model[n]
#endif
	if (
#if BYTE_ORDER == BIG_ENDIAN
	    !
#endif
	    ((drvp->drive_type == ATA_DRIVET_ATAPI) ?
	     ((M(0) == 'N' && M(1) == 'E') ||
	      (M(0) == 'F' && M(1) == 'X') ||
	      (M(0) == 'P' && M(1) == 'i')) :
	     ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) {
		rv = CMD_OK;
		goto out;
	}
#undef M
	/* Byte-swap the identify strings in place, 16 bits at a time. */
	for (i = 0; i < sizeof(prms->atap_model); i += 2) {
		p = (uint16_t *)(prms->atap_model + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_serial); i += 2) {
		p = (uint16_t *)(prms->atap_serial + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_revision); i += 2) {
		p = (uint16_t *)(prms->atap_revision + i);
		*p = bswap16(*p);
	}

	rv = CMD_OK;
 out:
	kmem_free(tb, ATA_BSIZE);
	ata_free_xfer(chp, xfer);
	return rv;
}

/*
 * ata_set_mode:
 *
 *	Issue SET FEATURES/SET TRANSFER MODE to program the drive's
 *	transfer mode.  Returns CMD_OK, CMD_AGAIN or CMD_ERR.
 */
int
ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags)
{
	struct ata_xfer *xfer;
	int rv;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;

	ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp, false);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	xfer->c_ata_c.r_command = SET_FEATURES;
	xfer->c_ata_c.r_st_bmask = 0;
	xfer->c_ata_c.r_st_pmask = 0;
	xfer->c_ata_c.r_features = WDSF_SET_MODE;
	xfer->c_ata_c.r_count = mode;
	xfer->c_ata_c.flags = flags;
	xfer->c_ata_c.timeout = 1000; /* 1s */
	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
	    xfer) != ATACMD_COMPLETE) {
		rv = CMD_AGAIN;
		goto out;
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		rv = CMD_ERR;
		goto out;
	}

	rv = CMD_OK;

out:
	ata_free_xfer(chp, xfer);
	return rv;
}

#if NATA_DMA
void
ata_dmaerr(struct ata_drive_datas *drvp, int flags)
{
	ata_channel_lock_owned(drvp->chnl_softc);

	/*
	 * Downgrade decision: if we get NERRS_MAX in NXFER.
	 * We start with n_dmaerrs set to NERRS_MAX-1 so that the
	 * first error within the first NXFER ops will immediately trigger
	 * a downgrade.
	 * If we got an error and n_xfers is bigger than NXFER reset counters.
	 */
	drvp->n_dmaerrs++;
	if (drvp->n_dmaerrs >= NERRS_MAX && drvp->n_xfers <= NXFER) {
		ata_downgrade_mode(drvp, flags);
		drvp->n_dmaerrs = NERRS_MAX-1;
		drvp->n_xfers = 0;
		return;
	}
	if (drvp->n_xfers > NXFER) {
		drvp->n_dmaerrs = 1; /* just got an error */
		drvp->n_xfers = 1; /* restart counting from this error */
	}
}
#endif	/* NATA_DMA */

/*
 * freeze the queue and wait for the controller to be idle. Caller has to
Caller has to 985 * unfreeze/restart the queue 986 */ 987 static void 988 ata_channel_idle(struct ata_channel *chp) 989 { 990 ata_channel_lock(chp); 991 ata_channel_freeze_locked(chp); 992 while (chp->ch_queue->queue_active > 0) { 993 chp->ch_queue->queue_flags |= QF_IDLE_WAIT; 994 cv_timedwait(&chp->ch_queue->queue_idle, &chp->ch_lock, 1); 995 } 996 ata_channel_unlock(chp); 997 } 998 999 /* 1000 * Add a command to the queue and start controller. 1001 * 1002 * MUST BE CALLED AT splbio()! 1003 */ 1004 void 1005 ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer) 1006 { 1007 1008 ATADEBUG_PRINT(("ata_exec_xfer %p channel %d drive %d\n", xfer, 1009 chp->ch_channel, xfer->c_drive), DEBUG_XFERS); 1010 1011 /* complete xfer setup */ 1012 xfer->c_chp = chp; 1013 1014 ata_channel_lock(chp); 1015 1016 /* 1017 * Standard commands are added to the end of command list, but 1018 * recovery commands must be run immediatelly. 1019 */ 1020 if ((xfer->c_flags & C_SKIP_QUEUE) == 0) 1021 SIMPLEQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer, 1022 c_xferchain); 1023 else 1024 SIMPLEQ_INSERT_HEAD(&chp->ch_queue->queue_xfer, xfer, 1025 c_xferchain); 1026 1027 /* 1028 * if polling and can sleep, wait for the xfer to be at head of queue 1029 */ 1030 if ((xfer->c_flags & (C_POLL | C_WAIT)) == (C_POLL | C_WAIT)) { 1031 while (chp->ch_queue->queue_active > 0 || 1032 SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) { 1033 xfer->c_flags |= C_WAITACT; 1034 cv_wait(&chp->ch_queue->c_active, &chp->ch_lock); 1035 xfer->c_flags &= ~C_WAITACT; 1036 } 1037 1038 /* 1039 * Free xfer now if it there was attempt to free it 1040 * while we were waiting. 
1041 */ 1042 if ((xfer->c_flags & (C_FREE|C_WAITTIMO)) == C_FREE) { 1043 ata_channel_unlock(chp); 1044 1045 ata_free_xfer(chp, xfer); 1046 return; 1047 } 1048 } 1049 1050 ata_channel_unlock(chp); 1051 1052 ATADEBUG_PRINT(("atastart from ata_exec_xfer, flags 0x%x\n", 1053 chp->ch_flags), DEBUG_XFERS); 1054 atastart(chp); 1055 } 1056 1057 /* 1058 * Start I/O on a controller, for the given channel. 1059 * The first xfer may be not for our channel if the channel queues 1060 * are shared. 1061 * 1062 * MUST BE CALLED AT splbio()! 1063 * 1064 * XXX FIS-based switching with PMP 1065 * Currently atastart() never schedules concurrent NCQ transfers to more than 1066 * one drive, even when channel has several SATA drives attached via PMP. 1067 * To support concurrent transfers to different drives with PMP, it would be 1068 * necessary to implement FIS-based switching support in controller driver, 1069 * and then adjust error handling and recovery to stop assuming at most 1070 * one active drive. 1071 */ 1072 void 1073 atastart(struct ata_channel *chp) 1074 { 1075 struct atac_softc *atac = chp->ch_atac; 1076 struct ata_queue *chq = chp->ch_queue; 1077 struct ata_xfer *xfer, *axfer; 1078 bool skipq; 1079 1080 #ifdef ATA_DEBUG 1081 int spl1, spl2; 1082 1083 spl1 = splbio(); 1084 spl2 = splbio(); 1085 if (spl2 != spl1) { 1086 printf("atastart: not at splbio()\n"); 1087 panic("atastart"); 1088 } 1089 splx(spl2); 1090 splx(spl1); 1091 #endif /* ATA_DEBUG */ 1092 1093 ata_channel_lock(chp); 1094 1095 again: 1096 /* is there a xfer ? 
*/ 1097 if ((xfer = SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL) { 1098 ATADEBUG_PRINT(("%s(chp=%p): channel %d queue_xfer is empty\n", 1099 __func__, chp, chp->ch_channel), DEBUG_XFERS); 1100 goto out; 1101 } 1102 1103 /* 1104 * if someone is waiting for the command to be active, wake it up 1105 * and let it process the command 1106 */ 1107 if (__predict_false(xfer->c_flags & C_WAITACT)) { 1108 ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d " 1109 "wait active\n", xfer, chp->ch_channel, xfer->c_drive), 1110 DEBUG_XFERS); 1111 cv_broadcast(&chp->ch_queue->c_active); 1112 goto out; 1113 } 1114 1115 skipq = ISSET(xfer->c_flags, C_SKIP_QUEUE); 1116 1117 /* is the queue frozen? */ 1118 if (__predict_false(!skipq && chq->queue_freeze > 0)) { 1119 if (chq->queue_flags & QF_IDLE_WAIT) { 1120 chq->queue_flags &= ~QF_IDLE_WAIT; 1121 cv_signal(&chp->ch_queue->queue_idle); 1122 } 1123 ATADEBUG_PRINT(("%s(chp=%p): channel %d drive %d " 1124 "queue frozen: %d\n", 1125 __func__, chp, chp->ch_channel, xfer->c_drive, 1126 chq->queue_freeze), 1127 DEBUG_XFERS); 1128 goto out; 1129 } 1130 1131 /* all xfers on same queue must belong to the same channel */ 1132 KASSERT(xfer->c_chp == chp); 1133 1134 /* 1135 * Can only take the command if there are no current active 1136 * commands, or if the command is NCQ and the active commands are also 1137 * NCQ. If PM is in use and HBA driver doesn't support/use FIS-based 1138 * switching, can only send commands to single drive. 1139 * Need only check first xfer. 1140 * XXX FIS-based switching - revisit 1141 */ 1142 if (!skipq && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) { 1143 if (!ISSET(xfer->c_flags, C_NCQ) || 1144 !ISSET(axfer->c_flags, C_NCQ) || 1145 xfer->c_drive != axfer->c_drive) 1146 goto out; 1147 } 1148 1149 struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive]; 1150 1151 /* 1152 * Are we on limit of active xfers ? 
If the queue has more 1153 * than 1 openings, we keep one slot reserved for recovery or dump. 1154 */ 1155 KASSERT(chq->queue_active <= chq->queue_openings); 1156 const uint8_t chq_openings = (!skipq && chq->queue_openings > 1) 1157 ? (chq->queue_openings - 1) : chq->queue_openings; 1158 const uint8_t drv_openings = ISSET(xfer->c_flags, C_NCQ) 1159 ? drvp->drv_openings : ATA_MAX_OPENINGS; 1160 if (chq->queue_active >= MIN(chq_openings, drv_openings)) { 1161 if (skipq) { 1162 panic("%s: channel %d busy, xfer not possible", 1163 __func__, chp->ch_channel); 1164 } 1165 1166 ATADEBUG_PRINT(("%s(chp=%p): channel %d completely busy\n", 1167 __func__, chp, chp->ch_channel), DEBUG_XFERS); 1168 goto out; 1169 } 1170 1171 /* Slot allocation can fail if drv_openings < ch_openings */ 1172 if (!ata_queue_alloc_slot(chp, &xfer->c_slot, drv_openings)) 1173 goto out; 1174 1175 if (__predict_false(atac->atac_claim_hw)) { 1176 if (!atac->atac_claim_hw(chp, 0)) { 1177 ata_queue_free_slot(chp, xfer->c_slot); 1178 goto out; 1179 } 1180 } 1181 1182 /* Now committed to start the xfer */ 1183 1184 ATADEBUG_PRINT(("%s(chp=%p): xfer %p channel %d drive %d\n", 1185 __func__, chp, xfer, chp->ch_channel, xfer->c_drive), DEBUG_XFERS); 1186 if (drvp->drive_flags & ATA_DRIVE_RESET) { 1187 drvp->drive_flags &= ~ATA_DRIVE_RESET; 1188 drvp->state = 0; 1189 } 1190 1191 if (ISSET(xfer->c_flags, C_NCQ)) 1192 SET(chp->ch_flags, ATACH_NCQ); 1193 else 1194 CLR(chp->ch_flags, ATACH_NCQ); 1195 1196 SIMPLEQ_REMOVE_HEAD(&chq->queue_xfer, c_xferchain); 1197 1198 ata_activate_xfer_locked(chp, xfer); 1199 1200 if (atac->atac_cap & ATAC_CAP_NOIRQ) 1201 KASSERT(xfer->c_flags & C_POLL); 1202 1203 switch (ata_xfer_start(xfer)) { 1204 case ATASTART_TH: 1205 case ATASTART_ABORT: 1206 /* don't start any further commands in this case */ 1207 goto out; 1208 default: 1209 /* nothing to do */ 1210 break; 1211 } 1212 1213 /* Queue more commands if possible, but not during recovery or dump */ 1214 if (!skipq && 
chq->queue_active < chq->queue_openings) 1215 goto again; 1216 1217 out: 1218 ata_channel_unlock(chp); 1219 } 1220 1221 int 1222 ata_xfer_start(struct ata_xfer *xfer) 1223 { 1224 struct ata_channel *chp = xfer->c_chp; 1225 int rv; 1226 1227 KASSERT(mutex_owned(&chp->ch_lock)); 1228 1229 rv = xfer->ops->c_start(chp, xfer); 1230 switch (rv) { 1231 case ATASTART_STARTED: 1232 /* nothing to do */ 1233 break; 1234 case ATASTART_TH: 1235 /* postpone xfer to thread */ 1236 ata_thread_wake_locked(chp); 1237 break; 1238 case ATASTART_POLL: 1239 /* can happen even in thread context for some ATAPI devices */ 1240 ata_channel_unlock(chp); 1241 KASSERT(xfer->ops != NULL && xfer->ops->c_poll != NULL); 1242 xfer->ops->c_poll(chp, xfer); 1243 ata_channel_lock(chp); 1244 break; 1245 case ATASTART_ABORT: 1246 ata_channel_unlock(chp); 1247 KASSERT(xfer->ops != NULL && xfer->ops->c_abort != NULL); 1248 xfer->ops->c_abort(chp, xfer); 1249 ata_channel_lock(chp); 1250 break; 1251 } 1252 1253 return rv; 1254 } 1255 1256 static void 1257 ata_activate_xfer_locked(struct ata_channel *chp, struct ata_xfer *xfer) 1258 { 1259 struct ata_queue * const chq = chp->ch_queue; 1260 1261 KASSERT(mutex_owned(&chp->ch_lock)); 1262 KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0); 1263 1264 if ((xfer->c_flags & C_SKIP_QUEUE) == 0) 1265 TAILQ_INSERT_TAIL(&chq->active_xfers, xfer, c_activechain); 1266 else { 1267 /* 1268 * Must go to head, so that ata_queue_get_active_xfer() 1269 * returns the recovery command, and not some other 1270 * random active transfer. 1271 */ 1272 TAILQ_INSERT_HEAD(&chq->active_xfers, xfer, c_activechain); 1273 } 1274 chq->active_xfers_used |= __BIT(xfer->c_slot); 1275 chq->queue_active++; 1276 } 1277 1278 /* 1279 * Does it's own locking, does not require splbio(). 
 * waitok - whether to block waiting for a free xfer
 */
struct ata_xfer *
ata_get_xfer(struct ata_channel *chp, bool waitok)
{
	/* zero-filled so callers start from a clean xfer */
	return pool_get(&ata_xfer_pool,
	    PR_ZERO | (waitok ? PR_WAITOK : PR_NOWAIT));
}

/*
 * ata_deactivate_xfer() must be always called prior to ata_free_xfer()
 */
void
ata_free_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue *chq = chp->ch_queue;

	ata_channel_lock(chp);

	if (__predict_false(xfer->c_flags & (C_WAITACT|C_WAITTIMO))) {
		/* Someone is waiting for this xfer, so we can't free now */
		xfer->c_flags |= C_FREE;
		cv_broadcast(&chq->c_active);
		ata_channel_unlock(chp);
		return;
	}

	/* XXX move PIOBM and free_gw to deactivate? */
#if NATA_PIOBM		/* XXX wdc dependent code */
	if (__predict_false(xfer->c_flags & C_PIOBM)) {
		struct wdc_softc *wdc = CHAN_TO_WDC(chp);

		/* finish the busmastering PIO */
		(*wdc->piobm_done)(wdc->dma_arg,
		    chp->ch_channel, xfer->c_drive);
		chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_PIOBM_WAIT | ATACH_IRQ_WAIT);
	}
#endif

	if (__predict_false(chp->ch_atac->atac_free_hw))
		chp->ch_atac->atac_free_hw(chp);

	ata_channel_unlock(chp);

	/* privately-allocated xfers are owned by their caller, not the pool */
	if (__predict_true(!ISSET(xfer->c_flags, C_PRIVATE_ALLOC)))
		pool_put(&ata_xfer_pool, xfer);
}

/*
 * Remove an xfer from the active list and release its slot.
 * Flags the xfer C_WAITTIMO if the timeout callout is concurrently firing,
 * so the timeout handler can detect the race (see ata_timo_xfer_check()).
 */
void
ata_deactivate_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	ata_channel_lock(chp);

	KASSERT(chq->queue_active > 0);
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) != 0);

	/* Stop only when this is last active xfer */
	if (chq->queue_active == 1)
		callout_stop(&chp->c_timo_callout);

	/* timeout handler already running; let it see the race flag */
	if (callout_invoking(&chp->c_timo_callout))
		xfer->c_flags |= C_WAITTIMO;

	TAILQ_REMOVE(&chq->active_xfers, xfer, c_activechain);
	chq->active_xfers_used &= ~__BIT(xfer->c_slot);
	chq->queue_active--;

	ata_queue_free_slot(chp, xfer->c_slot);

	if (xfer->c_flags & C_WAIT)
		cv_broadcast(&chq->c_cmd_finish);

	ata_channel_unlock(chp);
}

/*
 * Called in c_intr hook. Must be called before any deactivations
 * are done - if there is drain pending, it calls c_kill_xfer hook which
 * deactivates the xfer.
 * Calls c_kill_xfer with channel lock free.
 * Returns true if caller should just exit without further processing.
 * Caller must not further access any part of xfer or any related controller
 * structures in that case, it should just return.
 */
bool
ata_waitdrain_xfer_check(struct ata_channel *chp, struct ata_xfer *xfer)
{
	int drive = xfer->c_drive;
	bool draining = false;

	ata_channel_lock(chp);

	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		ata_channel_unlock(chp);

		xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE);

		ata_channel_lock(chp);
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		cv_signal(&chp->ch_queue->queue_drain);
		draining = true;
	}

	ata_channel_unlock(chp);

	return draining;
}

/*
 * Check for race of normal transfer handling vs. timeout.
 */
bool
ata_timo_xfer_check(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	ata_channel_lock(chp);

	if (xfer->c_flags & C_WAITTIMO) {
		xfer->c_flags &= ~C_WAITTIMO;

		/* Handle race vs. ata_free_xfer() */
		if (xfer->c_flags & C_FREE) {
			xfer->c_flags &= ~C_FREE;
			ata_channel_unlock(chp);

			device_printf(drvp->drv_softc,
			    "xfer %"PRIxPTR" freed while invoking timeout\n",
			    (intptr_t)xfer & PAGE_MASK);

			ata_free_xfer(chp, xfer);
			return true;
		}

		/* Race vs. callout_stop() in ata_deactivate_xfer() */
		ata_channel_unlock(chp);

		device_printf(drvp->drv_softc,
		    "xfer %"PRIxPTR" deactivated while invoking timeout\n",
		    (intptr_t)xfer & PAGE_MASK);
		return true;
	}

	ata_channel_unlock(chp);

	/* No race, proceed with timeout handling */
	return false;
}

/*
 * Kill off all active xfers for a ata_channel.
 *
 * Must be called with channel lock held.
 */
void
ata_kill_active(struct ata_channel *chp, int reason, int flags)
{
	struct ata_queue * const chq = chp->ch_queue;
	struct ata_xfer *xfer, *xfernext;

	KASSERT(mutex_owned(&chp->ch_lock));

	TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, xfernext) {
		/* c_kill_xfer is called with the channel lock dropped */
		ata_channel_unlock(chp);
		xfer->ops->c_kill_xfer(xfer->c_chp, xfer, reason);
		ata_channel_lock(chp);
	}
}

/*
 * Kill off all pending xfers for a drive.
 */
void
ata_kill_pending(struct ata_drive_datas *drvp)
{
	struct ata_channel * const chp = drvp->chnl_softc;
	struct ata_queue * const chq = chp->ch_queue;
	struct ata_xfer *xfer;

	ata_channel_lock(chp);

	/* Kill all pending transfers */
	while ((xfer = SIMPLEQ_FIRST(&chq->queue_xfer))) {
		KASSERT(xfer->c_chp == chp);

		/*
		 * NOTE(review): if the head xfer belongs to a different
		 * drive, this 'continue' re-fetches the same head and never
		 * advances - looks like a potential infinite loop; confirm
		 * whether the queue can only contain this drive's xfers here.
		 */
		if (xfer->c_drive != drvp->drive)
			continue;

		SIMPLEQ_REMOVE_HEAD(&chp->ch_queue->queue_xfer, c_xferchain);

		/*
		 * Keep the lock, so that we get deadlock (and 'locking against
		 * myself' with LOCKDEBUG), instead of silent
		 * data corruption, if the hook tries to call back into
		 * middle layer for inactive xfer.
		 */
		xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE_INACTIVE);
	}

	/* Wait until all active transfers on the drive finish */
	while (chq->queue_active > 0) {
		bool drv_active = false;

		TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
			KASSERT(xfer->c_chp == chp);

			if (xfer->c_drive == drvp->drive) {
				drv_active = true;
				break;
			}
		}

		if (!drv_active) {
			/* all finished */
			break;
		}

		drvp->drive_flags |= ATA_DRIVE_WAITDRAIN;
		cv_wait(&chq->queue_drain, &chp->ch_lock);
	}

	ata_channel_unlock(chp);
}

/*
 * Bump the queue freeze count; atastart() issues no new xfers while the
 * queue is frozen. Channel lock must be held.
 */
static void
ata_channel_freeze_locked(struct ata_channel *chp)
{
	chp->ch_queue->queue_freeze++;

	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
}

void
ata_channel_freeze(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_freeze_locked(chp);
	ata_channel_unlock(chp);
}

/*
 * Drop one freeze reference. Channel lock must be held; count must be > 0.
 */
void
ata_channel_thaw_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
	KASSERT(chp->ch_queue->queue_freeze > 0);

	chp->ch_queue->queue_freeze--;

	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
}

/*
 * ata_thread_run:
 *
 *	Reset an ATA channel. Channel lock must be held. arg is type-specific.
 */
void
ata_thread_run(struct ata_channel *chp, int flags, int type, int arg)
{
	struct atac_softc *atac = chp->ch_atac;
	bool threset = false;
	struct ata_drive_datas *drvp;

	ata_channel_lock_owned(chp);

	/*
	 * If we can poll or wait it's OK, otherwise wake up the
	 * kernel thread to do it for us.
	 */
	ATADEBUG_PRINT(("%s flags 0x%x ch_flags 0x%x\n",
	    __func__, flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
	if ((flags & (AT_POLL | AT_WAIT)) == 0) {
		switch (type) {
		case ATACH_TH_RESET:
			if (chp->ch_flags & ATACH_TH_RESET) {
				/* No need to schedule another reset */
				return;
			}
			break;
		case ATACH_TH_DRIVE_RESET:
		    {
			int drive = arg;

			KASSERT(drive <= chp->ch_ndrives);
			drvp = &chp->ch_drive[drive];

			if (drvp->drive_flags & ATA_DRIVE_TH_RESET) {
				/* No need to schedule another reset */
				return;
			}
			drvp->drive_flags |= ATA_DRIVE_TH_RESET;
			break;
		    }
		case ATACH_TH_RECOVERY:
		    {
			uint32_t tfd = (uint32_t)arg;

			KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
			chp->recovery_tfd = tfd;
			break;
		    }
		default:
			panic("%s: unknown type: %x", __func__, type);
			/* NOTREACHED */
		}

		/*
		 * Block execution of other commands while reset is scheduled
		 * to a thread.
		 */
		ata_channel_freeze_locked(chp);
		chp->ch_flags |= type;

		cv_signal(&chp->ch_thr_idle);
		return;
	}

	/* Block execution of other commands during reset */
	ata_channel_freeze_locked(chp);

	/*
	 * If reset has been scheduled to a thread, then clear
	 * the flag now so that the thread won't try to execute it if
	 * we happen to sleep, and thaw one more time after the reset.
	 */
	if (chp->ch_flags & type) {
		chp->ch_flags &= ~type;
		threset = true;
	}

	switch (type) {
	case ATACH_TH_RESET:
		(*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);

		KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
		for (int drive = 0; drive < chp->ch_ndrives; drive++)
			chp->ch_drive[drive].state = 0;
		break;

	case ATACH_TH_DRIVE_RESET:
	    {
		int drive = arg;

		KASSERT(drive <= chp->ch_ndrives);
		drvp = &chp->ch_drive[drive];
		(*atac->atac_bustype_ata->ata_reset_drive)(drvp, flags, NULL);
		drvp->state = 0;
		break;
	    }

	case ATACH_TH_RECOVERY:
	    {
		uint32_t tfd = (uint32_t)arg;

		KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
		KASSERT(atac->atac_bustype_ata->ata_recovery != NULL);

		SET(chp->ch_flags, ATACH_RECOVERING);
		(*atac->atac_bustype_ata->ata_recovery)(chp, flags, tfd);
		CLR(chp->ch_flags, ATACH_RECOVERING);
		break;
	    }

	default:
		panic("%s: unknown type: %x", __func__, type);
		/* NOTREACHED */
	}

	/*
	 * Thaw one extra time to clear the freeze done when the reset has
	 * been scheduled to the thread.
	 */
	if (threset)
		ata_channel_thaw_locked(chp);

	/* Allow commands to run again */
	ata_channel_thaw_locked(chp);

	/* Signal the thread in case there is an xfer to run */
	cv_signal(&chp->ch_thr_idle);
}

/*
 * Take a reference on the adapter; enables the adapter hardware on
 * first reference via adapt_enable. Returns 0 or the enable error.
 */
int
ata_addref(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 &&
	    adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(atac->atac_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * Drop a reference on the adapter; disables the adapter hardware when
 * the last reference goes away.
 */
void
ata_delref(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 &&
	    adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(atac->atac_dev, 0);
	splx(s);
}

/*
 * Print the negotiated transfer modes (PIO/DMA/UDMA/NCQ) for every
 * configured drive on the channel, via aprint_verbose.
 */
void
ata_print_modes(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	int drive;
	struct ata_drive_datas *drvp;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (drive = 0; drive < chp->ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];
		if (drvp->drive_type == ATA_DRIVET_NONE ||
		    drvp->drv_softc == NULL)
			continue;
		aprint_verbose("%s(%s:%d:%d): using PIO mode %d",
		    device_xname(drvp->drv_softc),
		    device_xname(atac->atac_dev),
		    chp->ch_channel, drvp->drive, drvp->PIO_mode);
#if NATA_DMA
		if (drvp->drive_flags & ATA_DRIVE_DMA)
			aprint_verbose(", DMA mode %d", drvp->DMA_mode);
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			aprint_verbose(", Ultra-DMA mode %d", drvp->UDMA_mode);
			if (drvp->UDMA_mode == 2)
				aprint_verbose(" (Ultra/33)");
			else if (drvp->UDMA_mode == 4)
				aprint_verbose(" (Ultra/66)");
			else if (drvp->UDMA_mode == 5)
				aprint_verbose(" (Ultra/100)");
			else if (drvp->UDMA_mode == 6)
				aprint_verbose(" (Ultra/133)");
		}
#endif	/* NATA_UDMA */
#endif	/* NATA_DMA */
#if NATA_DMA || NATA_PIOBM
		if (0
#if NATA_DMA
		    || (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA))
#endif
#if NATA_PIOBM
		    /* PIOBM capable controllers use DMA for PIO commands */
		    || (atac->atac_cap & ATAC_CAP_PIOBM)
#endif
		    )
			aprint_verbose(" (using DMA)");

		if (drvp->drive_flags & ATA_DRIVE_NCQ) {
			aprint_verbose(", NCQ (%d tags)%s",
			    ATA_REAL_OPENINGS(chp->ch_queue->queue_openings),
			    (drvp->drive_flags & ATA_DRIVE_NCQ_PRIO)
			    ? " w/PRIO" : "");
		} else if (drvp->drive_flags & ATA_DRIVE_WFUA)
			aprint_verbose(", WRITE DMA FUA EXT");

#endif	/* NATA_DMA || NATA_PIOBM */
		aprint_verbose("\n");
	}
}

#if NATA_DMA
/*
 * downgrade the transfer mode of a drive after an error. return 1 if
 * downgrade was possible, 0 otherwise.
 *
 * MUST BE CALLED AT splbio()!
 */
int
ata_downgrade_mode(struct ata_drive_datas *drvp, int flags)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int cf_flags = device_cfdata(drv_dev)->cf_flags;

	ata_channel_lock_owned(drvp->chnl_softc);

	/* if drive or controller don't know its mode, we can't do much */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0 ||
	    (atac->atac_set_modes == NULL))
		return 0;
	/* current drive mode was set by a config flag, leave it that way */
	if ((cf_flags & ATA_CONFIG_PIO_SET) ||
	    (cf_flags & ATA_CONFIG_DMA_SET) ||
	    (cf_flags & ATA_CONFIG_UDMA_SET))
		return 0;

#if NATA_UDMA
	/*
	 * If we were using Ultra-DMA mode, downgrade to the next lower mode.
	 */
	if ((drvp->drive_flags & ATA_DRIVE_UDMA) && drvp->UDMA_mode >= 2) {
		drvp->UDMA_mode--;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to Ultra-DMA mode %d\n",
		    drvp->UDMA_mode);
	}
#endif

	/*
	 * If we were using ultra-DMA, don't downgrade to multiword DMA.
	 */
	else if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
		drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
		drvp->PIO_mode = drvp->PIO_cap;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to PIO mode %d\n",
		    drvp->PIO_mode);
	} else /* already using PIO, can't downgrade */
		return 0;

	(*atac->atac_set_modes)(chp);
	ata_print_modes(chp);
	/* reset the channel, which will schedule all drives for setup */
	ata_thread_run(chp, flags, ATACH_TH_RESET, ATACH_NODRIVE);
	return 1;
}
#endif	/* NATA_DMA */

/*
 * Probe drive's capabilities, for use by the controller later
 * Assumes drvp points to an existing drive.
 */
void
ata_probe_caps(struct ata_drive_datas *drvp)
{
	struct ataparams params, params2;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int i, printed = 0;
	const char *sep = "";
	int cf_flags;

	if (ata_get_params(drvp, AT_WAIT, &params) != CMD_OK) {
		/* IDENTIFY failed. Can't tell more about the device */
		return;
	}
	if ((atac->atac_cap & (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) ==
	    (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) {
		/*
		 * Controller claims 16 and 32 bit transfers.
		 * Re-do an IDENTIFY with 32-bit transfers,
		 * and compare results.
		 */
		ata_channel_lock(chp);
		drvp->drive_flags |= ATA_DRIVE_CAP32;
		ata_channel_unlock(chp);
		ata_get_params(drvp, AT_WAIT, &params2);
		if (memcmp(&params, &params2, sizeof(struct ataparams)) != 0) {
			/* Not good. fall back to 16bits */
			ata_channel_lock(chp);
			drvp->drive_flags &= ~ATA_DRIVE_CAP32;
			ata_channel_unlock(chp);
		} else {
			aprint_verbose_dev(drv_dev, "32-bit data port\n");
		}
	}
#if 0 /* Some ultra-DMA drives claims to only support ATA-3. sigh */
	if (params.atap_ata_major > 0x01 &&
	    params.atap_ata_major != 0xffff) {
		for (i = 14; i > 0; i--) {
			if (params.atap_ata_major & (1 << i)) {
				aprint_verbose_dev(drv_dev,
				    "ATA version %d\n", i);
				drvp->ata_vers = i;
				break;
			}
		}
	}
#endif

	/* An ATAPI device is at least PIO mode 3 */
	if (drvp->drive_type == ATA_DRIVET_ATAPI)
		drvp->PIO_mode = 3;

	/*
	 * It's not in the specs, but it seems that some drive
	 * returns 0xffff in atap_extensions when this field is invalid
	 */
	if (params.atap_extensions != 0xffff &&
	    (params.atap_extensions & WDC_EXT_MODES)) {
		/*
		 * XXX some drives report something wrong here (they claim to
		 * support PIO mode 8 !). As mode is coded on 3 bits in
		 * SET FEATURE, limit it to 7 (so limit i to 4).
		 * If higher mode than 7 is found, abort.
		 */
		for (i = 7; i >= 0; i--) {
			if ((params.atap_piomode_supp & (1 << i)) == 0)
				continue;
			if (i > 4)
				return;
			/*
			 * See if mode is accepted.
			 * If the controller can't set its PIO mode,
			 * assume the defaults are good, so don't try
			 * to set it
			 */
			if (atac->atac_set_modes)
				/*
				 * It's OK to poll here, it's fast enough
				 * to not bother waiting for interrupt
				 */
				if (ata_set_mode(drvp, 0x08 | (i + 3),
				    AT_WAIT) != CMD_OK)
					continue;
			if (!printed) {
				aprint_verbose_dev(drv_dev,
				    "drive supports PIO mode %d", i + 3);
				sep = ",";
				printed = 1;
			}
			/*
			 * If controller's driver can't set its PIO mode,
			 * get the higher one for the drive.
			 */
			if (atac->atac_set_modes == NULL ||
			    atac->atac_pio_cap >= i + 3) {
				drvp->PIO_mode = i + 3;
				drvp->PIO_cap = i + 3;
				break;
			}
		}
		if (!printed) {
			/*
			 * We didn't find a valid PIO mode.
			 * Assume the values returned for DMA are buggy too
			 */
			return;
		}
		ata_channel_lock(chp);
		drvp->drive_flags |= ATA_DRIVE_MODE;
		ata_channel_unlock(chp);
		printed = 0;
		for (i = 7; i >= 0; i--) {
			if ((params.atap_dmamode_supp & (1 << i)) == 0)
				continue;
#if NATA_DMA
			if ((atac->atac_cap & ATAC_CAP_DMA) &&
			    atac->atac_set_modes != NULL)
				if (ata_set_mode(drvp, 0x20 | i, AT_WAIT)
				    != CMD_OK)
					continue;
#endif
			if (!printed) {
				aprint_verbose("%s DMA mode %d", sep, i);
				sep = ",";
				printed = 1;
			}
#if NATA_DMA
			if (atac->atac_cap & ATAC_CAP_DMA) {
				if (atac->atac_set_modes != NULL &&
				    atac->atac_dma_cap < i)
					continue;
				drvp->DMA_mode = i;
				drvp->DMA_cap = i;
				ata_channel_lock(chp);
				drvp->drive_flags |= ATA_DRIVE_DMA;
				ata_channel_unlock(chp);
			}
#endif
			break;
		}
		if (params.atap_extensions & WDC_EXT_UDMA_MODES) {
			printed = 0;
			for (i = 7; i >= 0; i--) {
				if ((params.atap_udmamode_supp & (1 << i))
				    == 0)
					continue;
#if NATA_UDMA
				if (atac->atac_set_modes != NULL &&
				    (atac->atac_cap & ATAC_CAP_UDMA))
					if (ata_set_mode(drvp, 0x40 | i,
					    AT_WAIT) != CMD_OK)
						continue;
#endif
				if (!printed) {
					aprint_verbose("%s Ultra-DMA mode %d",
					    sep, i);
					if (i == 2)
						aprint_verbose(" (Ultra/33)");
					else if (i == 4)
						aprint_verbose(" (Ultra/66)");
					else if (i == 5)
						aprint_verbose(" (Ultra/100)");
					else if (i == 6)
						aprint_verbose(" (Ultra/133)");
					sep = ",";
					printed = 1;
				}
#if NATA_UDMA
				if (atac->atac_cap & ATAC_CAP_UDMA) {
					if (atac->atac_set_modes != NULL &&
					    atac->atac_udma_cap < i)
						continue;
					drvp->UDMA_mode = i;
					drvp->UDMA_cap = i;
					ata_channel_lock(chp);
					drvp->drive_flags |= ATA_DRIVE_UDMA;
					ata_channel_unlock(chp);
				}
#endif
				break;
			}
		}
	}

	ata_channel_lock(chp);
	drvp->drive_flags &= ~ATA_DRIVE_NOSTREAM;
	if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		if (atac->atac_cap & ATAC_CAP_ATAPI_NOSTREAM)
			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
	} else {
		if (atac->atac_cap & ATAC_CAP_ATA_NOSTREAM)
			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
	}
	ata_channel_unlock(chp);

	/* Try to guess ATA version here, if it didn't get reported */
	if (drvp->ata_vers == 0) {
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA)
			drvp->ata_vers = 4; /* should be at least ATA-4 */
		else
#endif
		if (drvp->PIO_cap > 2)
			drvp->ata_vers = 2; /* should be at least ATA-2 */
	}
	cf_flags = device_cfdata(drv_dev)->cf_flags;
	if (cf_flags & ATA_CONFIG_PIO_SET) {
		ata_channel_lock(chp);
		drvp->PIO_mode =
		    (cf_flags & ATA_CONFIG_PIO_MODES) >> ATA_CONFIG_PIO_OFF;
		drvp->drive_flags |= ATA_DRIVE_MODE;
		ata_channel_unlock(chp);
	}
#if NATA_DMA
	if ((atac->atac_cap & ATAC_CAP_DMA) == 0) {
		/* don't care about DMA modes */
		if (*sep != '\0')
			aprint_verbose("\n");
		return;
	}
	if (cf_flags & ATA_CONFIG_DMA_SET) {
		ata_channel_lock(chp);
		if ((cf_flags & ATA_CONFIG_DMA_MODES) ==
		    ATA_CONFIG_DMA_DISABLE) {
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
		} else {
			drvp->DMA_mode = (cf_flags & ATA_CONFIG_DMA_MODES) >>
			    ATA_CONFIG_DMA_OFF;
			drvp->drive_flags |= ATA_DRIVE_DMA | ATA_DRIVE_MODE;
		}
		ata_channel_unlock(chp);
	}

	/*
	 * Probe WRITE DMA FUA EXT. Support is mandatory for devices
	 * supporting LBA48, but nevertheless confirm with the feature flag.
	 */
	if (drvp->drive_flags & ATA_DRIVE_DMA) {
		if ((params.atap_cmd2_en & ATA_CMD2_LBA48) != 0
		    && (params.atap_cmd_def & ATA_CMDE_WFE)) {
			drvp->drive_flags |= ATA_DRIVE_WFUA;
			aprint_verbose("%s WRITE DMA FUA", sep);
			sep = ",";
		}
	}

	/* Probe NCQ support - READ/WRITE FPDMA QUEUED command support */
	ata_channel_lock(chp);
	drvp->drv_openings = 1;
	if (params.atap_sata_caps & SATA_NATIVE_CMDQ) {
		if (atac->atac_cap & ATAC_CAP_NCQ)
			drvp->drive_flags |= ATA_DRIVE_NCQ;
		drvp->drv_openings =
		    (params.atap_queuedepth & WDC_QUEUE_DEPTH_MASK) + 1;
		aprint_verbose("%s NCQ (%d tags)", sep, drvp->drv_openings);
		sep = ",";

		if (params.atap_sata_caps & SATA_NCQ_PRIO) {
			drvp->drive_flags |= ATA_DRIVE_NCQ_PRIO;
			aprint_verbose(" w/PRIO");
		}
	}
	ata_channel_unlock(chp);

	if (*sep != '\0')
		aprint_verbose("\n");

#if NATA_UDMA
	if ((atac->atac_cap & ATAC_CAP_UDMA) == 0) {
		/* don't care about UDMA modes */
		return;
	}
	if (cf_flags & ATA_CONFIG_UDMA_SET) {
		ata_channel_lock(chp);
		if ((cf_flags & ATA_CONFIG_UDMA_MODES) ==
		    ATA_CONFIG_UDMA_DISABLE) {
			drvp->drive_flags &= ~ATA_DRIVE_UDMA;
		} else {
			drvp->UDMA_mode = (cf_flags & ATA_CONFIG_UDMA_MODES) >>
			    ATA_CONFIG_UDMA_OFF;
			drvp->drive_flags |= ATA_DRIVE_UDMA | ATA_DRIVE_MODE;
		}
		ata_channel_unlock(chp);
	}
#endif	/* NATA_UDMA */
#endif	/* NATA_DMA */
}

/* management of the /dev/atabus* devices */
int
atabusopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct atabus_softc *sc;
	int error;

	sc = device_lookup_private(&atabus_cd, minor(dev));
	if (sc == NULL)
		return (ENXIO);

	/* exclusive open */
	if (sc->sc_flags & ATABUSCF_OPEN)
		return (EBUSY);

	if ((error = ata_addref(sc->sc_chan)) != 0)
		return (error);

	sc->sc_flags |= ATABUSCF_OPEN;

	return (0);
}


int
atabusclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct atabus_softc *sc =
	    device_lookup_private(&atabus_cd, minor(dev));

	ata_delref(sc->sc_chan);

	sc->sc_flags &= ~ATABUSCF_OPEN;

	return (0);
}

int
atabusioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct atabus_softc *sc =
	    device_lookup_private(&atabus_cd, minor(dev));
	struct ata_channel *chp = sc->sc_chan;
	int min_drive, max_drive, drive;
	int error;

	/*
	 * Enforce write permission for ioctls that change the
	 * state of the bus. Host adapter specific ioctls must
	 * be checked by the adapter driver.
	 */
	switch (cmd) {
	case ATABUSIOSCAN:
	case ATABUSIODETACH:
	case ATABUSIORESET:
		if ((flag & FWRITE) == 0)
			return (EBADF);
	}

	switch (cmd) {
	case ATABUSIORESET:
		ata_channel_lock(chp);
		ata_thread_run(sc->sc_chan, AT_WAIT | AT_POLL,
		    ATACH_TH_RESET, ATACH_NODRIVE);
		ata_channel_unlock(chp);
		return 0;
	case ATABUSIOSCAN:
	{
#if 0
		struct atabusioscan_args *a=
		    (struct atabusioscan_args *)addr;
#endif
		/* scan not implemented; old-style drives can't be rescanned */
		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
			return (EOPNOTSUPP);
		return (EOPNOTSUPP);
	}
	case ATABUSIODETACH:
	{
		struct atabusiodetach_args *a=
		    (struct atabusiodetach_args *)addr;
		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
			return (EOPNOTSUPP);
		switch (a->at_dev) {
		case -1:
			/* detach both drives */
			min_drive = 0;
			max_drive = 1;
			break;
		case 0:
		case 1:
			min_drive = max_drive = a->at_dev;
			break;
		default:
			return (EINVAL);
		}
		for (drive = min_drive; drive <= max_drive; drive++) {
			if (chp->ch_drive[drive].drv_softc != NULL) {
				error = config_detach(
				    chp->ch_drive[drive].drv_softc, 0);
				if (error)
					return (error);
				KASSERT(chp->ch_drive[drive].drv_softc == NULL);
			}
		}
		return 0;
	}
	default:
		return ENOTTY;
	}
}

static bool
atabus_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct atabus_softc *sc = device_private(dv);
	struct ata_channel *chp = sc->sc_chan;

	/* wait for the queue to drain; leaves the channel frozen */
	ata_channel_idle(chp);

	return true;
}

static bool
atabus_resume(device_t dv, const pmf_qual_t *qual)
{
	struct atabus_softc *sc = device_private(dv);
	struct ata_channel *chp = sc->sc_chan;

	/*
	 * XXX joerg: with wdc, the first channel unfreezes the controller.
	 * Move the reset and queue idling into wdc.
	 */
	ata_channel_lock(chp);
	if (chp->ch_queue->queue_freeze == 0) {
		ata_channel_unlock(chp);
		goto out;
	}

	/* unfreeze the queue and reset drives */
	ata_channel_thaw_locked(chp);

	/* reset channel only if there are drives attached */
	if (chp->ch_ndrives > 0)
		ata_thread_run(chp, AT_WAIT, ATACH_TH_RESET, ATACH_NODRIVE);

	ata_channel_unlock(chp);

out:
	return true;
}

static int
atabus_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	struct atabus_initq *initq;
	int i;

	/*
	 * we can rescan a port multiplier atabus, even if some devices are
	 * still attached
	 */
	if (chp->ch_satapmp_nports == 0) {
		if (chp->atapibus != NULL) {
			return EBUSY;
		}

		KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drv_softc != NULL) {
				return EBUSY;
			}
		}
	}

	/* queue ourselves for the config thread and wake it up */
	initq = kmem_zalloc(sizeof(*initq), KM_SLEEP);
	initq->atabus_sc = sc;
	mutex_enter(&atabus_qlock);
	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
	mutex_exit(&atabus_qlock);
	config_pending_incr(sc->sc_dev);

	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RESCAN;
	cv_signal(&chp->ch_thr_idle);
	ata_channel_unlock(chp);

	return 0;
}

/*
 * Delay for 'ms' milliseconds. Busy-waits when only AT_POLL is set,
 * sleeps on the channel lock otherwise. Channel lock must be held.
 */
void
ata_delay(struct ata_channel *chp, int ms, const char *msg, int flags)
{
	KASSERT(mutex_owned(&chp->ch_lock));

	if ((flags & (AT_WAIT | AT_POLL)) == AT_POLL) {
		/*
		 * can't use kpause(), we may be in interrupt context
		 * or taking a crash dump
		 */
		delay(ms * 1000);
	} else {
		int pause = mstohz(ms);

		kpause(msg, false, pause > 0 ? pause : 1, &chp->ch_lock);
	}
}

/*
 * Rewrite a R/W DMA taskfile into its NCQ (FPDMA QUEUED) form, or apply
 * FUA for non-NCQ xfers. Updates cmd/count/features/device in place.
 */
void
atacmd_toncq(struct ata_xfer *xfer, uint8_t *cmd, uint16_t *count,
    uint16_t *features, uint8_t *device)
{
	if ((xfer->c_flags & C_NCQ) == 0) {
		/* FUA handling for non-NCQ drives */
		if (xfer->c_bio.flags & ATA_FUA
		    && *cmd == WDCC_WRITEDMA_EXT)
			*cmd = WDCC_WRITEDMA_FUA_EXT;

		return;
	}

	*cmd = (xfer->c_bio.flags & ATA_READ) ?
	    WDCC_READ_FPDMA_QUEUED : WDCC_WRITE_FPDMA_QUEUED;

	/* for FPDMA the block count is in features */
	*features = *count;

	/* NCQ tag */
	*count = (xfer->c_slot << 3);

	if (xfer->c_bio.flags & ATA_PRIO_HIGH)
		*count |= WDSC_PRIO_HIGH;

	/* other device flags */
	if (xfer->c_bio.flags & ATA_FUA)
		*device |= WDSD_FUA;
}

/*
 * Block until the given command xfer is flagged AT_DONE.
 * Woken via c_cmd_finish broadcast from ata_deactivate_xfer().
 */
void
ata_wait_cmd(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue *chq = chp->ch_queue;
	struct ata_command *ata_c = &xfer->c_ata_c;

	ata_channel_lock(chp);

	while ((ata_c->flags & AT_DONE) == 0)
		cv_wait(&chq->c_cmd_finish, &chp->ch_lock);

	ata_channel_unlock(chp);

	KASSERT((ata_c->flags & AT_DONE) != 0);
}