1 /* $NetBSD: ata.c,v 1.164 2021/10/05 08:01:05 rin Exp $ */ 2 3 /* 4 * Copyright (c) 1998, 2001 Manuel Bouyer. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 */ 26 27 #include <sys/cdefs.h> 28 __KERNEL_RCSID(0, "$NetBSD: ata.c,v 1.164 2021/10/05 08:01:05 rin Exp $"); 29 30 #include "opt_ata.h" 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/device.h> 36 #include <sys/conf.h> 37 #include <sys/fcntl.h> 38 #include <sys/proc.h> 39 #include <sys/kthread.h> 40 #include <sys/errno.h> 41 #include <sys/ataio.h> 42 #include <sys/kmem.h> 43 #include <sys/intr.h> 44 #include <sys/bus.h> 45 #include <sys/once.h> 46 #include <sys/bitops.h> 47 #include <sys/cpu.h> 48 49 #define ATABUS_PRIVATE 50 51 #include <dev/ata/ataconf.h> 52 #include <dev/ata/atareg.h> 53 #include <dev/ata/atavar.h> 54 #include <dev/ic/wdcvar.h> /* for PIOBM */ 55 56 #include "ioconf.h" 57 #include "locators.h" 58 59 #include "atapibus.h" 60 #include "ataraid.h" 61 #include "sata_pmp.h" 62 63 #if NATARAID > 0 64 #include <dev/ata/ata_raidvar.h> 65 #endif 66 #if NSATA_PMP > 0 67 #include <dev/ata/satapmpvar.h> 68 #endif 69 #include <dev/ata/satapmpreg.h> 70 71 #define DEBUG_FUNCS 0x08 72 #define DEBUG_PROBE 0x10 73 #define DEBUG_DETACH 0x20 74 #define DEBUG_XFERS 0x40 75 #ifdef ATADEBUG 76 #ifndef ATADEBUG_MASK 77 #define ATADEBUG_MASK 0 78 #endif 79 int atadebug_mask = ATADEBUG_MASK; 80 #define ATADEBUG_PRINT(args, level) \ 81 if (atadebug_mask & (level)) \ 82 printf args 83 #else 84 #define ATADEBUG_PRINT(args, level) 85 #endif 86 87 #if defined(ATA_DOWNGRADE_MODE) && NATA_DMA 88 static int ata_downgrade_mode(struct ata_drive_datas *, int); 89 #endif 90 91 static ONCE_DECL(ata_init_ctrl); 92 static struct pool ata_xfer_pool; 93 94 /* 95 * A queue of atabus instances, used to ensure the same bus probe order 96 * for a given hardware configuration at each boot. Kthread probing 97 * devices on a atabus. Only one probing at once. 
 */
/* Queue of atabus instances still waiting to finish their probe. */
static TAILQ_HEAD(, atabus_initq) atabus_initq_head;
static kmutex_t atabus_qlock;		/* protects atabus_initq_head */
static kcondvar_t atabus_qcv;		/* signalled when the queue head changes */
static lwp_t * atabus_cfg_lwp;		/* most recent config thread (see atabusconfig) */

/*****************************************************************************
 * ATA bus layer.
 *
 * ATA controllers attach an atabus instance, which handles probing the bus
 * for drives, etc.
 *****************************************************************************/

dev_type_open(atabusopen);
dev_type_close(atabusclose);
dev_type_ioctl(atabusioctl);

/* Character device entry points for the atabus control device. */
const struct cdevsw atabus_cdevsw = {
	.d_open = atabusopen,
	.d_close = atabusclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = atabusioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

static void atabus_childdetached(device_t, device_t);
static int atabus_rescan(device_t, const char *, const int *);
static bool atabus_resume(device_t, const pmf_qual_t *);
static bool atabus_suspend(device_t, const pmf_qual_t *);
static void atabusconfig_thread(void *);

static void ata_channel_idle(struct ata_channel *);
static void ata_activate_xfer_locked(struct ata_channel *, struct ata_xfer *);
static void ata_channel_freeze_locked(struct ata_channel *);
static void ata_thread_wake_locked(struct ata_channel *);

/*
 * atabus_init:
 *
 *	Initialize ATA subsystem structures.  Executed once via RUN_ONCE
 *	(from atabus_attach) before the first atabus instance is set up.
 */
static int
atabus_init(void)
{

	pool_init(&ata_xfer_pool, sizeof(struct ata_xfer), 0, 0, 0,
	    "ataspl", NULL, IPL_BIO);
	TAILQ_INIT(&atabus_initq_head);
	mutex_init(&atabus_qlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&atabus_qcv, "atainitq");
	return 0;
}

/*
 * atabusprint:
 *
 *	Autoconfiguration print routine used by ATA controllers when
 *	attaching an atabus instance.
 */
int
atabusprint(void *aux, const char *pnp)
{
	struct ata_channel *chan = aux;

	if (pnp)
		aprint_normal("atabus at %s", pnp);
	aprint_normal(" channel %d", chan->ch_channel);

	return (UNCONF);
}

/*
 * ataprint:
 *
 *	Autoconfiguration print routine for drives found on the channel.
 */
int
ataprint(void *aux, const char *pnp)
{
	struct ata_device *adev = aux;

	if (pnp)
		aprint_normal("wd at %s", pnp);
	aprint_normal(" drive %d", adev->adev_drv_data->drive);

	return (UNCONF);
}

/*
 * ata_channel_attach:
 *
 *	Common parts of attaching an atabus to an ATA controller channel.
 *	No-op for channels marked disabled.
 */
void
ata_channel_attach(struct ata_channel *chp)
{
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	ata_channel_init(chp);

	KASSERT(chp->ch_queue != NULL);

	chp->atabus = config_found(chp->ch_atac->atac_dev, chp, atabusprint,
	    CFARGS(.iattr = "ata"));
}

/*
 * ata_channel_detach:
 *
 *	Common parts of detaching an atabus to an ATA controller channel.
 */
void
ata_channel_detach(struct ata_channel *chp)
{
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	ata_channel_destroy(chp);

	chp->ch_flags |= ATACH_DETACHED;
}

/*
 * atabusconfig:
 *
 *	Probe the channel for drives and, if any were found, hand off
 *	to atabusconfig_thread to attach them.  Runs in the atabus
 *	worker thread context.
 */
static void
atabusconfig(struct atabus_softc *atabus_sc)
{
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, error;

	/* we are in the atabus's thread context */

	/*
	 * Probe for the drives attached to controller, unless a PMP
	 * is already known
	 */
	/* XXX for SATA devices we will power up all drives at once */
	if (chp->ch_satapmp_nports == 0)
		(*atac->atac_probe)(chp);

	if (chp->ch_ndrives >= 2) {
		ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n",
		    chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type),
		    DEBUG_PROBE);
	}

	/* Make sure the devices probe in atabus order to avoid jitter. */
	mutex_enter(&atabus_qlock);
	for (;;) {
		atabus_initq = TAILQ_FIRST(&atabus_initq_head);
		if (atabus_initq->atabus_sc == atabus_sc)
			break;
		/* not our turn yet; wait for the queue head to change */
		cv_wait(&atabus_qcv, &atabus_qlock);
	}
	mutex_exit(&atabus_qlock);

	ata_channel_lock(chp);

	KASSERT(ata_is_thread_run(chp));

	/* If no drives, abort here */
	if (chp->ch_drive == NULL)
		goto out;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++)
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE)
			break;
	if (i == chp->ch_ndrives)
		goto out;

	/* Shortcut in case we've been shutdown */
	if (chp->ch_flags & ATACH_SHUTDOWN)
		goto out;

	ata_channel_unlock(chp);

	/*
	 * On success the config thread takes over; it will dequeue the
	 * initq entry and do the config_pending_decr()/ata_delref().
	 */
	if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread,
	    atabus_sc, &atabus_cfg_lwp,
	    "%scnf", device_xname(atac->atac_dev))) != 0)
		aprint_error_dev(atac->atac_dev,
		    "unable to create config thread: error %d\n", error);
	return;

 out:
	ata_channel_unlock(chp);

	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	kmem_free(atabus_initq, sizeof(*atabus_initq));

	ata_delref(chp);

	config_pending_decr(atabus_sc->sc_dev);
}

/*
 * atabusconfig_thread: finish attach of atabus's children, in a separate
 * kernel thread.
 */
static void
atabusconfig_thread(void *arg)
{
	struct atabus_softc *atabus_sc = arg;
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, s;

	/* XXX seems wrong */
	mutex_enter(&atabus_qlock);
	atabus_initq = TAILQ_FIRST(&atabus_initq_head);
	KASSERT(atabus_initq->atabus_sc == atabus_sc);
	mutex_exit(&atabus_qlock);

	/*
	 * First look for a port multiplier
	 */
	if (chp->ch_ndrives == PMP_MAX_DRIVES &&
	    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
#if NSATA_PMP > 0
		satapmp_attach(chp);
#else
		aprint_error_dev(atabus_sc->sc_dev,
		    "SATA port multiplier not supported\n");
		/* no problems going on, all drives are ATA_DRIVET_NONE */
#endif
	}

	/*
	 * Attach an ATAPI bus, if needed.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) {
#if NATAPIBUS > 0
			(*atac->atac_atapibus_attach)(atabus_sc);
#else
			/*
			 * Fake the autoconfig "not configured" message
			 */
			aprint_normal("atapibus at %s not configured\n",
			    device_xname(atac->atac_dev));
			chp->atapibus = NULL;
			s = splbio();
			/* NB: reuses 'i'; harmless, we break right after */
			for (i = 0; i < chp->ch_ndrives; i++) {
				if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
					chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			}
			splx(s);
#endif
			break;
		}
	}

	/* Attach ATA (and old pre-ATA) drives that have no softc yet. */
	for (i = 0; i < chp->ch_ndrives; i++) {
		struct ata_device adev;
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA &&
		    chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) {
			continue;
		}
		if (chp->ch_drive[i].drv_softc != NULL)
			continue;
		memset(&adev, 0, sizeof(struct ata_device));
		adev.adev_bustype = atac->atac_bustype_ata;
		adev.adev_channel = chp->ch_channel;
		adev.adev_drv_data = &chp->ch_drive[i];
		chp->ch_drive[i].drv_softc = config_found(atabus_sc->sc_dev,
		    &adev, ataprint,
		    CFARGS(.iattr = "ata_hl"));
		if (chp->ch_drive[i].drv_softc != NULL) {
			ata_probe_caps(&chp->ch_drive[i]);
		} else {
			s = splbio();
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	}

	/* now that we know the drives, the controller can set its modes */
	if (atac->atac_set_modes) {
		(*atac->atac_set_modes)(chp);
		ata_print_modes(chp);
	}
#if NATARAID > 0
	if (atac->atac_cap & ATAC_CAP_RAID) {
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) {
				ata_raid_check_component(
				    chp->ch_drive[i].drv_softc);
			}
		}
	}
#endif /* NATARAID > 0 */

	/*
	 * reset drive_flags for unattached devices, reset state for attached
	 * ones
	 */
	s = splbio();
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			continue;
		if (chp->ch_drive[i].drv_softc == NULL) {
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		} else
			chp->ch_drive[i].state = 0;
	}
	splx(s);

	/* Let the next queued atabus instance proceed with its probe. */
	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	kmem_free(atabus_initq, sizeof(*atabus_initq));

	ata_delref(chp);

	config_pending_decr(atabus_sc->sc_dev);
	kthread_exit(0);
}

/*
 * atabus_thread:
 *
 *	Worker thread for the ATA bus.  Handles (re)scan requests, channel
 *	and per-drive resets, error recovery, and restarting a frozen
 *	queue, until ATACH_SHUTDOWN is set.
 */
static void
atabus_thread(void *arg)
{
	struct atabus_softc *sc = arg;
	struct ata_channel *chp = sc->sc_chan;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer;
	int i, rv;

	ata_channel_lock(chp);
	KASSERT(ata_is_thread_run(chp));

	/*
	 * Probe the drives.  Reset type to indicate to controllers
	 * that can re-probe that all drives must be probed.
	 *
	 * Note: ch_ndrives may be changed during the probe.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		chp->ch_drive[i].drive_flags = 0;
		chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
	}
	ata_channel_unlock(chp);

	atabusconfig(sc);

	ata_channel_lock(chp);
	for (;;) {
		/* Sleep until there is work to do. */
		if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_TH_DRIVE_RESET
		    | ATACH_TH_RECOVERY | ATACH_SHUTDOWN)) == 0 &&
		    (chq->queue_active == 0 || chq->queue_freeze == 0)) {
			cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
		}
		if (chp->ch_flags & ATACH_SHUTDOWN) {
			break;
		}
		if (chp->ch_flags & ATACH_TH_RESCAN) {
			chp->ch_flags &= ~ATACH_TH_RESCAN;
			ata_channel_unlock(chp);
			atabusconfig(sc);
			ata_channel_lock(chp);
		}
		if (chp->ch_flags & ATACH_TH_RESET) {
			/* this will unfreeze the channel */
			ata_thread_run(chp, AT_WAIT,
			    ATACH_TH_RESET, ATACH_NODRIVE);
		} else if (chp->ch_flags & ATACH_TH_DRIVE_RESET) {
			/* this will unfreeze the channel */
			for (i = 0; i < chp->ch_ndrives; i++) {
				struct ata_drive_datas *drvp;

				drvp = &chp->ch_drive[i];

				if (drvp->drive_flags & ATA_DRIVE_TH_RESET) {
					ata_thread_run(chp,
					    AT_WAIT, ATACH_TH_DRIVE_RESET, i);
				}
			}
			chp->ch_flags &= ~ATACH_TH_DRIVE_RESET;
		} else if (chp->ch_flags & ATACH_TH_RECOVERY) {
			/*
			 * This will unfreeze the channel; drops locks during
			 * run, so must wrap in splbio()/splx() to avoid
			 * spurious interrupts. XXX MPSAFE
			 */
			int s = splbio();
			ata_thread_run(chp, AT_WAIT, ATACH_TH_RECOVERY,
			    chp->recovery_tfd);
			splx(s);
		} else if (chq->queue_active > 0 && chq->queue_freeze == 1) {
			/*
			 * Caller has bumped queue_freeze, decrease it.  This
			 * flow shall never be executed for NCQ commands.
			 */
			KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
			KASSERT(chq->queue_active == 1);

			ata_channel_thaw_locked(chp);
			xfer = ata_queue_get_active_xfer_locked(chp);

			KASSERT(xfer != NULL);
			KASSERT((xfer->c_flags & C_POLL) == 0);

			switch ((rv = ata_xfer_start(xfer))) {
			case ATASTART_STARTED:
			case ATASTART_POLL:
			case ATASTART_ABORT:
				break;
			case ATASTART_TH:
			default:
				panic("%s: ata_xfer_start() unexpected rv %d",
				    __func__, rv);
				/* NOTREACHED */
			}
		} else if (chq->queue_freeze > 1)
			panic("%s: queue_freeze", __func__);

		/* Try to run down the queue once channel is unfrozen */
		if (chq->queue_freeze == 0) {
			ata_channel_unlock(chp);
			atastart(chp);
			ata_channel_lock(chp);
		}
	}
	/* Announce exit; atabus_detach waits on ch_thr_idle for this. */
	chp->ch_thread = NULL;
	cv_signal(&chp->ch_thr_idle);
	ata_channel_unlock(chp);
	kthread_exit(0);
}

/*
 * ata_is_thread_run:
 *
 *	Return true when the caller is the channel's worker thread and
 *	not in interrupt context.  Channel lock must be held.
 */
bool
ata_is_thread_run(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));

	return (chp->ch_thread == curlwp && !cpu_intr_p());
}

/*
 * ata_thread_wake_locked:
 *
 *	Freeze the channel and wake the worker thread.  Channel lock
 *	must be held.
 */
static void
ata_thread_wake_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
	ata_channel_freeze_locked(chp);
	cv_signal(&chp->ch_thr_idle);
}

/*
 * atabus_match:
 *
 *	Autoconfiguration match routine.
 */
static int
atabus_match(device_t parent, cfdata_t cf, void *aux)
{
	struct ata_channel *chp = aux;

	if (chp == NULL)
		return (0);

	if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel &&
	    cf->cf_loc[ATACF_CHANNEL] != ATACF_CHANNEL_DEFAULT)
		return (0);

	return (1);
}

/*
 * atabus_attach:
 *
 *	Autoconfiguration attach routine.
 */
static void
atabus_attach(device_t parent, device_t self, void *aux)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = aux;
	struct atabus_initq *initq;
	int error;

	sc->sc_chan = chp;

	aprint_normal("\n");
	aprint_naive("\n");

	sc->sc_dev = self;

	if (ata_addref(chp))
		return;

	RUN_ONCE(&ata_init_ctrl, atabus_init);

	/* Queue ourselves so probes happen in attach order. */
	initq = kmem_zalloc(sizeof(*initq), KM_SLEEP);
	initq->atabus_sc = sc;
	mutex_enter(&atabus_qlock);
	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
	mutex_exit(&atabus_qlock);
	config_pending_incr(sc->sc_dev);

	/* XXX MPSAFE - no KTHREAD_MPSAFE, so protected by KERNEL_LOCK() */
	if ((error = kthread_create(PRI_NONE, 0, NULL, atabus_thread, sc,
	    &chp->ch_thread, "%s", device_xname(self))) != 0)
		aprint_error_dev(self,
		    "unable to create kernel thread: error %d\n", error);

	if (!pmf_device_register(self, atabus_suspend, atabus_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

/*
 * atabus_detach:
 *
 *	Autoconfiguration detach routine.  Detaches children (atapibus
 *	first, then drives), then shuts down the worker thread and frees
 *	the drive array.
 */
static int
atabus_detach(device_t self, int flags)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	device_t dev = NULL;
	int i, error = 0;

	/*
	 * Detach atapibus and its children.
	 */
	if ((dev = chp->atapibus) != NULL) {
		ATADEBUG_PRINT(("atabus_detach: %s: detaching %s\n",
		    device_xname(self), device_xname(dev)), DEBUG_DETACH);

		error = config_detach(dev, flags);
		if (error)
			goto out;
		KASSERT(chp->atapibus == NULL);
	}

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		/* PMP has no child device; just forget it. */
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
			ATADEBUG_PRINT(("%s.%d: %s: detaching %s\n", __func__,
			    __LINE__, device_xname(self), device_xname(dev)),
			    DEBUG_DETACH);
			error = config_detach(dev, flags);
			if (error)
				goto out;
			/* atabus_childdetached() cleared these */
			KASSERT(chp->ch_drive[i].drv_softc == NULL);
			KASSERT(chp->ch_drive[i].drive_type == 0);
		}
	}

	/* Shutdown the channel. */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_SHUTDOWN;
	while (chp->ch_thread != NULL) {
		cv_signal(&chp->ch_thr_idle);
		cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
	}
	ata_channel_unlock(chp);

	atabus_free_drives(chp);

 out:
#ifdef ATADEBUG
	if (dev != NULL && error != 0)
		ATADEBUG_PRINT(("%s: %s: error %d detaching %s\n", __func__,
		    device_xname(self), error, device_xname(dev)),
		    DEBUG_DETACH);
#endif /* ATADEBUG */

	return (error);
}

/*
 * atabus_childdetached:
 *
 *	Autoconfiguration notification that a child (the atapibus or a
 *	drive) was detached; clear the channel's bookkeeping for it.
 *	Panics on a child it does not know about.
 */
void
atabus_childdetached(device_t self, device_t child)
{
	bool found = false;
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	int i;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	/*
	 * atapibus detached.
	 */
	if (child == chp->atapibus) {
		chp->atapibus = NULL;
		found = true;
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI)
				continue;
			KASSERT(chp->ch_drive[i].drv_softc != NULL);
			chp->ch_drive[i].drv_softc = NULL;
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		}
	}

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		if (child == chp->ch_drive[i].drv_softc) {
			chp->ch_drive[i].drv_softc = NULL;
			chp->ch_drive[i].drive_flags = 0;
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
				chp->ch_satapmp_nports = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			found = true;
		}
	}

	if (!found)
		panic("%s: unknown child %p", device_xname(self),
		    (const void *)child);
}

CFATTACH_DECL3_NEW(atabus, sizeof(struct atabus_softc),
    atabus_match, atabus_attach, atabus_detach, NULL, atabus_rescan,
    atabus_childdetached, DVF_DETACH_SHUTDOWN);

/*****************************************************************************
 * Common ATA bus operations.
 *****************************************************************************/

/*
 * allocate/free the channel's ch_drive[] array.  Called and returns with
 * the channel lock held; may temporarily drop it to sleep in kmem_zalloc.
 */
int
atabus_alloc_drives(struct ata_channel *chp, int ndrives)
{
	int i;
	if (chp->ch_ndrives != ndrives)
		atabus_free_drives(chp);
	if (chp->ch_drive == NULL) {
		void *drv;

		ata_channel_unlock(chp);
		drv = kmem_zalloc(sizeof(*chp->ch_drive) * ndrives, KM_SLEEP);
		ata_channel_lock(chp);

		if (chp->ch_drive != NULL) {
			/* lost the race */
			kmem_free(drv, sizeof(*chp->ch_drive) * ndrives);
			return 0;
		}
		chp->ch_drive = drv;
	}
	for (i = 0; i < ndrives; i++) {
		chp->ch_drive[i].chnl_softc = chp;
		chp->ch_drive[i].drive = i;
	}
	chp->ch_ndrives = ndrives;
	return 0;
}

/*
 * Free the ch_drive[] array.  With DIAGNOSTIC, panics if any entry is
 * still attached or has a type other than ATA_DRIVET_NONE.
 */
void
atabus_free_drives(struct ata_channel *chp)
{
#ifdef DIAGNOSTIC
	int i;
	int dopanic = 0;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) {
			printf("%s: ch_drive[%d] type %d != ATA_DRIVET_NONE\n",
			    device_xname(chp->atabus), i,
			    chp->ch_drive[i].drive_type);
			dopanic = 1;
		}
		if (chp->ch_drive[i].drv_softc != NULL) {
			printf("%s: ch_drive[%d] attached to %s\n",
			    device_xname(chp->atabus), i,
			    device_xname(chp->ch_drive[i].drv_softc));
			dopanic = 1;
		}
	}
	if (dopanic)
		panic("atabus_free_drives");
#endif

	if (chp->ch_drive == NULL)
		return;
	kmem_free(chp->ch_drive,
	    sizeof(struct ata_drive_datas) * chp->ch_ndrives);
	chp->ch_ndrives = 0;
	chp->ch_drive = NULL;
}

/*
 * Get the disk's parameters via IDENTIFY (DEVICE), filling in *prms.
 * Returns CMD_OK, CMD_ERR, or CMD_AGAIN (no xfer available).
 */
int
ata_get_params(struct ata_drive_datas *drvp, uint8_t flags,
    struct ataparams *prms)
{
	struct ata_xfer *xfer;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	char *tb;
	int i, rv;
	uint16_t *p;

	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp, false);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	tb = kmem_zalloc(ATA_BSIZE, KM_SLEEP);
	memset(prms, 0, sizeof(struct ataparams));

	if (drvp->drive_type == ATA_DRIVET_ATA) {
		xfer->c_ata_c.r_command = WDCC_IDENTIFY;
		xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 3000; /* 3s */
	} else if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		xfer->c_ata_c.r_command = ATAPI_IDENTIFY_DEVICE;
		xfer->c_ata_c.r_st_bmask = 0;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 10000; /* 10s */
	} else {
		ATADEBUG_PRINT(("ata_get_parms: no disks\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	xfer->c_ata_c.flags = AT_READ | flags;
	xfer->c_ata_c.data = tb;
	xfer->c_ata_c.bcount = ATA_BSIZE;
	(*atac->atac_bustype_ata->ata_exec_command)(drvp, xfer);
	ata_wait_cmd(chp, xfer);
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		ATADEBUG_PRINT(("ata_get_parms: ata_c.flags=0x%x\n",
		    xfer->c_ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	/* if we didn't read any data something is wrong */
	if ((xfer->c_ata_c.flags & AT_XFDONE) == 0) {
		rv = CMD_ERR;
		goto out;
	}

	/* Read in parameter block. */
	memcpy(prms, tb, sizeof(struct ataparams));

	/*
	 * Shuffle string byte order.
	 * ATAPI NEC, Mitsumi and Pioneer drives and
	 * old ATA TDK CompactFlash cards
	 * have different byte order.
	 */
#if BYTE_ORDER == BIG_ENDIAN
# define M(n)	prms->atap_model[(n) ^ 1]
#else
# define M(n)	prms->atap_model[n]
#endif
	if (
#if BYTE_ORDER == BIG_ENDIAN
	    !
#endif
	    ((drvp->drive_type == ATA_DRIVET_ATAPI) ?
	     ((M(0) == 'N' && M(1) == 'E') ||
	      (M(0) == 'F' && M(1) == 'X') ||
	      (M(0) == 'P' && M(1) == 'i')) :
	     ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) {
		rv = CMD_OK;
		goto out;
	}
#undef M
	/* byte-swap the ASCII identity strings in place, word by word */
	for (i = 0; i < sizeof(prms->atap_model); i += 2) {
		p = (uint16_t *)(prms->atap_model + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_serial); i += 2) {
		p = (uint16_t *)(prms->atap_serial + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_revision); i += 2) {
		p = (uint16_t *)(prms->atap_revision + i);
		*p = bswap16(*p);
	}

	rv = CMD_OK;
 out:
	kmem_free(tb, ATA_BSIZE);
	ata_free_xfer(chp, xfer);
	return rv;
}

/*
 * Issue SET FEATURES to select the drive's transfer mode.  Returns
 * CMD_OK, CMD_ERR, or CMD_AGAIN (no xfer available).
 */
int
ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags)
{
	struct ata_xfer *xfer;
	int rv;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;

	ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp, false);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	xfer->c_ata_c.r_command = SET_FEATURES;
	xfer->c_ata_c.r_st_bmask = 0;
	xfer->c_ata_c.r_st_pmask = 0;
	xfer->c_ata_c.r_features = WDSF_SET_MODE;
	xfer->c_ata_c.r_count = mode;
	xfer->c_ata_c.flags = flags;
	xfer->c_ata_c.timeout = 1000; /* 1s */
	(*atac->atac_bustype_ata->ata_exec_command)(drvp, xfer);
	ata_wait_cmd(chp, xfer);
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		rv = CMD_ERR;
		goto out;
	}

	rv = CMD_OK;

out:
	ata_free_xfer(chp, xfer);
	return rv;
}

#if NATA_DMA
/*
 * Record a DMA error for the drive and, if errors are excessive, either
 * downgrade the transfer mode (ATA_DOWNGRADE_MODE) or rate-limit a
 * complaint.  Channel lock must be held.
 */
void
ata_dmaerr(struct ata_drive_datas *drvp, int flags)
{
	ata_channel_lock_owned(drvp->chnl_softc);

	/*
	 * Downgrade decision: if we get NERRS_MAX in NXFER.
	 * We start with n_dmaerrs set to NERRS_MAX-1 so that the
	 * first error within the first NXFER ops will immediately trigger
	 * a downgrade.
	 * If we got an error and n_xfers is bigger than NXFER reset counters.
	 */
	drvp->n_dmaerrs++;
	if (drvp->n_dmaerrs >= NERRS_MAX && drvp->n_xfers <= NXFER) {
#ifdef ATA_DOWNGRADE_MODE
		ata_downgrade_mode(drvp, flags);
		drvp->n_dmaerrs = NERRS_MAX-1;
#else
		static struct timeval last;
		static const struct timeval serrintvl = { 300, 0 };

		if (ratecheck(&last, &serrintvl)) {
			aprint_error_dev(drvp->drv_softc,
			    "excessive DMA errors - %d in last %d transfers\n",
			    drvp->n_dmaerrs, drvp->n_xfers);
		}
#endif
		drvp->n_xfers = 0;
		return;
	}
	if (drvp->n_xfers > NXFER) {
		drvp->n_dmaerrs = 1; /* just got an error */
		drvp->n_xfers = 1; /* restart counting from this error */
	}
}
#endif	/* NATA_DMA */

/*
 * freeze the queue and wait for the controller to be idle.  Caller has to
 * unfreeze/restart the queue
 */
static void
ata_channel_idle(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_freeze_locked(chp);
	while (chp->ch_queue->queue_active > 0) {
		chp->ch_queue->queue_flags |= QF_IDLE_WAIT;
		/* poll with 1-tick timeout; atastart signals queue_idle */
		cv_timedwait(&chp->ch_queue->queue_idle, &chp->ch_lock, 1);
	}
	ata_channel_unlock(chp);
}

/*
 * Add a command to the queue and start controller.
 *
 * MUST BE CALLED AT splbio()!
 */
void
ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{

	ATADEBUG_PRINT(("ata_exec_xfer %p channel %d drive %d\n", xfer,
	    chp->ch_channel, xfer->c_drive), DEBUG_XFERS);

	/* complete xfer setup */
	xfer->c_chp = chp;

	ata_channel_lock(chp);

	/*
	 * Standard commands are added to the end of command list, but
	 * recovery commands must be run immediately.
	 */
	if ((xfer->c_flags & C_SKIP_QUEUE) == 0)
		SIMPLEQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer,
		    c_xferchain);
	else
		SIMPLEQ_INSERT_HEAD(&chp->ch_queue->queue_xfer, xfer,
		    c_xferchain);

	/*
	 * if polling and can sleep, wait for the xfer to be at head of queue
	 */
	if ((xfer->c_flags & (C_POLL | C_WAIT)) == (C_POLL | C_WAIT)) {
		while (chp->ch_queue->queue_active > 0 ||
		    SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) {
			xfer->c_flags |= C_WAITACT;
			cv_wait(&chp->ch_queue->c_active, &chp->ch_lock);
			xfer->c_flags &= ~C_WAITACT;
		}

		/*
		 * Free xfer now if there was an attempt to free it
		 * while we were waiting.
		 */
		if ((xfer->c_flags & (C_FREE|C_WAITTIMO)) == C_FREE) {
			ata_channel_unlock(chp);

			ata_free_xfer(chp, xfer);
			return;
		}
	}

	ata_channel_unlock(chp);

	ATADEBUG_PRINT(("atastart from ata_exec_xfer, flags 0x%x\n",
	    chp->ch_flags), DEBUG_XFERS);
	atastart(chp);
}

/*
 * Start I/O on a controller, for the given channel.
 * The first xfer may be not for our channel if the channel queues
 * are shared.
 *
 * MUST BE CALLED AT splbio()!
 *
 * XXX FIS-based switching with PMP
 * Currently atastart() never schedules concurrent NCQ transfers to more than
 * one drive, even when channel has several SATA drives attached via PMP.
 * To support concurrent transfers to different drives with PMP, it would be
 * necessary to implement FIS-based switching support in controller driver,
 * and then adjust error handling and recovery to stop assuming at most
 * one active drive.
 */
void
atastart(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer, *axfer;
	bool skipq;

#ifdef ATA_DEBUG
	int spl1, spl2;

	spl1 = splbio();
	spl2 = splbio();
	if (spl2 != spl1) {
		printf("atastart: not at splbio()\n");
		panic("atastart");
	}
	splx(spl2);
	splx(spl1);
#endif /* ATA_DEBUG */

	ata_channel_lock(chp);

again:
	/* is there a xfer ? */
	if ((xfer = SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL) {
		ATADEBUG_PRINT(("%s(chp=%p): channel %d queue_xfer is empty\n",
		    __func__, chp, chp->ch_channel), DEBUG_XFERS);
		goto out;
	}

	/*
	 * if someone is waiting for the command to be active, wake it up
	 * and let it process the command
	 */
	if (__predict_false(xfer->c_flags & C_WAITACT)) {
		ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d "
		    "wait active\n", xfer, chp->ch_channel, xfer->c_drive),
		    DEBUG_XFERS);
		cv_broadcast(&chp->ch_queue->c_active);
		goto out;
	}

	/* recovery commands (C_SKIP_QUEUE) bypass freeze/limit checks */
	skipq = ISSET(xfer->c_flags, C_SKIP_QUEUE);

	/* is the queue frozen? */
	if (__predict_false(!skipq && chq->queue_freeze > 0)) {
		if (chq->queue_flags & QF_IDLE_WAIT) {
			chq->queue_flags &= ~QF_IDLE_WAIT;
			cv_signal(&chp->ch_queue->queue_idle);
		}
		ATADEBUG_PRINT(("%s(chp=%p): channel %d drive %d "
		    "queue frozen: %d\n",
		    __func__, chp, chp->ch_channel, xfer->c_drive,
		    chq->queue_freeze),
		    DEBUG_XFERS);
		goto out;
	}

	/* all xfers on same queue must belong to the same channel */
	KASSERT(xfer->c_chp == chp);

	/*
	 * Can only take the command if there are no current active
	 * commands, or if the command is NCQ and the active commands are also
	 * NCQ. If PM is in use and HBA driver doesn't support/use FIS-based
	 * switching, can only send commands to single drive.
	 * Need only check first xfer.
	 * XXX FIS-based switching - revisit
	 */
	if (!skipq && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) {
		if (!ISSET(xfer->c_flags, C_NCQ) ||
		    !ISSET(axfer->c_flags, C_NCQ) ||
		    xfer->c_drive != axfer->c_drive)
			goto out;
	}

	struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive];

	/*
	 * Are we on limit of active xfers ? If the queue has more
	 * than 1 openings, we keep one slot reserved for recovery or dump.
	 */
	KASSERT(chq->queue_active <= chq->queue_openings);
	const uint8_t chq_openings = (!skipq && chq->queue_openings > 1)
	    ? (chq->queue_openings - 1) : chq->queue_openings;
	const uint8_t drv_openings = ISSET(xfer->c_flags, C_NCQ)
	    ? drvp->drv_openings : ATA_MAX_OPENINGS;
	if (chq->queue_active >= MIN(chq_openings, drv_openings)) {
		if (skipq) {
			panic("%s: channel %d busy, xfer not possible",
			    __func__, chp->ch_channel);
		}

		ATADEBUG_PRINT(("%s(chp=%p): channel %d completely busy\n",
		    __func__, chp, chp->ch_channel), DEBUG_XFERS);
		goto out;
	}

	/* Slot allocation can fail if drv_openings < ch_openings */
	if (!ata_queue_alloc_slot(chp, &xfer->c_slot, drv_openings))
		goto out;

	if (__predict_false(atac->atac_claim_hw)) {
		if (!atac->atac_claim_hw(chp, 0)) {
			ata_queue_free_slot(chp, xfer->c_slot);
			goto out;
		}
	}

	/* Now committed to start the xfer */

	ATADEBUG_PRINT(("%s(chp=%p): xfer %p channel %d drive %d\n",
	    __func__, chp, xfer, chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
	if (drvp->drive_flags & ATA_DRIVE_RESET) {
		drvp->drive_flags &= ~ATA_DRIVE_RESET;
		drvp->state = 0;
	}

	if (ISSET(xfer->c_flags, C_NCQ))
		SET(chp->ch_flags, ATACH_NCQ);
	else
		CLR(chp->ch_flags, ATACH_NCQ);

	SIMPLEQ_REMOVE_HEAD(&chq->queue_xfer, c_xferchain);

	ata_activate_xfer_locked(chp, xfer);

	if (atac->atac_cap & ATAC_CAP_NOIRQ)
		KASSERT(xfer->c_flags & C_POLL);

	switch (ata_xfer_start(xfer)) {
	case ATASTART_TH:
	case ATASTART_ABORT:
		/* don't start any further commands in this case */
		goto out;
	default:
		/* nothing to do */
		break;
	}

	/* Queue more commands if possible, but not during recovery or dump */
	if (!skipq && chq->queue_active < chq->queue_openings)
		goto again;

out:
	ata_channel_unlock(chp);
}

/*
 * ata_xfer_start:
 *
 *	Start an xfer via its c_start op and handle the outcome (started,
 *	postponed to the worker thread, polled, or aborted).  Called and
 *	returns with the channel lock held; drops it around poll/abort.
 */
int
ata_xfer_start(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	int rv, status;

	KASSERT(mutex_owned(&chp->ch_lock));

again:
	rv = xfer->ops->c_start(chp, xfer);
	switch (rv) {
	case ATASTART_STARTED:
		/* nothing to do */
		break;
	case ATASTART_TH:
		/* postpone xfer to thread */
		ata_thread_wake_locked(chp);
		break;
	case ATASTART_POLL:
		/* can happen even in thread context for some ATAPI devices */
		ata_channel_unlock(chp);
		KASSERT(xfer->ops != NULL && xfer->ops->c_poll != NULL);
		status = xfer->ops->c_poll(chp, xfer);
		ata_channel_lock(chp);
		if (status == ATAPOLL_AGAIN)
			goto again;
		break;
	case ATASTART_ABORT:
		ata_channel_unlock(chp);
		KASSERT(xfer->ops != NULL && xfer->ops->c_abort != NULL);
		xfer->ops->c_abort(chp, xfer);
		ata_channel_lock(chp);
		break;
	}

	return rv;
}

/*
 * ata_activate_xfer_locked:
 *
 *	Record the xfer as active on the channel's queue.  Channel lock
 *	must be held and the xfer's slot must not already be active.
 */
static void
ata_activate_xfer_locked(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	KASSERT(mutex_owned(&chp->ch_lock));
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);

	if ((xfer->c_flags & C_SKIP_QUEUE) == 0)
		TAILQ_INSERT_TAIL(&chq->active_xfers, xfer, c_activechain);
	else {
		/*
		 * Must go to head, so that ata_queue_get_active_xfer()
		 * returns the recovery command, and not some other
		 * random active transfer.
		 */
		TAILQ_INSERT_HEAD(&chq->active_xfers, xfer, c_activechain);
	}
	chq->active_xfers_used |= __BIT(xfer->c_slot);
	chq->queue_active++;
}

/*
 * Does its own locking, does not require splbio().
 * waitok - whether to block waiting for a free xfer
 */
struct ata_xfer *
ata_get_xfer(struct ata_channel *chp, bool waitok)
{
	/* PR_ZERO: callers rely on a zero-initialized xfer */
	return pool_get(&ata_xfer_pool,
	    PR_ZERO | (waitok ? PR_WAITOK : PR_NOWAIT));
}

/*
 * ata_deactivate_xfer() must be always called prior to ata_free_xfer()
 */
void
ata_free_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue *chq = chp->ch_queue;

	ata_channel_lock(chp);

	if (__predict_false(xfer->c_flags & (C_WAITACT|C_WAITTIMO))) {
		/*
		 * Someone is waiting for this xfer, so we can't free now;
		 * flag it C_FREE and let the waiter do the actual free.
		 */
		xfer->c_flags |= C_FREE;
		cv_broadcast(&chq->c_active);
		ata_channel_unlock(chp);
		return;
	}

	/* XXX move PIOBM and free_gw to deactivate? */
#if NATA_PIOBM /* XXX wdc dependent code */
	if (__predict_false(xfer->c_flags & C_PIOBM)) {
		struct wdc_softc *wdc = CHAN_TO_WDC(chp);

		/* finish the busmastering PIO */
		(*wdc->piobm_done)(wdc->dma_arg,
		    chp->ch_channel, xfer->c_drive);
		chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_PIOBM_WAIT | ATACH_IRQ_WAIT);
	}
#endif

	if (__predict_false(chp->ch_atac->atac_free_hw))
		chp->ch_atac->atac_free_hw(chp);

	ata_channel_unlock(chp);

	/* C_PRIVATE_ALLOC xfers are owned by the caller, not the pool */
	if (__predict_true(!ISSET(xfer->c_flags, C_PRIVATE_ALLOC)))
		pool_put(&ata_xfer_pool, xfer);
}

/*
 * Remove an xfer from the active list and release its slot.  Stops the
 * timeout callout when this was the last active xfer, and wakes anyone
 * sleeping in ata_wait_cmd() for C_WAIT xfers.
 */
void
ata_deactivate_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	ata_channel_lock(chp);

	KASSERT(chq->queue_active > 0);
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) != 0);

	/* Stop only when this is last active xfer */
	if (chq->queue_active == 1)
		callout_stop(&chp->c_timo_callout);

	/* timeout handler is running; let it detect the race via C_WAITTIMO */
	if (callout_invoking(&chp->c_timo_callout))
		xfer->c_flags |= C_WAITTIMO;

	TAILQ_REMOVE(&chq->active_xfers, xfer, c_activechain);
	chq->active_xfers_used &= ~__BIT(xfer->c_slot);
	chq->queue_active--;

	ata_queue_free_slot(chp, xfer->c_slot);

	if (xfer->c_flags & C_WAIT)
		cv_broadcast(&chq->c_cmd_finish);

	ata_channel_unlock(chp);
}

/*
 * Called in c_intr hook. Must be called before any deactivations
 * are done - if there is drain pending, it calls c_kill_xfer hook which
 * deactivates the xfer.
 * Calls c_kill_xfer with channel lock free.
 * Returns true if caller should just exit without further processing.
 * Caller must not further access any part of xfer or any related controller
 * structures in that case, it should just return.
 */
bool
ata_waitdrain_xfer_check(struct ata_channel *chp, struct ata_xfer *xfer)
{
	int drive = xfer->c_drive;
	bool draining = false;

	ata_channel_lock(chp);

	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		ata_channel_unlock(chp);

		xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE);

		ata_channel_lock(chp);
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		cv_signal(&chp->ch_queue->queue_drain);
		draining = true;
	}

	ata_channel_unlock(chp);

	return draining;
}

/*
 * Check for race of normal transfer handling vs. timeout.
 */
bool
ata_timo_xfer_check(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	ata_channel_lock(chp);

	if (xfer->c_flags & C_WAITTIMO) {
		xfer->c_flags &= ~C_WAITTIMO;

		/* Handle race vs.
ata_free_xfer() */
		if (xfer->c_flags & C_FREE) {
			xfer->c_flags &= ~C_FREE;
			ata_channel_unlock(chp);

			device_printf(drvp->drv_softc,
			    "xfer %"PRIxPTR" freed while invoking timeout\n",
			    (intptr_t)xfer & PAGE_MASK);

			ata_free_xfer(chp, xfer);
			return true;
		}

		/* Race vs. callout_stop() in ata_deactivate_xfer() */
		ata_channel_unlock(chp);

		device_printf(drvp->drv_softc,
		    "xfer %"PRIxPTR" deactivated while invoking timeout\n",
		    (intptr_t)xfer & PAGE_MASK);
		return true;
	}

	ata_channel_unlock(chp);

	/* No race, proceed with timeout handling */
	return false;
}

/*
 * Kill off all active xfers for an ata_channel.
 *
 * Must be called with channel lock held.
 */
void
ata_kill_active(struct ata_channel *chp, int reason, int flags)
{
	struct ata_queue * const chq = chp->ch_queue;
	struct ata_xfer *xfer, *xfernext;

	KASSERT(mutex_owned(&chp->ch_lock));

	/*
	 * NOTE(review): the channel lock is dropped while c_kill_xfer runs,
	 * so the prefetched xfernext could in principle go stale if the hook
	 * (or a concurrent thread) manipulates the active list — confirm
	 * callers serialize against concurrent activation here.
	 */
	TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, xfernext) {
		ata_channel_unlock(chp);
		xfer->ops->c_kill_xfer(xfer->c_chp, xfer, reason);
		ata_channel_lock(chp);
	}
}

/*
 * Kill off all pending xfers for a drive.
1466 */ 1467 void 1468 ata_kill_pending(struct ata_drive_datas *drvp) 1469 { 1470 struct ata_channel * const chp = drvp->chnl_softc; 1471 struct ata_queue * const chq = chp->ch_queue; 1472 struct ata_xfer *xfer; 1473 1474 ata_channel_lock(chp); 1475 1476 /* Kill all pending transfers */ 1477 while ((xfer = SIMPLEQ_FIRST(&chq->queue_xfer))) { 1478 KASSERT(xfer->c_chp == chp); 1479 1480 if (xfer->c_drive != drvp->drive) 1481 continue; 1482 1483 SIMPLEQ_REMOVE_HEAD(&chp->ch_queue->queue_xfer, c_xferchain); 1484 1485 /* 1486 * Keep the lock, so that we get deadlock (and 'locking against 1487 * myself' with LOCKDEBUG), instead of silent 1488 * data corruption, if the hook tries to call back into 1489 * middle layer for inactive xfer. 1490 */ 1491 xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE_INACTIVE); 1492 } 1493 1494 /* Wait until all active transfers on the drive finish */ 1495 while (chq->queue_active > 0) { 1496 bool drv_active = false; 1497 1498 TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) { 1499 KASSERT(xfer->c_chp == chp); 1500 1501 if (xfer->c_drive == drvp->drive) { 1502 drv_active = true; 1503 break; 1504 } 1505 } 1506 1507 if (!drv_active) { 1508 /* all finished */ 1509 break; 1510 } 1511 1512 drvp->drive_flags |= ATA_DRIVE_WAITDRAIN; 1513 cv_wait(&chq->queue_drain, &chp->ch_lock); 1514 } 1515 1516 ata_channel_unlock(chp); 1517 } 1518 1519 static void 1520 ata_channel_freeze_locked(struct ata_channel *chp) 1521 { 1522 chp->ch_queue->queue_freeze++; 1523 1524 ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp, 1525 chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS); 1526 } 1527 1528 void 1529 ata_channel_freeze(struct ata_channel *chp) 1530 { 1531 ata_channel_lock(chp); 1532 ata_channel_freeze_locked(chp); 1533 ata_channel_unlock(chp); 1534 } 1535 1536 void 1537 ata_channel_thaw_locked(struct ata_channel *chp) 1538 { 1539 KASSERT(mutex_owned(&chp->ch_lock)); 1540 KASSERT(chp->ch_queue->queue_freeze > 0); 1541 1542 chp->ch_queue->queue_freeze--; 

	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
}

/*
 * ata_thread_run:
 *
 *	Reset an ATA channel. Channel lock must be held. arg is type-specific.
 */
void
ata_thread_run(struct ata_channel *chp, int flags, int type, int arg)
{
	struct atac_softc *atac = chp->ch_atac;
	bool threset = false;
	struct ata_drive_datas *drvp;

	ata_channel_lock_owned(chp);

	/*
	 * If we can poll or wait it's OK, otherwise wake up the
	 * kernel thread to do it for us.
	 */
	ATADEBUG_PRINT(("%s flags 0x%x ch_flags 0x%x\n",
	    __func__, flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
	if ((flags & (AT_POLL | AT_WAIT)) == 0) {
		/* Schedule-to-thread path: record the request and return */
		switch (type) {
		case ATACH_TH_RESET:
			if (chp->ch_flags & ATACH_TH_RESET) {
				/* No need to schedule another reset */
				return;
			}
			break;
		case ATACH_TH_DRIVE_RESET:
		    {
			int drive = arg;

			/*
			 * NOTE(review): bound looks off-by-one — drive ==
			 * ch_ndrives would pass the KASSERT yet index past
			 * the ch_drive array; confirm callers never pass it.
			 */
			KASSERT(drive <= chp->ch_ndrives);
			drvp = &chp->ch_drive[drive];

			if (drvp->drive_flags & ATA_DRIVE_TH_RESET) {
				/* No need to schedule another reset */
				return;
			}
			drvp->drive_flags |= ATA_DRIVE_TH_RESET;
			break;
		    }
		case ATACH_TH_RECOVERY:
		    {
			uint32_t tfd = (uint32_t)arg;

			KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
			chp->recovery_tfd = tfd;
			break;
		    }
		default:
			panic("%s: unknown type: %x", __func__, type);
			/* NOTREACHED */
		}

		/*
		 * Block execution of other commands while reset is scheduled
		 * to a thread.
		 */
		ata_channel_freeze_locked(chp);
		chp->ch_flags |= type;

		cv_signal(&chp->ch_thr_idle);
		return;
	}

	/* Block execution of other commands during reset */
	ata_channel_freeze_locked(chp);

	/*
	 * If reset has been scheduled to a thread, then clear
	 * the flag now so that the thread won't try to execute it if
	 * we happen to sleep, and thaw one more time after the reset.
	 */
	if (chp->ch_flags & type) {
		chp->ch_flags &= ~type;
		threset = true;
	}

	switch (type) {
	case ATACH_TH_RESET:
		(*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);

		KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
		for (int drive = 0; drive < chp->ch_ndrives; drive++)
			chp->ch_drive[drive].state = 0;
		break;

	case ATACH_TH_DRIVE_RESET:
	    {
		int drive = arg;

		/* NOTE(review): same off-by-one-looking bound as above */
		KASSERT(drive <= chp->ch_ndrives);
		drvp = &chp->ch_drive[drive];
		(*atac->atac_bustype_ata->ata_reset_drive)(drvp, flags, NULL);
		drvp->state = 0;
		break;
	    }

	case ATACH_TH_RECOVERY:
	    {
		uint32_t tfd = (uint32_t)arg;

		KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
		KASSERT(atac->atac_bustype_ata->ata_recovery != NULL);

		SET(chp->ch_flags, ATACH_RECOVERING);
		(*atac->atac_bustype_ata->ata_recovery)(chp, flags, tfd);
		CLR(chp->ch_flags, ATACH_RECOVERING);
		break;
	    }

	default:
		panic("%s: unknown type: %x", __func__, type);
		/* NOTREACHED */
	}

	/*
	 * Thaw one extra time to clear the freeze done when the reset has
	 * been scheduled to the thread.
	 */
	if (threset)
		ata_channel_thaw_locked(chp);

	/* Allow commands to run again */
	ata_channel_thaw_locked(chp);

	/* Signal the thread in case there is an xfer to run */
	cv_signal(&chp->ch_thr_idle);
}

/*
 * Take a reference on the adapter, enabling it on first reference.
 * Returns 0 on success or the error from the adapt_enable hook.
 */
int
ata_addref(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 &&
	    adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(atac->atac_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * Drop a reference on the adapter, disabling it when the last one goes.
 */
void
ata_delref(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 &&
	    adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(atac->atac_dev, 0);
	splx(s);
}

/*
 * Print the negotiated transfer modes of every attached drive on the
 * channel (verbose boot output).
 */
void
ata_print_modes(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	int drive;
	struct ata_drive_datas *drvp;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (drive = 0; drive < chp->ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];
		if (drvp->drive_type == ATA_DRIVET_NONE ||
		    drvp->drv_softc == NULL)
			continue;
		aprint_verbose("%s(%s:%d:%d): using PIO mode %d",
		    device_xname(drvp->drv_softc),
		    device_xname(atac->atac_dev),
		    chp->ch_channel, drvp->drive, drvp->PIO_mode);
#if NATA_DMA
		if (drvp->drive_flags & ATA_DRIVE_DMA)
			aprint_verbose(", DMA mode %d", drvp->DMA_mode);
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			aprint_verbose(", Ultra-DMA mode %d", drvp->UDMA_mode);
			if (drvp->UDMA_mode == 2)
				aprint_verbose(" (Ultra/33)");
			else if (drvp->UDMA_mode ==
4)
				aprint_verbose(" (Ultra/66)");
			else if (drvp->UDMA_mode == 5)
				aprint_verbose(" (Ultra/100)");
			else if (drvp->UDMA_mode == 6)
				aprint_verbose(" (Ultra/133)");
		}
#endif /* NATA_UDMA */
#endif /* NATA_DMA */
#if NATA_DMA || NATA_PIOBM
		if (0
#if NATA_DMA
		    || (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA))
#endif
#if NATA_PIOBM
		    /* PIOBM capable controllers use DMA for PIO commands */
		    || (atac->atac_cap & ATAC_CAP_PIOBM)
#endif
		    )
			aprint_verbose(" (using DMA)");

		if (drvp->drive_flags & ATA_DRIVE_NCQ) {
			aprint_verbose(", NCQ (%d tags)%s",
			    ATA_REAL_OPENINGS(chp->ch_queue->queue_openings),
			    (drvp->drive_flags & ATA_DRIVE_NCQ_PRIO)
			    ? " w/PRIO" : "");
		} else if (drvp->drive_flags & ATA_DRIVE_WFUA)
			aprint_verbose(", WRITE DMA FUA EXT");

#endif /* NATA_DMA || NATA_PIOBM */
		aprint_verbose("\n");
	}
}

#if defined(ATA_DOWNGRADE_MODE) && NATA_DMA
/*
 * downgrade the transfer mode of a drive after an error. return 1 if
 * downgrade was possible, 0 otherwise.
 *
 * MUST BE CALLED AT splbio()!
 */
static int
ata_downgrade_mode(struct ata_drive_datas *drvp, int flags)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int cf_flags = device_cfdata(drv_dev)->cf_flags;

	ata_channel_lock_owned(drvp->chnl_softc);

	/* if drive or controller don't know its mode, we can't do much */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0 ||
	    (atac->atac_set_modes == NULL))
		return 0;
	/* current drive mode was set by a config flag, leave it that way */
	if ((cf_flags & ATA_CONFIG_PIO_SET) ||
	    (cf_flags & ATA_CONFIG_DMA_SET) ||
	    (cf_flags & ATA_CONFIG_UDMA_SET))
		return 0;

#if NATA_UDMA
	/*
	 * If we were using Ultra-DMA mode, downgrade to the next lower mode.
	 */
	if ((drvp->drive_flags & ATA_DRIVE_UDMA) && drvp->UDMA_mode >= 2) {
		drvp->UDMA_mode--;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to Ultra-DMA mode %d\n",
		    drvp->UDMA_mode);
	}
#endif

	/*
	 * If we were using ultra-DMA, don't downgrade to multiword DMA.
	 */
	else if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
		drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
		drvp->PIO_mode = drvp->PIO_cap;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to PIO mode %d\n",
		    drvp->PIO_mode);
	} else /* already using PIO, can't downgrade */
		return 0;

	(*atac->atac_set_modes)(chp);
	ata_print_modes(chp);
	/* reset the channel, which will schedule all drives for setup */
	ata_thread_run(chp, flags, ATACH_TH_RESET, ATACH_NODRIVE);
	return 1;
}
#endif /* ATA_DOWNGRADE_MODE && NATA_DMA */

/*
 * Probe drive's capabilities, for use by the controller later
 * Assumes drvp points to an existing drive.
 */
void
ata_probe_caps(struct ata_drive_datas *drvp)
{
	struct ataparams params, params2;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int i, printed = 0;
	const char *sep = "";
	int cf_flags;

	/* may sleep: all IDENTIFY / SET FEATURES below use AT_WAIT */
	if (ata_get_params(drvp, AT_WAIT, &params) != CMD_OK) {
		/* IDENTIFY failed. Can't tell more about the device */
		return;
	}
	if ((atac->atac_cap & (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) ==
	    (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) {
		/*
		 * Controller claims 16 and 32 bit transfers.
		 * Re-do an IDENTIFY with 32-bit transfers,
		 * and compare results.
		 */
		ata_channel_lock(chp);
		drvp->drive_flags |= ATA_DRIVE_CAP32;
		ata_channel_unlock(chp);
		ata_get_params(drvp, AT_WAIT, &params2);
		if (memcmp(&params, &params2, sizeof(struct ataparams)) != 0) {
			/* Not good. fall back to 16bits */
			ata_channel_lock(chp);
			drvp->drive_flags &= ~ATA_DRIVE_CAP32;
			ata_channel_unlock(chp);
		} else {
			aprint_verbose_dev(drv_dev, "32-bit data port\n");
		}
	}
#if 0 /* Some ultra-DMA drives claims to only support ATA-3. sigh */
	if (params.atap_ata_major > 0x01 &&
	    params.atap_ata_major != 0xffff) {
		for (i = 14; i > 0; i--) {
			if (params.atap_ata_major & (1 << i)) {
				aprint_verbose_dev(drv_dev,
				    "ATA version %d\n", i);
				drvp->ata_vers = i;
				break;
			}
		}
	}
#endif

	/* An ATAPI device is at least PIO mode 3 */
	if (drvp->drive_type == ATA_DRIVET_ATAPI)
		drvp->PIO_mode = 3;

	/*
	 * It's not in the specs, but it seems that some drive
	 * returns 0xffff in atap_extensions when this field is invalid
	 */
	if (params.atap_extensions != 0xffff &&
	    (params.atap_extensions & WDC_EXT_MODES)) {
		/*
		 * XXX some drives report something wrong here (they claim to
		 * support PIO mode 8 !). As mode is coded on 3 bits in
		 * SET FEATURE, limit it to 7 (so limit i to 4).
		 * If higher mode than 7 is found, abort.
		 */
		for (i = 7; i >= 0; i--) {
			if ((params.atap_piomode_supp & (1 << i)) == 0)
				continue;
			if (i > 4)
				return;
			/*
			 * See if mode is accepted.
			 * If the controller can't set its PIO mode,
			 * assume the defaults are good, so don't try
			 * to set it
			 */
			if (atac->atac_set_modes)
				/*
				 * It's OK to poll here, it's fast enough
				 * to not bother waiting for interrupt
				 */
				if (ata_set_mode(drvp, 0x08 | (i + 3),
				    AT_WAIT) != CMD_OK)
					continue;
			if (!printed) {
				aprint_verbose_dev(drv_dev,
				    "drive supports PIO mode %d", i + 3);
				sep = ",";
				printed = 1;
			}
			/*
			 * If controller's driver can't set its PIO mode,
			 * get the higher one for the drive.
			 */
			if (atac->atac_set_modes == NULL ||
			    atac->atac_pio_cap >= i + 3) {
				drvp->PIO_mode = i + 3;
				drvp->PIO_cap = i + 3;
				break;
			}
		}
		if (!printed) {
			/*
			 * We didn't find a valid PIO mode.
			 * Assume the values returned for DMA are buggy too
			 */
			return;
		}
		ata_channel_lock(chp);
		drvp->drive_flags |= ATA_DRIVE_MODE;
		ata_channel_unlock(chp);
		printed = 0;
		for (i = 7; i >= 0; i--) {
			if ((params.atap_dmamode_supp & (1 << i)) == 0)
				continue;
#if NATA_DMA
			if ((atac->atac_cap & ATAC_CAP_DMA) &&
			    atac->atac_set_modes != NULL)
				if (ata_set_mode(drvp, 0x20 | i, AT_WAIT)
				    != CMD_OK)
					continue;
#endif
			if (!printed) {
				aprint_verbose("%s DMA mode %d", sep, i);
				sep = ",";
				printed = 1;
			}
#if NATA_DMA
			if (atac->atac_cap & ATAC_CAP_DMA) {
				if (atac->atac_set_modes != NULL &&
				    atac->atac_dma_cap < i)
					continue;
				drvp->DMA_mode = i;
				drvp->DMA_cap = i;
				ata_channel_lock(chp);
				drvp->drive_flags |= ATA_DRIVE_DMA;
				ata_channel_unlock(chp);
			}
#endif
			break;
		}
		if (params.atap_extensions & WDC_EXT_UDMA_MODES) {
			printed = 0;
			for (i = 7; i >= 0; i--) {
				if ((params.atap_udmamode_supp & (1 << i))
				    == 0)
					continue;
#if NATA_UDMA
				if (atac->atac_set_modes != NULL &&
				    (atac->atac_cap & ATAC_CAP_UDMA))
					if (ata_set_mode(drvp, 0x40 | i,
					    AT_WAIT) != CMD_OK)
						continue;
#endif
				if (!printed) {
					aprint_verbose("%s Ultra-DMA mode %d",
					    sep, i);
					if (i == 2)
						aprint_verbose(" (Ultra/33)");
					else if (i == 4)
						aprint_verbose(" (Ultra/66)");
					else if (i == 5)
						aprint_verbose(" (Ultra/100)");
					else if (i == 6)
						aprint_verbose(" (Ultra/133)");
					sep = ",";
					printed = 1;
				}
#if NATA_UDMA
				if (atac->atac_cap & ATAC_CAP_UDMA) {
					if (atac->atac_set_modes != NULL &&
					    atac->atac_udma_cap < i)
						continue;
					drvp->UDMA_mode = i;
					drvp->UDMA_cap = i;
					ata_channel_lock(chp);
					drvp->drive_flags |= ATA_DRIVE_UDMA;
					ata_channel_unlock(chp);
				}
#endif
				break;
			}
		}
	}

	ata_channel_lock(chp);
	drvp->drive_flags &= ~ATA_DRIVE_NOSTREAM;
	if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		if (atac->atac_cap & ATAC_CAP_ATAPI_NOSTREAM)
			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
	} else {
		if (atac->atac_cap & ATAC_CAP_ATA_NOSTREAM)
			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
	}
	ata_channel_unlock(chp);

	/* Try to guess ATA version here, if it didn't get reported */
	if (drvp->ata_vers == 0) {
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA)
			drvp->ata_vers = 4; /* should be at least ATA-4 */
		else
#endif
		if (drvp->PIO_cap > 2)
			drvp->ata_vers = 2; /* should be at least ATA-2 */
	}
	/* config(5) cf_flags may force/override the negotiated modes */
	cf_flags = device_cfdata(drv_dev)->cf_flags;
	if (cf_flags & ATA_CONFIG_PIO_SET) {
		ata_channel_lock(chp);
		drvp->PIO_mode =
		    (cf_flags & ATA_CONFIG_PIO_MODES) >> ATA_CONFIG_PIO_OFF;
		drvp->drive_flags |= ATA_DRIVE_MODE;
		ata_channel_unlock(chp);
	}
#if NATA_DMA
	if ((atac->atac_cap & ATAC_CAP_DMA) == 0) {
		/* don't care about DMA modes */
		goto out;
	}
	if (cf_flags & ATA_CONFIG_DMA_SET) {
		ata_channel_lock(chp);
		if ((cf_flags & ATA_CONFIG_DMA_MODES) ==
		    ATA_CONFIG_DMA_DISABLE) {
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
		} else {
			drvp->DMA_mode = (cf_flags & ATA_CONFIG_DMA_MODES) >>
			    ATA_CONFIG_DMA_OFF;
			drvp->drive_flags |= ATA_DRIVE_DMA | ATA_DRIVE_MODE;
		}
		ata_channel_unlock(chp);
	}

	/*
	 * Probe WRITE DMA FUA EXT. Support is mandatory for devices
	 * supporting LBA48, but nevertheless confirm with the feature flag.
	 */
	if (drvp->drive_flags & ATA_DRIVE_DMA) {
		if ((params.atap_cmd2_en & ATA_CMD2_LBA48) != 0
		    && (params.atap_cmd_def & ATA_CMDE_WFE)) {
			drvp->drive_flags |= ATA_DRIVE_WFUA;
			aprint_verbose("%s WRITE DMA FUA", sep);
			sep = ",";
		}
	}

	/* Probe NCQ support - READ/WRITE FPDMA QUEUED command support */
	ata_channel_lock(chp);
	drvp->drv_openings = 1;
	if (params.atap_sata_caps & SATA_NATIVE_CMDQ) {
		if (atac->atac_cap & ATAC_CAP_NCQ)
			drvp->drive_flags |= ATA_DRIVE_NCQ;
		drvp->drv_openings =
		    (params.atap_queuedepth & WDC_QUEUE_DEPTH_MASK) + 1;
		aprint_verbose("%s NCQ (%d tags)", sep, drvp->drv_openings);
		sep = ",";

		if (params.atap_sata_caps & SATA_NCQ_PRIO) {
			drvp->drive_flags |= ATA_DRIVE_NCQ_PRIO;
			aprint_verbose(" w/PRIO");
		}
	}
	ata_channel_unlock(chp);

#if NATA_UDMA
	if ((atac->atac_cap & ATAC_CAP_UDMA) == 0) {
		/* don't care about UDMA modes */
		goto out;
	}
	if (cf_flags & ATA_CONFIG_UDMA_SET) {
		ata_channel_lock(chp);
		if ((cf_flags & ATA_CONFIG_UDMA_MODES) ==
		    ATA_CONFIG_UDMA_DISABLE) {
			drvp->drive_flags &= ~ATA_DRIVE_UDMA;
		} else {
			drvp->UDMA_mode = (cf_flags & ATA_CONFIG_UDMA_MODES) >>
			    ATA_CONFIG_UDMA_OFF;
			drvp->drive_flags |= ATA_DRIVE_UDMA | ATA_DRIVE_MODE;
		}
		ata_channel_unlock(chp);
	}
#endif /* NATA_UDMA */
out:
#endif /* NATA_DMA */
	/* sep was set as soon as anything was printed; finish the line */
	if (*sep != '\0')
		aprint_verbose("\n");
}

/* management of the /dev/atabus* devices */
int
atabusopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct atabus_softc *sc;
	int error;

	sc = device_lookup_private(&atabus_cd, minor(dev));
	if (sc == NULL)
		return (ENXIO);

	/* exclusive open */
	if (sc->sc_flags & ATABUSCF_OPEN)
		return (EBUSY);

	if ((error = ata_addref(sc->sc_chan)) != 0)
		return (error);

	sc->sc_flags |= ATABUSCF_OPEN;

	return (0);
}


int
atabusclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct atabus_softc *sc =
	    device_lookup_private(&atabus_cd, minor(dev));

	ata_delref(sc->sc_chan);

	sc->sc_flags &= ~ATABUSCF_OPEN;

	return (0);
}

int
atabusioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct atabus_softc *sc =
	    device_lookup_private(&atabus_cd, minor(dev));
	struct ata_channel *chp = sc->sc_chan;
	int min_drive, max_drive, drive;
	int error;

	/*
	 * Enforce write permission for ioctls that change the
	 * state of the bus. Host adapter specific ioctls must
	 * be checked by the adapter driver.
	 */
	switch (cmd) {
	case ATABUSIOSCAN:
	case ATABUSIODETACH:
	case ATABUSIORESET:
		if ((flag & FWRITE) == 0)
			return (EBADF);
	}

	switch (cmd) {
	case ATABUSIORESET:
		ata_channel_lock(chp);
		ata_thread_run(sc->sc_chan, AT_WAIT | AT_POLL,
		    ATACH_TH_RESET, ATACH_NODRIVE);
		ata_channel_unlock(chp);
		return 0;
	case ATABUSIOSCAN:
	{
#if 0
		struct atabusioscan_args *a=
		    (struct atabusioscan_args *)addr;
#endif
		/* scanning is not implemented; OLD drives can't be scanned */
		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
			return (EOPNOTSUPP);
		return (EOPNOTSUPP);
	}
	case ATABUSIODETACH:
	{
		struct atabusiodetach_args *a=
		    (struct atabusiodetach_args *)addr;
		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
			return (EOPNOTSUPP);
		/* at_dev == -1 means both drives */
		switch (a->at_dev) {
		case -1:
			min_drive = 0;
			max_drive = 1;
			break;
		case 0:
		case 1:
			min_drive = max_drive = a->at_dev;
			break;
		default:
			return (EINVAL);
		}
		for (drive = min_drive; drive <= max_drive; drive++) {
			if (chp->ch_drive[drive].drv_softc != NULL) {
				error = config_detach(
				    chp->ch_drive[drive].drv_softc, 0);
				if (error)
					return (error);
				KASSERT(chp->ch_drive[drive].drv_softc == NULL);
			}
		}
		return 0;
	}
	default:
		return ENOTTY;
	}
}

/*
 * pmf suspend hook: wait for the channel queue to go idle.
 */
static bool
atabus_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct atabus_softc *sc = device_private(dv);
	struct ata_channel *chp = sc->sc_chan;

	ata_channel_idle(chp);

	return true;
}

/*
 * pmf resume hook: thaw the queue frozen at suspend and reset the channel.
 */
static bool
atabus_resume(device_t dv, const pmf_qual_t *qual)
{
	struct atabus_softc *sc = device_private(dv);
	struct ata_channel *chp = sc->sc_chan;

	/*
	 * XXX joerg: with wdc, the first channel unfreezes the controller.
	 * Move this the reset and queue idling into wdc.
	 */
	ata_channel_lock(chp);
	if (chp->ch_queue->queue_freeze == 0) {
		ata_channel_unlock(chp);
		goto out;
	}

	/* unfreeze the queue and reset drives */
	ata_channel_thaw_locked(chp);

	/* reset channel only if there are drives attached */
	if (chp->ch_ndrives > 0)
		ata_thread_run(chp, AT_WAIT, ATACH_TH_RESET, ATACH_NODRIVE);

	ata_channel_unlock(chp);

out:
	return true;
}

/*
 * autoconf rescan hook: queue a probe of the bus to the channel thread.
 */
static int
atabus_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	struct atabus_initq *initq;
	int i;

	/*
	 * we can rescan a port multiplier atabus, even if some devices are
	 * still attached
	 */
	if (chp->ch_satapmp_nports == 0) {
		if (chp->atapibus != NULL) {
			return EBUSY;
		}

		KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drv_softc != NULL) {
				return EBUSY;
			}
		}
	}

	/* queue ourselves for the boot-ordered probe sequence */
	initq = kmem_zalloc(sizeof(*initq), KM_SLEEP);
	initq->atabus_sc = sc;
	mutex_enter(&atabus_qlock);
	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
	mutex_exit(&atabus_qlock);
	config_pending_incr(sc->sc_dev);

	/* the channel thread performs the actual rescan */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RESCAN;
	cv_signal(&chp->ch_thr_idle);
	ata_channel_unlock(chp);

	return 0;
}

/*
 * Delay for 'ms' milliseconds.  With AT_POLL only, busy-wait (interrupt
 * or dump context); otherwise sleep, dropping the channel lock.
 */
void
ata_delay(struct ata_channel *chp, int ms, const char *msg, int flags)
{
	KASSERT(mutex_owned(&chp->ch_lock));

	if ((flags & (AT_WAIT | AT_POLL)) == AT_POLL) {
		/*
		 * can't use kpause(), we may be in interrupt context
		 * or taking a crash dump
		 */
		delay(ms * 1000);
	} else {
		int pause = mstohz(ms);

		kpause(msg, false, pause > 0 ? pause : 1, &chp->ch_lock);
	}
}

/*
 * Rewrite a bio command's register values for NCQ (FPDMA QUEUED): the
 * sector count moves to features, the tag goes in count, and FUA/PRIO
 * flags are folded in.  Non-NCQ xfers only get FUA command promotion.
 */
void
atacmd_toncq(struct ata_xfer *xfer, uint8_t *cmd, uint16_t *count,
    uint16_t *features, uint8_t *device)
{
	if ((xfer->c_flags & C_NCQ) == 0) {
		/* FUA handling for non-NCQ drives */
		if (xfer->c_bio.flags & ATA_FUA
		    && *cmd == WDCC_WRITEDMA_EXT)
			*cmd = WDCC_WRITEDMA_FUA_EXT;

		return;
	}

	*cmd = (xfer->c_bio.flags & ATA_READ) ?
	    WDCC_READ_FPDMA_QUEUED : WDCC_WRITE_FPDMA_QUEUED;

	/* for FPDMA the block count is in features */
	*features = *count;

	/* NCQ tag */
	*count = (xfer->c_slot << 3);

	if (xfer->c_bio.flags & ATA_PRIO_HIGH)
		*count |= WDSC_PRIO_HIGH;

	/* other device flags */
	if (xfer->c_bio.flags & ATA_FUA)
		*device |= WDSD_FUA;
}

/*
 * Sleep until the given command xfer is flagged AT_DONE; woken by the
 * cv_broadcast() in ata_deactivate_xfer().
 */
void
ata_wait_cmd(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue *chq = chp->ch_queue;
	struct ata_command *ata_c = &xfer->c_ata_c;

	ata_channel_lock(chp);

	while ((ata_c->flags & AT_DONE) == 0)
		cv_wait(&chq->c_cmd_finish, &chp->ch_lock);

	ata_channel_unlock(chp);

	KASSERT((ata_c->flags & AT_DONE) != 0);
}