1 /* $OpenBSD: atascsi.c,v 1.64 2009/02/16 21:19:06 miod Exp $ */ 2 3 /* 4 * Copyright (c) 2007 David Gwynne <dlg@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <sys/param.h> 20 #include <sys/systm.h> 21 #include <sys/buf.h> 22 #include <sys/kernel.h> 23 #include <sys/malloc.h> 24 #include <sys/device.h> 25 #include <sys/proc.h> 26 #include <sys/queue.h> 27 28 #include <scsi/scsi_all.h> 29 #include <scsi/scsi_disk.h> 30 #include <scsi/scsiconf.h> 31 32 #include <dev/ata/atascsi.h> 33 34 #include <sys/ataio.h> 35 36 struct atascsi { 37 struct device *as_dev; 38 void *as_cookie; 39 40 struct ata_port **as_ports; 41 42 struct atascsi_methods *as_methods; 43 struct scsi_adapter as_switch; 44 struct scsi_link as_link; 45 struct scsibus_softc *as_scsibus; 46 47 int as_capability; 48 }; 49 50 int atascsi_cmd(struct scsi_xfer *); 51 int atascsi_ioctl(struct scsi_link *, u_long, caddr_t, int, 52 struct proc *); 53 int atascsi_probe(struct scsi_link *); 54 void atascsi_free(struct scsi_link *); 55 56 /* template */ 57 struct scsi_adapter atascsi_switch = { 58 atascsi_cmd, /* scsi_cmd */ 59 scsi_minphys, /* scsi_minphys */ 60 atascsi_probe, /* dev_probe */ 61 atascsi_free, /* dev_free */ 62 atascsi_ioctl /* ioctl */ 63 }; 64 65 struct scsi_device atascsi_device = { 66 NULL, NULL, NULL, NULL 67 }; 68 69 void ata_fix_identify(struct ata_identify *); 70 71 int atascsi_disk_cmd(struct scsi_xfer *); 72 void atascsi_disk_cmd_done(struct ata_xfer *); 73 int atascsi_disk_inq(struct scsi_xfer *); 74 int atascsi_disk_inquiry(struct scsi_xfer *); 75 int atascsi_disk_vpd_supported(struct scsi_xfer *); 76 int atascsi_disk_vpd_serial(struct scsi_xfer *); 77 int atascsi_disk_vpd_ident(struct scsi_xfer *); 78 int atascsi_disk_capacity(struct scsi_xfer *); 79 int atascsi_disk_sync(struct scsi_xfer *); 80 void atascsi_disk_sync_done(struct ata_xfer *); 81 int atascsi_disk_sense(struct scsi_xfer *); 82 83 int atascsi_atapi_cmd(struct scsi_xfer *); 84 void atascsi_atapi_cmd_done(struct ata_xfer *); 85 86 int atascsi_done(struct scsi_xfer *, int); 87 88 int ata_running = 0; 89 90 int ata_exec(struct atascsi *, struct ata_xfer *); 91 92 struct ata_xfer *ata_get_xfer(struct ata_port *); 93 void ata_put_xfer(struct ata_xfer *); 94 95 struct atascsi * 96 atascsi_attach(struct device *self, struct atascsi_attach_args *aaa) 97 { 98 struct scsibus_attach_args saa; 99 struct atascsi *as; 100 101 as = malloc(sizeof(*as), M_DEVBUF, M_WAITOK | M_ZERO); 102 103 as->as_dev = self; 104 as->as_cookie = aaa->aaa_cookie; 105 as->as_methods = aaa->aaa_methods; 106 as->as_capability = aaa->aaa_capability; 107 108 /* copy from template and modify for ourselves */ 109 as->as_switch = atascsi_switch; 110 if (aaa->aaa_minphys != NULL) 111 as->as_switch.scsi_minphys = aaa->aaa_minphys; 112 113 /* fill in our scsi_link */ 114 as->as_link.device = &atascsi_device; 115 as->as_link.adapter = &as->as_switch; 116 as->as_link.adapter_softc = as; 117 as->as_link.adapter_buswidth = aaa->aaa_nports; 118 as->as_link.luns = 1; /* XXX port multiplier as luns */ 119 as->as_link.adapter_target = aaa->aaa_nports; 120 as->as_link.openings = aaa->aaa_ncmds; 121 if (as->as_capability & ASAA_CAP_NEEDS_RESERVED) 122 as->as_link.openings--; 123 124 as->as_ports = malloc(sizeof(struct ata_port *) * aaa->aaa_nports, 125 M_DEVBUF, M_WAITOK | M_ZERO); 126 127 bzero(&saa, sizeof(saa)); 128 saa.saa_sc_link = &as->as_link; 129 130 /* stash the scsibus so we can do hotplug on it */ 131 as->as_scsibus = (struct scsibus_softc *)config_found(self, &saa, 132 scsiprint); 133 134 return (as); 135 } 136 137 int 138 atascsi_detach(struct atascsi *as, int flags) 139 { 140 int rv; 141 142 rv = config_detach((struct device *)as->as_scsibus, flags); 143 if (rv != 0) 144 return (rv); 145 146 free(as->as_ports, M_DEVBUF); 147 free(as, M_DEVBUF); 148 149 return (0); 150 } 151 152 int 153 atascsi_probe_dev(struct atascsi *as, int port) 154 { 155 return (scsi_probe_target(as->as_scsibus, port)); 156 } 157 158 int 159 atascsi_detach_dev(struct atascsi *as, int port, int flags) 160 { 161 return (scsi_detach_target(as->as_scsibus, port, flags)); 162 } 163 164 int 165 atascsi_probe(struct scsi_link *link) 166 { 167 struct atascsi *as = link->adapter_softc; 168 struct ata_port *ap; 169 struct ata_xfer *xa; 170 int port, type; 171 int rv; 172 173 /* revisit this when we do port multipliers */ 174 if (link->lun > 0) 175 return (ENXIO); 176 177 port = link->target; 178 if (port > as->as_link.adapter_buswidth) 179 return (ENXIO); 180 181 type = as->as_methods->probe(as->as_cookie, port); 182 switch (type) { 183 case ATA_PORT_T_DISK: 184 break; 185 case ATA_PORT_T_ATAPI: 186 link->flags |= SDEV_ATAPI; 187 link->quirks |= SDEV_ONLYBIG; 188 break; 189 default: 190 rv = ENODEV; 191 goto unsupported; 192 } 193 194 ap = malloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO); 195 ap->ap_as = as; 196 ap->ap_port = port; 197 ap->ap_type = type; 198 199 /* fetch the device info */ 200 xa = ata_get_xfer(ap); 201 if (xa == NULL) 202 panic("no free xfers on a new port"); 203 xa->data = &ap->ap_identify; 204 xa->datalen = sizeof(ap->ap_identify); 205 xa->fis->flags = ATA_H2D_FLAGS_CMD; 206 xa->fis->command = ATA_C_IDENTIFY; 207 xa->fis->device = 0; 208 xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL; 209 xa->complete = ata_put_xfer; 210 xa->timeout = 1000; 211 if (ata_exec(as, xa) != COMPLETE) { 212 rv = EIO; 213 goto error; 214 } 215 216 ata_fix_identify(&ap->ap_identify); 217 218 as->as_ports[port] = ap; 219 220 if (type != ATA_PORT_T_DISK) 221 return (0); 222 223 /* Enable write cache if supported */ 224 if (ap->ap_identify.cmdset82 & ATA_IDENTIFY_WRITECACHE) { 225 xa = ata_get_xfer(ap); 226 if (xa == NULL) 227 panic("no free xfers on a new port"); 228 xa->fis->command = ATA_C_SET_FEATURES; 229 xa->fis->features = ATA_SF_WRITECACHE_EN; 230 xa->fis->flags = ATA_H2D_FLAGS_CMD; 231 xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL; 232 xa->complete = ata_put_xfer; 233 xa->timeout = 1000; 234 ata_exec(as, xa); /* we dont care if this works or not */ 235 } 236 237 /* Enable read lookahead if supported */ 238 if (ap->ap_identify.cmdset82 & ATA_IDENTIFY_LOOKAHEAD) { 239 xa = ata_get_xfer(ap); 240 if (xa == NULL) 241 panic("no free xfers on a new port"); 242 xa->fis->command = ATA_C_SET_FEATURES; 243 xa->fis->features = ATA_SF_LOOKAHEAD_EN; 244 xa->fis->flags = ATA_H2D_FLAGS_CMD; 245 xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL; 246 xa->complete = ata_put_xfer; 247 xa->timeout = 1000; 248 ata_exec(as, xa); /* we dont care if this works or not */ 249 } 250 251 /* 252 * FREEZE LOCK the device so malicous users can't lock it on us. 253 * As there is no harm in issuing this to devices that don't 254 * support the security feature set we just send it, and don't bother 255 * checking if the device sends a command abort to tell us it doesn't 256 * support it 257 */ 258 xa = ata_get_xfer(ap); 259 if (xa == NULL) 260 panic("no free xfers on a new port"); 261 xa->fis->command = ATA_C_SEC_FREEZE_LOCK; 262 xa->fis->flags = ATA_H2D_FLAGS_CMD; 263 xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL; 264 xa->complete = ata_put_xfer; 265 xa->timeout = 1000; 266 ata_exec(as, xa); /* we dont care if this works or not */ 267 268 return (0); 269 error: 270 free(ap, M_DEVBUF); 271 unsupported: 272 as->as_methods->free(as->as_cookie, port); 273 return (rv); 274 } 275 276 void 277 atascsi_free(struct scsi_link *link) 278 { 279 struct atascsi *as = link->adapter_softc; 280 struct ata_port *ap; 281 int port; 282 283 if (link->lun > 0) 284 return; 285 286 port = link->target; 287 if (port > as->as_link.adapter_buswidth) 288 return; 289 290 ap = as->as_ports[port]; 291 if (ap == NULL) 292 return; 293 294 free(ap, M_DEVBUF); 295 as->as_ports[port] = NULL; 296 297 as->as_methods->free(as->as_cookie, port); 298 } 299 300 void 301 ata_fix_identify(struct ata_identify *id) 302 { 303 u_int16_t *swap; 304 int i; 305 306 swap = (u_int16_t *)id->serial; 307 for (i = 0; i < sizeof(id->serial) / sizeof(u_int16_t); i++) 308 swap[i] = swap16(swap[i]); 309 310 swap = (u_int16_t *)id->firmware; 311 for (i = 0; i < sizeof(id->firmware) / sizeof(u_int16_t); i++) 312 swap[i] = swap16(swap[i]); 313 314 swap = (u_int16_t *)id->model; 315 for (i = 0; i < sizeof(id->model) / sizeof(u_int16_t); i++) 316 swap[i] = swap16(swap[i]); 317 } 318 319 int 320 atascsi_cmd(struct scsi_xfer *xs) 321 { 322 struct scsi_link *link = xs->sc_link; 323 struct atascsi *as = link->adapter_softc; 324 struct ata_port *ap = as->as_ports[link->target]; 325 326 if (ap == NULL) 327 return (atascsi_done(xs, XS_DRIVER_STUFFUP)); 328 329 switch (ap->ap_type) { 330 case ATA_PORT_T_DISK: 331 return (atascsi_disk_cmd(xs)); 332 case ATA_PORT_T_ATAPI: 333 return (atascsi_atapi_cmd(xs)); 334 335 case ATA_PORT_T_NONE: 336 default: 337 return (atascsi_done(xs, XS_DRIVER_STUFFUP)); 338 } 339 } 340 341 int 342 atascsi_disk_cmd(struct scsi_xfer *xs) 343 { 344 struct scsi_link *link = xs->sc_link; 345 struct atascsi *as = link->adapter_softc; 346 struct ata_port *ap = as->as_ports[link->target]; 347 int flags = 0; 348 struct scsi_rw *rw; 349 struct scsi_rw_big *rwb; 350 struct ata_xfer *xa; 351 struct ata_fis_h2d *fis; 352 u_int64_t lba; 353 u_int32_t sector_count; 354 355 switch (xs->cmd->opcode) { 356 case READ_BIG: 357 case READ_COMMAND: 358 flags = ATA_F_READ; 359 break; 360 case WRITE_BIG: 361 case WRITE_COMMAND: 362 flags = ATA_F_WRITE; 363 /* deal with io outside the switch */ 364 break; 365 366 case SYNCHRONIZE_CACHE: 367 return (atascsi_disk_sync(xs)); 368 case REQUEST_SENSE: 369 return (atascsi_disk_sense(xs)); 370 case INQUIRY: 371 return (atascsi_disk_inq(xs)); 372 case READ_CAPACITY: 373 return (atascsi_disk_capacity(xs)); 374 375 case TEST_UNIT_READY: 376 case START_STOP: 377 case PREVENT_ALLOW: 378 return (atascsi_done(xs, XS_NOERROR)); 379 380 default: 381 return (atascsi_done(xs, XS_DRIVER_STUFFUP)); 382 } 383 384 xa = ata_get_xfer(ap); 385 if (xa == NULL) 386 return (NO_CCB); 387 388 xa->flags = flags; 389 if (xs->cmdlen == 6) { 390 rw = (struct scsi_rw *)xs->cmd; 391 lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff); 392 sector_count = rw->length ? rw->length : 0x100; 393 } else { 394 rwb = (struct scsi_rw_big *)xs->cmd; 395 lba = _4btol(rwb->addr); 396 sector_count = _2btol(rwb->length); 397 } 398 399 fis = xa->fis; 400 401 fis->flags = ATA_H2D_FLAGS_CMD; 402 fis->lba_low = lba & 0xff; 403 fis->lba_mid = (lba >> 8) & 0xff; 404 fis->lba_high = (lba >> 16) & 0xff; 405 406 if (ap->ap_ncqdepth && !(xs->flags & SCSI_POLL)) { 407 /* Use NCQ */ 408 xa->flags |= ATA_F_NCQ; 409 fis->command = (xa->flags & ATA_F_WRITE) ? 410 ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA; 411 fis->device = ATA_H2D_DEVICE_LBA; 412 fis->lba_low_exp = (lba >> 24) & 0xff; 413 fis->lba_mid_exp = (lba >> 32) & 0xff; 414 fis->lba_high_exp = (lba >> 40) & 0xff; 415 fis->sector_count = xa->tag << 3; 416 fis->features = sector_count & 0xff; 417 fis->features_exp = (sector_count >> 8) & 0xff; 418 } else if (sector_count > 0x100 || lba > 0xfffffff) { 419 /* Use LBA48 */ 420 fis->command = (xa->flags & ATA_F_WRITE) ? 421 ATA_C_WRITEDMA_EXT : ATA_C_READDMA_EXT; 422 fis->device = ATA_H2D_DEVICE_LBA; 423 fis->lba_low_exp = (lba >> 24) & 0xff; 424 fis->lba_mid_exp = (lba >> 32) & 0xff; 425 fis->lba_high_exp = (lba >> 40) & 0xff; 426 fis->sector_count = sector_count & 0xff; 427 fis->sector_count_exp = (sector_count >> 8) & 0xff; 428 } else { 429 /* Use LBA */ 430 fis->command = (xa->flags & ATA_F_WRITE) ? 431 ATA_C_WRITEDMA : ATA_C_READDMA; 432 fis->device = ATA_H2D_DEVICE_LBA | ((lba >> 24) & 0x0f); 433 fis->sector_count = sector_count & 0xff; 434 } 435 436 xa->data = xs->data; 437 xa->datalen = xs->datalen; 438 xa->complete = atascsi_disk_cmd_done; 439 xa->timeout = xs->timeout; 440 xa->atascsi_private = xs; 441 if (xs->flags & SCSI_POLL) 442 xa->flags |= ATA_F_POLL; 443 444 return (ata_exec(as, xa)); 445 } 446 447 void 448 atascsi_disk_cmd_done(struct ata_xfer *xa) 449 { 450 struct scsi_xfer *xs = xa->atascsi_private; 451 452 switch (xa->state) { 453 case ATA_S_COMPLETE: 454 xs->error = XS_NOERROR; 455 break; 456 case ATA_S_ERROR: 457 /* fake sense? */ 458 xs->error = XS_DRIVER_STUFFUP; 459 break; 460 case ATA_S_TIMEOUT: 461 xs->error = XS_TIMEOUT; 462 break; 463 default: 464 panic("atascsi_disk_cmd_done: unexpected ata_xfer state (%d)", 465 xa->state); 466 } 467 468 xs->resid = xa->resid; 469 ata_put_xfer(xa); 470 471 xs->flags |= ITSDONE; 472 scsi_done(xs); 473 } 474 475 int 476 atascsi_disk_inq(struct scsi_xfer *xs) 477 { 478 struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd; 479 480 if (ISSET(inq->flags, SI_EVPD)) { 481 switch (inq->pagecode) { 482 case SI_PG_SUPPORTED: 483 return (atascsi_disk_vpd_supported(xs)); 484 case SI_PG_SERIAL: 485 return (atascsi_disk_vpd_serial(xs)); 486 case SI_PG_DEVID: 487 return (atascsi_disk_vpd_ident(xs)); 488 default: 489 return (atascsi_done(xs, XS_DRIVER_STUFFUP)); 490 } 491 } 492 493 return (atascsi_disk_inquiry(xs)); 494 } 495 496 int 497 atascsi_disk_inquiry(struct scsi_xfer *xs) 498 { 499 struct scsi_link *link = xs->sc_link; 500 struct atascsi *as = link->adapter_softc; 501 struct ata_port *ap = as->as_ports[link->target]; 502 struct scsi_inquiry_data inq; 503 int rv; 504 505 bzero(&inq, sizeof(inq)); 506 507 inq.device = T_DIRECT; 508 inq.version = 0x05; /* SPC-3 */ 509 inq.response_format = 2; 510 inq.additional_length = 32; 511 bcopy("ATA ", inq.vendor, sizeof(inq.vendor)); 512 bcopy(ap->ap_identify.model, inq.product, sizeof(inq.product)); 513 bcopy(ap->ap_identify.firmware, inq.revision, sizeof(inq.revision)); 514 515 bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen)); 516 517 rv = atascsi_done(xs, XS_NOERROR); 518 519 if (ap->ap_features & ATA_PORT_F_PROBED) 520 return (rv); 521 522 ap->ap_features = ATA_PORT_F_PROBED; 523 524 if (as->as_capability & ASAA_CAP_NCQ && 525 (letoh16(ap->ap_identify.satacap) & (1 << 8))) { 526 int host_ncqdepth; 527 /* 528 * At this point, openings should be the number of commands the 529 * host controller supports, less any reserved slot the host 530 * controller needs for recovery. 531 */ 532 host_ncqdepth = link->openings + 533 ((as->as_capability & ASAA_CAP_NEEDS_RESERVED) ? 1 : 0); 534 535 ap->ap_ncqdepth = (letoh16(ap->ap_identify.qdepth) & 0x1f) + 1; 536 537 /* Limit the number of openings to what the device supports. */ 538 if (host_ncqdepth > ap->ap_ncqdepth) 539 link->openings -= (host_ncqdepth - ap->ap_ncqdepth); 540 541 /* 542 * XXX throw away any xfers that have tag numbers higher than 543 * what the device supports. 544 */ 545 while (host_ncqdepth--) { 546 struct ata_xfer *xa; 547 548 xa = ata_get_xfer(ap); 549 if (xa->tag < ap->ap_ncqdepth) { 550 xa->state = ATA_S_COMPLETE; 551 ata_put_xfer(xa); 552 } 553 } 554 } 555 556 return (rv); 557 } 558 559 int 560 atascsi_disk_vpd_supported(struct scsi_xfer *xs) 561 { 562 struct { 563 struct scsi_vpd_hdr hdr; 564 u_int8_t list[3]; 565 } pg; 566 567 bzero(&pg, sizeof(pg)); 568 569 pg.hdr.device = T_DIRECT; 570 pg.hdr.page_code = SI_PG_SUPPORTED; 571 pg.hdr.page_length = sizeof(pg.list); 572 pg.list[0] = SI_PG_SUPPORTED; 573 pg.list[1] = SI_PG_SERIAL; 574 pg.list[2] = SI_PG_DEVID; 575 576 bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen)); 577 578 return (atascsi_done(xs, XS_NOERROR)); 579 } 580 581 int 582 atascsi_disk_vpd_serial(struct scsi_xfer *xs) 583 { 584 struct scsi_link *link = xs->sc_link; 585 struct atascsi *as = link->adapter_softc; 586 struct ata_port *ap = as->as_ports[link->target]; 587 struct scsi_vpd_serial pg; 588 589 bzero(&pg, sizeof(pg)); 590 591 pg.hdr.device = T_DIRECT; 592 pg.hdr.page_code = SI_PG_SERIAL; 593 pg.hdr.page_length = sizeof(ap->ap_identify.serial); 594 bcopy(ap->ap_identify.serial, pg.serial, 595 sizeof(ap->ap_identify.serial)); 596 597 bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen)); 598 599 return (atascsi_done(xs, XS_NOERROR)); 600 } 601 602 int 603 atascsi_disk_vpd_ident(struct scsi_xfer *xs) 604 { 605 struct scsi_link *link = xs->sc_link; 606 struct atascsi *as = link->adapter_softc; 607 struct ata_port *ap = as->as_ports[link->target]; 608 struct { 609 struct scsi_vpd_hdr hdr; 610 struct scsi_vpd_devid_hdr devid_hdr; 611 u_int8_t devid[68]; 612 } pg; 613 u_int8_t *p; 614 size_t pg_len; 615 616 bzero(&pg, sizeof(pg)); 617 if (ap->ap_identify.features87 & ATA_ID_F87_WWN) { 618 pg_len = 8; 619 620 pg.devid_hdr.pi_code = VPD_DEVID_CODE_BINARY; 621 pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_NAA; 622 623 /* XXX ata_identify field(s) should be renamed */ 624 bcopy(&ap->ap_identify.naa_ieee_oui, pg.devid, pg_len); 625 } else { 626 pg_len = 68; 627 628 pg.devid_hdr.pi_code = VPD_DEVID_CODE_ASCII; 629 pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_T10; 630 631 p = pg.devid; 632 bcopy("ATA ", p, 8); 633 p += 8; 634 bcopy(ap->ap_identify.model, p, 635 sizeof(ap->ap_identify.model)); 636 p += sizeof(ap->ap_identify.model); 637 bcopy(ap->ap_identify.serial, p, 638 sizeof(ap->ap_identify.serial)); 639 } 640 641 pg.devid_hdr.len = pg_len; 642 pg_len += sizeof(pg.devid_hdr); 643 644 pg.hdr.device = T_DIRECT; 645 pg.hdr.page_code = SI_PG_DEVID; 646 pg.hdr.page_length = pg_len; 647 pg_len += sizeof(pg.hdr); 648 649 bcopy(&pg, xs->data, MIN(pg_len, xs->datalen)); 650 651 return (atascsi_done(xs, XS_NOERROR)); 652 } 653 654 int 655 atascsi_disk_sync(struct scsi_xfer *xs) 656 { 657 struct scsi_link *link = xs->sc_link; 658 struct atascsi *as = link->adapter_softc; 659 struct ata_port *ap = as->as_ports[link->target]; 660 struct ata_xfer *xa; 661 662 xa = ata_get_xfer(ap); 663 if (xa == NULL) 664 return (NO_CCB); 665 666 xa->datalen = 0; 667 xa->flags = ATA_F_READ; 668 xa->complete = atascsi_disk_sync_done; 669 /* Spec says flush cache can take >30 sec, so give it at least 45. */ 670 xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout; 671 xa->atascsi_private = xs; 672 if (xs->flags & SCSI_POLL) 673 xa->flags |= ATA_F_POLL; 674 675 xa->fis->flags = ATA_H2D_FLAGS_CMD; 676 xa->fis->command = ATA_C_FLUSH_CACHE; 677 xa->fis->device = 0; 678 679 return (ata_exec(as, xa)); 680 } 681 682 void 683 atascsi_disk_sync_done(struct ata_xfer *xa) 684 { 685 struct scsi_xfer *xs = xa->atascsi_private; 686 687 switch (xa->state) { 688 case ATA_S_COMPLETE: 689 xs->error = XS_NOERROR; 690 break; 691 692 case ATA_S_ERROR: 693 case ATA_S_TIMEOUT: 694 printf("atascsi_disk_sync_done: %s\n", 695 xa->state == ATA_S_TIMEOUT ? "timeout" : "error"); 696 xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT : 697 XS_DRIVER_STUFFUP); 698 break; 699 700 default: 701 panic("atascsi_disk_sync_done: unexpected ata_xfer state (%d)", 702 xa->state); 703 } 704 705 ata_put_xfer(xa); 706 707 xs->flags |= ITSDONE; 708 scsi_done(xs); 709 } 710 711 int 712 atascsi_disk_capacity(struct scsi_xfer *xs) 713 { 714 struct scsi_link *link = xs->sc_link; 715 struct atascsi *as = link->adapter_softc; 716 struct ata_port *ap = as->as_ports[link->target]; 717 struct ata_identify *id = &ap->ap_identify; 718 struct scsi_read_cap_data rcd; 719 u_int64_t capacity = 0; 720 int i; 721 722 bzero(&rcd, sizeof(rcd)); 723 if (letoh16(id->cmdset83) & 0x0400) { 724 /* LBA48 feature set supported */ 725 for (i = 3; i >= 0; --i) { 726 capacity <<= 16; 727 capacity += letoh16(id->addrsecxt[i]); 728 } 729 } else { 730 capacity = letoh16(id->addrsec[1]); 731 capacity <<= 16; 732 capacity += letoh16(id->addrsec[0]); 733 } 734 735 if (capacity > 0xffffffff) 736 capacity = 0xffffffff; 737 738 _lto4b(capacity - 1, rcd.addr); 739 _lto4b(512, rcd.length); 740 741 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen)); 742 743 return (atascsi_done(xs, XS_NOERROR)); 744 } 745 746 int 747 atascsi_disk_sense(struct scsi_xfer *xs) 748 { 749 struct scsi_sense_data *sd = (struct scsi_sense_data *)xs->data; 750 751 bzero(xs->data, xs->datalen); 752 /* check datalen > sizeof(struct scsi_sense_data)? */ 753 sd->error_code = 0x70; /* XXX magic */ 754 sd->flags = SKEY_NO_SENSE; 755 756 return (atascsi_done(xs, XS_NOERROR)); 757 } 758 759 int 760 atascsi_atapi_cmd(struct scsi_xfer *xs) 761 { 762 struct scsi_link *link = xs->sc_link; 763 struct atascsi *as = link->adapter_softc; 764 struct ata_port *ap = as->as_ports[link->target]; 765 struct ata_xfer *xa; 766 struct ata_fis_h2d *fis; 767 768 xa = ata_get_xfer(ap); 769 if (xa == NULL) 770 return (NO_CCB); 771 772 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 773 case SCSI_DATA_IN: 774 xa->flags = ATA_F_PACKET | ATA_F_READ; 775 break; 776 case SCSI_DATA_OUT: 777 xa->flags = ATA_F_PACKET | ATA_F_WRITE; 778 break; 779 default: 780 xa->flags = ATA_F_PACKET; 781 } 782 783 xa->data = xs->data; 784 xa->datalen = xs->datalen; 785 xa->complete = atascsi_atapi_cmd_done; 786 xa->timeout = xs->timeout; 787 xa->atascsi_private = xs; 788 if (xs->flags & SCSI_POLL) 789 xa->flags |= ATA_F_POLL; 790 791 fis = xa->fis; 792 fis->flags = ATA_H2D_FLAGS_CMD; 793 fis->command = ATA_C_PACKET; 794 fis->device = 0; 795 fis->sector_count = xa->tag << 3; 796 fis->features = ATA_H2D_FEATURES_DMA | ((xa->flags & ATA_F_WRITE) ? 797 ATA_H2D_FEATURES_DIR_WRITE : ATA_H2D_FEATURES_DIR_READ); 798 fis->lba_mid = 0x00; 799 fis->lba_high = 0x20; 800 801 /* Copy SCSI command into ATAPI packet. */ 802 memcpy(xa->packetcmd, xs->cmd, xs->cmdlen); 803 804 return (ata_exec(as, xa)); 805 } 806 807 void 808 atascsi_atapi_cmd_done(struct ata_xfer *xa) 809 { 810 struct scsi_xfer *xs = xa->atascsi_private; 811 struct scsi_sense_data *sd = &xs->sense; 812 813 switch (xa->state) { 814 case ATA_S_COMPLETE: 815 xs->error = XS_NOERROR; 816 break; 817 case ATA_S_ERROR: 818 /* Return PACKET sense data */ 819 sd->error_code = SSD_ERRCODE_CURRENT; 820 sd->flags = (xa->rfis.error & 0xf0) >> 4; 821 if (xa->rfis.error & 0x04) 822 sd->flags = SKEY_ILLEGAL_REQUEST; 823 if (xa->rfis.error & 0x02) 824 sd->flags |= SSD_EOM; 825 if (xa->rfis.error & 0x01) 826 sd->flags |= SSD_ILI; 827 xs->error = XS_SENSE; 828 break; 829 case ATA_S_TIMEOUT: 830 printf("atascsi_atapi_cmd_done, timeout\n"); 831 xs->error = XS_TIMEOUT; 832 break; 833 default: 834 panic("atascsi_atapi_cmd_done: unexpected ata_xfer state (%d)", 835 xa->state); 836 } 837 838 xs->resid = xa->resid; 839 ata_put_xfer(xa); 840 841 xs->flags |= ITSDONE; 842 scsi_done(xs); 843 } 844 845 int 846 atascsi_done(struct scsi_xfer *xs, int error) 847 { 848 int s; 849 850 xs->error = error; 851 xs->flags |= ITSDONE; 852 853 s = splbio(); 854 scsi_done(xs); 855 splx(s); 856 return (COMPLETE); 857 } 858 859 int atascsi_ioctl_cmd(struct atascsi *, struct ata_port *, atareq_t *); 860 void atascsi_ioctl_done(struct ata_xfer *); 861 862 int 863 atascsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flags, 864 struct proc *p) 865 { 866 struct atascsi *as = link->adapter_softc; 867 struct ata_port *ap = as->as_ports[link->target]; 868 869 switch (cmd) { 870 case ATAIOCCOMMAND: 871 return (atascsi_ioctl_cmd(as, ap, (atareq_t *)addr)); 872 default: 873 return (ENOTTY); 874 } 875 } 876 877 int 878 atascsi_ioctl_cmd(struct atascsi *as, struct ata_port *ap, atareq_t *atareq) 879 { 880 struct ata_xfer *xa; 881 struct ata_fis_h2d *fis; 882 void *buf; 883 884 xa = ata_get_xfer(ap); 885 if (xa == NULL) 886 return (ENOMEM); 887 888 fis = xa->fis; 889 fis->flags = ATA_H2D_FLAGS_CMD; 890 fis->command = atareq->command; 891 fis->features = atareq->features; 892 fis->lba_low = atareq->sec_num; 893 fis->lba_mid = atareq->cylinder; 894 fis->lba_high = atareq->cylinder >> 8; 895 fis->device = atareq->head & 0x0f; 896 fis->sector_count = atareq->sec_count; 897 898 buf = malloc(atareq->datalen, M_TEMP, M_WAITOK); 899 900 xa->data = buf; 901 xa->datalen = atareq->datalen; 902 xa->complete = atascsi_ioctl_done; 903 xa->timeout = atareq->timeout; 904 xa->flags = 0; 905 if (atareq->flags & ATACMD_READ) 906 xa->flags |= ATA_F_READ; 907 if (atareq->flags & ATACMD_WRITE) { 908 xa->flags |= ATA_F_WRITE; 909 copyin(atareq->databuf, buf, atareq->datalen); 910 } 911 xa->atascsi_private = NULL; 912 913 switch (as->as_methods->ata_cmd(xa)) { 914 case ATA_COMPLETE: 915 break; 916 case ATA_QUEUED: 917 while (xa->state == ATA_S_PENDING || xa->state == ATA_S_ONCHIP) 918 tsleep(xa, PRIBIO, "atascsi", 0); 919 break; 920 case ATA_ERROR: 921 free(buf, M_TEMP); 922 ata_put_xfer(xa); 923 atareq->retsts = ATACMD_ERROR; 924 return (EIO); 925 default: 926 panic("atascsi_ioctl_cmd: unexpected return from ata_cmd"); 927 } 928 929 switch (xa->state) { 930 case ATA_S_COMPLETE: 931 atareq->retsts = ATACMD_OK; 932 if (atareq->flags & ATACMD_READ) 933 copyout(buf, atareq->databuf, atareq->datalen); 934 break; 935 case ATA_S_ERROR: 936 atareq->retsts = ATACMD_ERROR; 937 break; 938 case ATA_S_TIMEOUT: 939 atareq->retsts = ATACMD_TIMEOUT; 940 break; 941 default: 942 panic("atascsi_ioctl_cmd: unexpected ata_xfer state (%d)", 943 xa->state); 944 } 945 946 free(buf, M_TEMP); 947 948 ata_put_xfer(xa); 949 950 return (0); 951 } 952 953 void 954 atascsi_ioctl_done(struct ata_xfer *xa) 955 { 956 wakeup(xa); 957 } 958 959 int 960 ata_exec(struct atascsi *as, struct ata_xfer *xa) 961 { 962 int polled = xa->flags & ATA_F_POLL; 963 964 switch (as->as_methods->ata_cmd(xa)) { 965 case ATA_COMPLETE: 966 case ATA_ERROR: 967 return (COMPLETE); 968 case ATA_QUEUED: 969 if (!polled) 970 return (SUCCESSFULLY_QUEUED); 971 default: 972 panic("unexpected return from ata_exec"); 973 } 974 } 975 976 struct ata_xfer * 977 ata_get_xfer(struct ata_port *ap) 978 { 979 struct atascsi *as = ap->ap_as; 980 struct ata_xfer *xa; 981 982 xa = as->as_methods->ata_get_xfer(as->as_cookie, ap->ap_port); 983 if (xa != NULL) 984 xa->fis->type = ATA_FIS_TYPE_H2D; 985 986 return (xa); 987 } 988 989 void 990 ata_put_xfer(struct ata_xfer *xa) 991 { 992 xa->ata_put_xfer(xa); 993 } 994