/*	$OpenBSD: atascsi.c,v 1.150 2020/10/15 13:22:13 krw Exp $ */

/*
 * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2010 Conformal Systems LLC <info@conformal.com>
 * Copyright (c) 2010 Jonathan Matthew <jonathan@d14n.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/pool.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <dev/ata/atascsi.h>
#include <dev/ata/pmreg.h>

struct atascsi_port;

struct atascsi {
	struct device		*as_dev;
	void			*as_cookie;

	struct atascsi_host_port **as_host_ports;

	struct atascsi_methods	*as_methods;
	struct scsi_adapter	as_switch;
	struct scsibus_softc	*as_scsibus;

	int			as_capability;
	int			as_ncqdepth;
};

/*
 * atascsi_host_port is a port attached to the host controller, and
 * only holds the details relevant to the host controller.
 * atascsi_port is any port, including ports on port multipliers, and
 * it holds details of the device attached to the port.
 *
 * When there is a port multiplier attached to a port, the ahp_ports
 * array in the atascsi_host_port struct contains one atascsi_port for
 * each port, and one for the control port (port 15). The index into
 * the array is the LUN used to address the port. For the control port,
 * the LUN is 0, and for the port multiplier ports, the LUN is the
 * port number plus one.
 *
 * When there is no port multiplier attached to a port, the ahp_ports
 * array contains a single entry for the device. The LUN and port number
 * for this entry are both 0.
 */

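/*
 * Illustrative example (not part of the original source): with a
 * hypothetical four-port port multiplier behind a host port, the
 * ahp_ports array of that atascsi_host_port is addressed as
 *
 *	LUN 0 -> PMP control port (SATA_PMP_CONTROL_PORT, port 15)
 *	LUN 1 -> PMP port 0
 *	LUN 2 -> PMP port 1
 *	LUN 3 -> PMP port 2
 *	LUN 4 -> PMP port 3
 *
 * which matches atascsi_probe() below setting ap_pmp_port = lun - 1 for
 * device ports and SATA_PMP_CONTROL_PORT for LUN 0.
 */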

struct atascsi_host_port {
	struct scsi_iopool	ahp_iopool;
	struct atascsi		*ahp_as;
	int			ahp_port;
	int			ahp_nports;

	struct atascsi_port	**ahp_ports;
};

struct atascsi_port {
	struct ata_identify	ap_identify;
	struct atascsi_host_port *ap_host_port;
	struct atascsi		*ap_as;
	int			ap_pmp_port;
	int			ap_type;
	int			ap_ncqdepth;
	int			ap_features;
#define ATA_PORT_F_NCQ		0x1
#define ATA_PORT_F_TRIM		0x2
};

void		atascsi_cmd(struct scsi_xfer *);
int		atascsi_probe(struct scsi_link *);
void		atascsi_free(struct scsi_link *);

/* template */
struct scsi_adapter atascsi_switch = {
	atascsi_cmd, NULL, atascsi_probe, atascsi_free, NULL
};

void		ata_swapcopy(void *, void *, size_t);

void		atascsi_disk_cmd(struct scsi_xfer *);
void		atascsi_disk_cmd_done(struct ata_xfer *);
void		atascsi_disk_inq(struct scsi_xfer *);
void		atascsi_disk_inquiry(struct scsi_xfer *);
void		atascsi_disk_vpd_supported(struct scsi_xfer *);
void		atascsi_disk_vpd_serial(struct scsi_xfer *);
void		atascsi_disk_vpd_ident(struct scsi_xfer *);
void		atascsi_disk_vpd_ata(struct scsi_xfer *);
void		atascsi_disk_vpd_limits(struct scsi_xfer *);
void		atascsi_disk_vpd_info(struct scsi_xfer *);
void		atascsi_disk_vpd_thin(struct scsi_xfer *);
void		atascsi_disk_write_same_16(struct scsi_xfer *);
void		atascsi_disk_write_same_16_done(struct ata_xfer *);
void		atascsi_disk_unmap(struct scsi_xfer *);
void		atascsi_disk_unmap_task(void *);
void		atascsi_disk_unmap_done(struct ata_xfer *);
void		atascsi_disk_capacity(struct scsi_xfer *);
void		atascsi_disk_capacity16(struct scsi_xfer *);
void		atascsi_disk_sync(struct scsi_xfer *);
void		atascsi_disk_sync_done(struct ata_xfer *);
void		atascsi_disk_sense(struct scsi_xfer *);
void		atascsi_disk_start_stop(struct scsi_xfer *);
void		atascsi_disk_start_stop_done(struct ata_xfer *);

void		atascsi_atapi_cmd(struct scsi_xfer *);
void		atascsi_atapi_cmd_done(struct ata_xfer *);

void		atascsi_pmp_cmd(struct scsi_xfer *);
void		atascsi_pmp_cmd_done(struct ata_xfer *);
void		atascsi_pmp_sense(struct scsi_xfer *xs);
void		atascsi_pmp_inq(struct scsi_xfer *xs);


void		atascsi_passthru_12(struct scsi_xfer *);
void		atascsi_passthru_16(struct scsi_xfer *);
int		atascsi_passthru_map(struct scsi_xfer *, u_int8_t, u_int8_t);
void		atascsi_passthru_done(struct ata_xfer *);

void		atascsi_done(struct scsi_xfer *, int);

void		ata_exec(struct atascsi *, struct ata_xfer *);

void		ata_polled_complete(struct ata_xfer *);
int		ata_polled(struct ata_xfer *);

u_int64_t	ata_identify_blocks(struct ata_identify *);
u_int		ata_identify_blocksize(struct ata_identify *);
u_int		ata_identify_block_l2p_exp(struct ata_identify *);
u_int		ata_identify_block_logical_align(struct ata_identify *);

void		*atascsi_io_get(void *);
void		atascsi_io_put(void *, void *);
struct atascsi_port *atascsi_lookup_port(struct scsi_link *);

int		atascsi_port_identify(struct atascsi_port *,
		    struct ata_identify *);
int		atascsi_port_set_features(struct atascsi_port *, int, int);


struct atascsi *
atascsi_attach(struct device *self, struct atascsi_attach_args *aaa)
{
	struct scsibus_attach_args	saa;
	struct atascsi			*as;

	as = malloc(sizeof(*as), M_DEVBUF, M_WAITOK | M_ZERO);

	as->as_dev = self;
	as->as_cookie = aaa->aaa_cookie;
	as->as_methods = aaa->aaa_methods;
	as->as_capability = aaa->aaa_capability;
	as->as_ncqdepth = aaa->aaa_ncmds;

	/* copy from template and modify for ourselves */
	as->as_switch = atascsi_switch;
	if (aaa->aaa_minphys != NULL)
		as->as_switch.dev_minphys = aaa->aaa_minphys;

	as->as_host_ports = mallocarray(aaa->aaa_nports,
	    sizeof(struct atascsi_host_port *), M_DEVBUF, M_WAITOK | M_ZERO);

	saa.saa_adapter = &as->as_switch;
	saa.saa_adapter_softc = as;
	saa.saa_adapter_buswidth = aaa->aaa_nports;
	saa.saa_luns = SATA_PMP_MAX_PORTS;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_openings = 1;
	saa.saa_pool = NULL;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	as->as_scsibus = (struct scsibus_softc *)config_found(self, &saa,
	    scsiprint);

	return (as);
}

int
atascsi_detach(struct atascsi *as, int flags)
{
	int			rv;

	rv = config_detach((struct device *)as->as_scsibus, flags);
	if (rv != 0)
		return (rv);

	free(as->as_host_ports, M_DEVBUF, 0);
	free(as, M_DEVBUF, sizeof(*as));

	return (0);
}

struct atascsi_port *
atascsi_lookup_port(struct scsi_link *link)
{
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_host_port *ahp;

	if (link->target >= link->bus->sb_adapter_buswidth)
		return (NULL);

	ahp = as->as_host_ports[link->target];
	if (link->lun >= ahp->ahp_nports)
		return (NULL);

	return (ahp->ahp_ports[link->lun]);
}

int
atascsi_probe(struct scsi_link *link)
{
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_host_port *ahp;
	struct atascsi_port	*ap;
	struct ata_xfer		*xa;
	struct ata_identify	*identify;
	int			port, type, qdepth;
	int			rv;
	u_int16_t		cmdset;
	u_int16_t		validinfo, ultradma;
	int			i, xfermode = -1;

	port = link->target;
	if (port >= link->bus->sb_adapter_buswidth)
		return (ENXIO);

	/* if this is a PMP port, check it's valid */
	if (link->lun > 0) {
		if (link->lun >= as->as_host_ports[port]->ahp_nports)
			return (ENXIO);
	}

	type = as->as_methods->ata_probe(as->as_cookie, port, link->lun);
	switch (type) {
	case ATA_PORT_T_DISK:
		break;
	case ATA_PORT_T_ATAPI:
		link->flags |= SDEV_ATAPI;
		break;
	case ATA_PORT_T_PM:
		if (link->lun != 0) {
			printf("%s.%d.%d: Port multipliers cannot be nested\n",
			    as->as_dev->dv_xname, port, link->lun);
			rv = ENODEV;
			goto unsupported;
		}
		break;
	default:
		rv = ENODEV;
		goto unsupported;
	}

	ap = malloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
	ap->ap_as = as;

	if (link->lun == 0) {
		ahp = malloc(sizeof(*ahp), M_DEVBUF, M_WAITOK | M_ZERO);
		ahp->ahp_as = as;
		ahp->ahp_port = port;

		scsi_iopool_init(&ahp->ahp_iopool, ahp, atascsi_io_get,
		    atascsi_io_put);

		as->as_host_ports[port] = ahp;

		if (type == ATA_PORT_T_PM) {
			ahp->ahp_nports = SATA_PMP_MAX_PORTS;
			ap->ap_pmp_port = SATA_PMP_CONTROL_PORT;
		} else {
			ahp->ahp_nports = 1;
			ap->ap_pmp_port = 0;
		}
		ahp->ahp_ports = mallocarray(ahp->ahp_nports,
		    sizeof(struct atascsi_port *), M_DEVBUF, M_WAITOK | M_ZERO);
	} else {
		ahp = as->as_host_ports[port];
		ap->ap_pmp_port = link->lun - 1;
	}

	ap->ap_host_port = ahp;
	ap->ap_type = type;

	link->pool = &ahp->ahp_iopool;

	/* fetch the device info, except for port multipliers */
	if (type != ATA_PORT_T_PM) {

		/* devices attached to port multipliers tend not to be
		 * spun up at this point, and sometimes this prevents
		 * identification from working, so we retry a few times
		 * with a fairly long delay.
		 */

		identify = dma_alloc(sizeof(*identify), PR_WAITOK | PR_ZERO);

		int count = (link->lun > 0) ? 6 : 2;
		while (count--) {
			rv = atascsi_port_identify(ap, identify);
			if (rv == 0) {
				ap->ap_identify = *identify;
				break;
			}
			if (count > 0)
				delay(5000000);
		}

		dma_free(identify, sizeof(*identify));

		if (rv != 0) {
			goto error;
		}
	}

	ahp->ahp_ports[link->lun] = ap;

	if (type != ATA_PORT_T_DISK)
		return (0);

	/*
	 * Early SATA drives (as well as PATA drives) need to have
	 * their transfer mode set properly, otherwise commands that
	 * use DMA will time out.
	 */
	validinfo = letoh16(ap->ap_identify.validinfo);
	if (ISSET(validinfo, ATA_ID_VALIDINFO_ULTRADMA)) {
		ultradma = letoh16(ap->ap_identify.ultradma);
		for (i = 7; i >= 0; i--) {
			if (ultradma & (1 << i)) {
				xfermode = ATA_SF_XFERMODE_UDMA | i;
				break;
			}
		}
	}
	if (xfermode != -1)
		(void)atascsi_port_set_features(ap, ATA_SF_XFERMODE, xfermode);

	if (as->as_capability & ASAA_CAP_NCQ &&
	    ISSET(letoh16(ap->ap_identify.satacap), ATA_SATACAP_NCQ) &&
	    (link->lun == 0 || as->as_capability & ASAA_CAP_PMP_NCQ)) {
		ap->ap_ncqdepth = ATA_QDEPTH(letoh16(ap->ap_identify.qdepth));
		qdepth = MIN(ap->ap_ncqdepth, as->as_ncqdepth);
		if (ISSET(as->as_capability, ASAA_CAP_NEEDS_RESERVED))
			qdepth--;

		if (qdepth > 1) {
			SET(ap->ap_features, ATA_PORT_F_NCQ);

			/* Raise the number of openings */
			link->openings = qdepth;

			/*
			 * XXX for directly attached devices, throw away any
			 * xfers that have tag numbers higher than what the
			 * device supports.
			 */
			if (link->lun == 0) {
				while (qdepth--) {
					xa = scsi_io_get(&ahp->ahp_iopool,
					    SCSI_NOSLEEP);
					if (xa->tag < link->openings) {
						xa->state = ATA_S_COMPLETE;
						scsi_io_put(&ahp->ahp_iopool,
						    xa);
					}
				}
			}
		}
	}
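	/*
	 * Worked example (illustrative only, not from the original source):
	 * a directly attached drive advertising a queue depth of 32 on a
	 * controller with as_ncqdepth 32 and ASAA_CAP_NEEDS_RESERVED set
	 * ends up with qdepth 31, so link->openings becomes 31 and the loop
	 * above deliberately keeps the pre-allocated xfer carrying tag 31
	 * out of the iopool, since the drive cannot accept that tag.
	 */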

	if (ISSET(letoh16(ap->ap_identify.data_set_mgmt),
	    ATA_ID_DATA_SET_MGMT_TRIM))
		SET(ap->ap_features, ATA_PORT_F_TRIM);

	cmdset = letoh16(ap->ap_identify.cmdset82);

	/* Enable write cache if supported */
	if (ISSET(cmdset, ATA_IDENTIFY_WRITECACHE)) {
		/* We don't care if it fails. */
		(void)atascsi_port_set_features(ap, ATA_SF_WRITECACHE_EN, 0);
	}

	/* Enable read lookahead if supported */
	if (ISSET(cmdset, ATA_IDENTIFY_LOOKAHEAD)) {
		/* We don't care if it fails. */
		(void)atascsi_port_set_features(ap, ATA_SF_LOOKAHEAD_EN, 0);
	}

	/*
	 * FREEZE LOCK the device so malicious users can't lock it on us.
	 * As there is no harm in issuing this to devices that don't
	 * support the security feature set we just send it, and don't bother
	 * checking if the device sends a command abort to tell us it doesn't
	 * support it.
	 */
	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
	if (xa == NULL)
		panic("no free xfers on a new port");
	xa->fis->command = ATA_C_SEC_FREEZE_LOCK;
	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->flags = ATA_F_POLL;
	xa->timeout = 1000;
	xa->complete = ata_polled_complete;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = &ahp->ahp_iopool;
	ata_exec(as, xa);
	ata_polled(xa); /* we don't care if it doesn't work */

	return (0);
error:
	free(ap, M_DEVBUF, sizeof(*ap));
unsupported:

	as->as_methods->ata_free(as->as_cookie, port, link->lun);
	return (rv);
}

void
atascsi_free(struct scsi_link *link)
{
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_host_port *ahp;
	struct atascsi_port	*ap;
	int			port;

	port = link->target;
	if (port >= link->bus->sb_adapter_buswidth)
		return;

	ahp = as->as_host_ports[port];
	if (ahp == NULL)
		return;

	if (link->lun >= ahp->ahp_nports)
		return;

	ap = ahp->ahp_ports[link->lun];
	free(ap, M_DEVBUF, sizeof(*ap));
	ahp->ahp_ports[link->lun] = NULL;

	as->as_methods->ata_free(as->as_cookie, port, link->lun);

	if (link->lun == ahp->ahp_nports - 1) {
		/* we've already freed all of ahp->ahp_ports, now
		 * free ahp itself. this relies on the order luns are
		 * detached in scsi_detach_target().
		 */
		free(ahp, M_DEVBUF, sizeof(*ahp));
		as->as_host_ports[port] = NULL;
	}
}

void
atascsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;

	ap = atascsi_lookup_port(link);
	if (ap == NULL) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	switch (ap->ap_type) {
	case ATA_PORT_T_DISK:
		atascsi_disk_cmd(xs);
		break;
	case ATA_PORT_T_ATAPI:
		atascsi_atapi_cmd(xs);
		break;
	case ATA_PORT_T_PM:
		atascsi_pmp_cmd(xs);
		break;

	case ATA_PORT_T_NONE:
	default:
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		break;
	}
}

void
atascsi_disk_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_port	*ap;
	struct ata_xfer		*xa = xs->io;
	int			flags = 0;
	struct ata_fis_h2d	*fis;
	u_int64_t		lba;
	u_int32_t		sector_count;

	ap = atascsi_lookup_port(link);

	switch (xs->cmd.opcode) {
	case READ_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
		flags = ATA_F_READ;
		break;
	case WRITE_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		flags = ATA_F_WRITE;
		/* deal with io outside the switch */
		break;

	case WRITE_SAME_16:
		atascsi_disk_write_same_16(xs);
		return;
	case UNMAP:
		atascsi_disk_unmap(xs);
		return;

	case SYNCHRONIZE_CACHE:
		atascsi_disk_sync(xs);
		return;
	case REQUEST_SENSE:
		atascsi_disk_sense(xs);
		return;
	case INQUIRY:
		atascsi_disk_inq(xs);
		return;
	case READ_CAPACITY:
		atascsi_disk_capacity(xs);
		return;
	case READ_CAPACITY_16:
		atascsi_disk_capacity16(xs);
		return;

	case ATA_PASSTHRU_12:
		atascsi_passthru_12(xs);
		return;
	case ATA_PASSTHRU_16:
		atascsi_passthru_16(xs);
		return;

	case START_STOP:
		atascsi_disk_start_stop(xs);
		return;

	case TEST_UNIT_READY:
	case PREVENT_ALLOW:
		atascsi_done(xs, XS_NOERROR);
		return;

	default:
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	xa->flags = flags;
	scsi_cmd_rw_decode(&xs->cmd, &lba, &sector_count);
	if ((lba >> 48) != 0 || (sector_count >> 16) != 0) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	fis = xa->fis;

	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->lba_low = lba & 0xff;
	fis->lba_mid = (lba >> 8) & 0xff;
	fis->lba_high = (lba >> 16) & 0xff;

	if (ISSET(ap->ap_features, ATA_PORT_F_NCQ) &&
	    (xa->tag < ap->ap_ncqdepth) &&
	    !(xs->flags & SCSI_POLL)) {
		/* Use NCQ */
		xa->flags |= ATA_F_NCQ;
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA;
		fis->device = ATA_H2D_DEVICE_LBA;
		fis->lba_low_exp = (lba >> 24) & 0xff;
		fis->lba_mid_exp = (lba >> 32) & 0xff;
		fis->lba_high_exp = (lba >> 40) & 0xff;
		fis->sector_count = xa->tag << 3;
		fis->features = sector_count & 0xff;
		fis->features_exp = (sector_count >> 8) & 0xff;
	} else if (sector_count > 0x100 || lba > 0xfffffff) {
		/* Use LBA48 */
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITEDMA_EXT : ATA_C_READDMA_EXT;
		fis->device = ATA_H2D_DEVICE_LBA;
		fis->lba_low_exp = (lba >> 24) & 0xff;
		fis->lba_mid_exp = (lba >> 32) & 0xff;
		fis->lba_high_exp = (lba >> 40) & 0xff;
		fis->sector_count = sector_count & 0xff;
		fis->sector_count_exp = (sector_count >> 8) & 0xff;
	} else {
		/* Use LBA */
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITEDMA : ATA_C_READDMA;
		fis->device = ATA_H2D_DEVICE_LBA | ((lba >> 24) & 0x0f);
		fis->sector_count = sector_count & 0xff;
	}

	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->complete = atascsi_disk_cmd_done;
	xa->timeout = xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	ata_exec(as, xa);
}
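/*
 * Illustrative FIS contents (example only, not from the original source):
 * an NCQ read of 8 sectors at LBA 0x12345678 using tag 5 is built above as
 *
 *	fis->command		= ATA_C_READ_FPDMA
 *	fis->sector_count	= 5 << 3	(the NCQ tag lives in bits 7:3)
 *	fis->features		= 0x08		(low byte of the transfer length)
 *	fis->lba_low/mid/high	= 0x78/0x56/0x34, lba_low_exp = 0x12
 *
 * whereas the same transfer without NCQ, and with an LBA no larger than
 * 0xfffffff, would use ATA_C_READDMA with the top LBA nibble folded into
 * fis->device.
 */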

void
atascsi_disk_cmd_done(struct ata_xfer *xa)
{
	struct scsi_xfer	*xs = xa->atascsi_private;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		/* fake sense? */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_disk_cmd_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;

	scsi_done(xs);
}

void
atascsi_disk_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry	*inq = (struct scsi_inquiry *)&xs->cmd;

	if (xs->cmdlen != sizeof(*inq)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (ISSET(inq->flags, SI_EVPD)) {
		switch (inq->pagecode) {
		case SI_PG_SUPPORTED:
			atascsi_disk_vpd_supported(xs);
			break;
		case SI_PG_SERIAL:
			atascsi_disk_vpd_serial(xs);
			break;
		case SI_PG_DEVID:
			atascsi_disk_vpd_ident(xs);
			break;
		case SI_PG_ATA:
			atascsi_disk_vpd_ata(xs);
			break;
		case SI_PG_DISK_LIMITS:
			atascsi_disk_vpd_limits(xs);
			break;
		case SI_PG_DISK_INFO:
			atascsi_disk_vpd_info(xs);
			break;
		case SI_PG_DISK_THIN:
			atascsi_disk_vpd_thin(xs);
			break;
		default:
			atascsi_done(xs, XS_DRIVER_STUFFUP);
			break;
		}
	} else
		atascsi_disk_inquiry(xs);
}

void
atascsi_disk_inquiry(struct scsi_xfer *xs)
{
	struct scsi_inquiry_data inq;
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;

	ap = atascsi_lookup_port(link);

	bzero(&inq, sizeof(inq));

	inq.device = T_DIRECT;
	inq.version = SCSI_REV_SPC3;
	inq.response_format = SID_SCSI2_RESPONSE;
	inq.additional_length = SID_SCSI2_ALEN;
	inq.flags |= SID_CmdQue;
	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));
	ata_swapcopy(ap->ap_identify.model, inq.product,
	    sizeof(inq.product));
	ata_swapcopy(ap->ap_identify.firmware, inq.revision,
	    sizeof(inq.revision));

	scsi_copy_internal_data(xs, &inq, sizeof(inq));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_supported(struct scsi_xfer *xs)
{
	struct {
		struct scsi_vpd_hdr	hdr;
		u_int8_t		list[7];
	}			pg;
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;
	int			fat;

	ap = atascsi_lookup_port(link);
	fat = ISSET(ap->ap_features, ATA_PORT_F_TRIM) ? 0 : 1;

	bzero(&pg, sizeof(pg));

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_SUPPORTED;
	_lto2b(sizeof(pg.list) - fat, pg.hdr.page_length);
	pg.list[0] = SI_PG_SUPPORTED;
	pg.list[1] = SI_PG_SERIAL;
	pg.list[2] = SI_PG_DEVID;
	pg.list[3] = SI_PG_ATA;
	pg.list[4] = SI_PG_DISK_LIMITS;
	pg.list[5] = SI_PG_DISK_INFO;
	pg.list[6] = SI_PG_DISK_THIN; /* "trimmed" if fat. get it? tehe. */

	bcopy(&pg, xs->data, MIN(sizeof(pg) - fat, xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_serial(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;
	struct scsi_vpd_serial	pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_SERIAL;
	_lto2b(sizeof(ap->ap_identify.serial), pg.hdr.page_length);
	ata_swapcopy(ap->ap_identify.serial, pg.serial,
	    sizeof(ap->ap_identify.serial));

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_ident(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;
	struct {
		struct scsi_vpd_hdr	hdr;
		struct scsi_vpd_devid_hdr devid_hdr;
		u_int8_t		devid[68];
	}			pg;
	u_int8_t		*p;
	size_t			pg_len;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));
	if (letoh16(ap->ap_identify.features87) & ATA_ID_F87_WWN) {
		pg_len = 8;

		pg.devid_hdr.pi_code = VPD_DEVID_CODE_BINARY;
		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_NAA;

		ata_swapcopy(&ap->ap_identify.naa_ieee_oui, pg.devid, pg_len);
	} else {
		pg_len = 68;

		pg.devid_hdr.pi_code = VPD_DEVID_CODE_ASCII;
		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_T10;

		p = pg.devid;
		bcopy("ATA     ", p, 8);
		p += 8;
		ata_swapcopy(ap->ap_identify.model, p,
		    sizeof(ap->ap_identify.model));
		p += sizeof(ap->ap_identify.model);
		ata_swapcopy(ap->ap_identify.serial, p,
		    sizeof(ap->ap_identify.serial));
	}

	pg.devid_hdr.len = pg_len;
	pg_len += sizeof(pg.devid_hdr);

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DEVID;
	_lto2b(pg_len, pg.hdr.page_length);
	pg_len += sizeof(pg.hdr);

	bcopy(&pg, xs->data, MIN(pg_len, xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_ata(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;
	struct scsi_vpd_ata	pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_ATA;
	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);

	memset(pg.sat_vendor, ' ', sizeof(pg.sat_vendor));
	memcpy(pg.sat_vendor, "OpenBSD",
	    MIN(strlen("OpenBSD"), sizeof(pg.sat_vendor)));
	memset(pg.sat_product, ' ', sizeof(pg.sat_product));
	memcpy(pg.sat_product, "atascsi",
	    MIN(strlen("atascsi"), sizeof(pg.sat_product)));
	memset(pg.sat_revision, ' ', sizeof(pg.sat_revision));
	memcpy(pg.sat_revision, osrelease,
	    MIN(strlen(osrelease), sizeof(pg.sat_revision)));

	/* XXX device signature */

	switch (ap->ap_type) {
	case ATA_PORT_T_DISK:
		pg.command_code = VPD_ATA_COMMAND_CODE_ATA;
		break;
	case ATA_PORT_T_ATAPI:
		pg.command_code = VPD_ATA_COMMAND_CODE_ATAPI;
		break;
	}

	memcpy(pg.identify, &ap->ap_identify, sizeof(pg.identify));

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_limits(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;
	struct scsi_vpd_disk_limits pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));
	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DISK_LIMITS;
	_lto2b(SI_PG_DISK_LIMITS_LEN_THIN, pg.hdr.page_length);

	_lto2b(1 << ata_identify_block_l2p_exp(&ap->ap_identify),
	    pg.optimal_xfer_granularity);

	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		/*
		 * ATA only supports 65535 blocks per TRIM descriptor, so
		 * avoid having to split UNMAP descriptors and overflow the
		 * page limit by using that as a max.
		 */
		_lto4b(ATA_DSM_TRIM_MAX_LEN, pg.max_unmap_lba_count);
		_lto4b(512 / 8, pg.max_unmap_desc_count);
	}

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_info(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;
	struct scsi_vpd_disk_info pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));
	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DISK_INFO;
	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);

	_lto2b(letoh16(ap->ap_identify.rpm), pg.rpm);
	pg.form_factor = letoh16(ap->ap_identify.form) & ATA_ID_FORM_MASK;

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_thin(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;
	struct scsi_vpd_disk_thin pg;

	ap = atascsi_lookup_port(link);
	if (!ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&pg, sizeof(pg));
	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DISK_THIN;
	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);

	pg.flags = VPD_DISK_THIN_TPU | VPD_DISK_THIN_TPWS;

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_write_same_16(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_port	*ap;
	struct scsi_write_same_16 *cdb;
	struct ata_xfer		*xa = xs->io;
	struct ata_fis_h2d	*fis;
	u_int64_t		lba;
	u_int32_t		length;
	u_int64_t		desc;

	if (xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	cdb = (struct scsi_write_same_16 *)&xs->cmd;

	if (!ISSET(cdb->flags, WRITE_SAME_F_UNMAP) ||
	    !ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		/* generate sense data */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (xs->datalen < 512) {
		/* generate sense data */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	lba = _8btol(cdb->lba);
	length = _4btol(cdb->length);

	if (length > ATA_DSM_TRIM_MAX_LEN) {
		/* XXX we don't support requests over 65535 blocks */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	xa->data = xs->data;
	xa->datalen = 512;
	xa->flags = ATA_F_WRITE;
	xa->pmp_port = ap->ap_pmp_port;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;
	xa->complete = atascsi_disk_write_same_16_done;
	xa->atascsi_private = xs;
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;

	/* TRIM sends a list of blocks to discard in the databuf. */
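	/*
	 * Illustrative layout (example only, assuming ATA_DSM_TRIM_DESC()
	 * packs a descriptor as (length << 48) | lba, per the ATA DSM/TRIM
	 * range entry format): trimming 8 sectors starting at LBA 0x1000
	 * stores the little-endian qword 0x0008000000001000 at the start of
	 * the 512-byte payload.  One 512-byte payload holds 512 / 8 = 64
	 * such descriptors, which is why the block limits VPD page above
	 * advertises 64 as the maximum number of UNMAP descriptors.
	 */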
	memset(xa->data, 0, xa->datalen);
	desc = htole64(ATA_DSM_TRIM_DESC(lba, length));
	memcpy(xa->data, &desc, sizeof(desc));

	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = ATA_C_DSM;
	fis->features = ATA_DSM_TRIM;
	fis->sector_count = 1;

	ata_exec(as, xa);
}

void
atascsi_disk_write_same_16_done(struct ata_xfer *xa)
{
	struct scsi_xfer	*xs = xa->atascsi_private;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;

	default:
		panic("atascsi_disk_write_same_16_done: "
		    "unexpected ata_xfer state (%d)", xa->state);
	}

	scsi_done(xs);
}

void
atascsi_disk_unmap(struct scsi_xfer *xs)
{
	struct ata_xfer		*xa = xs->io;
	struct scsi_unmap	*cdb;
	struct scsi_unmap_data	*unmap;
	u_int			len;

	if (ISSET(xs->flags, SCSI_POLL) || xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	cdb = (struct scsi_unmap *)&xs->cmd;
	len = _2btol(cdb->list_len);
	if (xs->datalen != len || len < sizeof(*unmap)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	unmap = (struct scsi_unmap_data *)xs->data;
	if (_2btol(unmap->data_length) != len) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	len = _2btol(unmap->desc_length);
	if (len != xs->datalen - sizeof(*unmap)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (len < sizeof(struct scsi_unmap_desc)) {
		/* no work, no error according to sbc3 */
		atascsi_done(xs, XS_NOERROR);
		return;
	}

	if (len > sizeof(struct scsi_unmap_desc) * 64) {
		/* more work than we advertised */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/* let's go */
	if (ISSET(xs->flags, SCSI_NOSLEEP)) {
		task_set(&xa->task, atascsi_disk_unmap_task, xs);
		task_add(systq, &xa->task);
	} else {
		/* we can already sleep for memory */
		atascsi_disk_unmap_task(xs);
	}
}

void
atascsi_disk_unmap_task(void *xxs)
{
	struct scsi_xfer	*xs = xxs;
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_port	*ap;
	struct ata_xfer		*xa = xs->io;
	struct ata_fis_h2d	*fis;
	struct scsi_unmap_data	*unmap;
	struct scsi_unmap_desc	*descs, *d;
	u_int64_t		*trims;
	u_int			len, i;

	trims = dma_alloc(512, PR_WAITOK | PR_ZERO);

	ap = atascsi_lookup_port(link);
	unmap = (struct scsi_unmap_data *)xs->data;
	descs = (struct scsi_unmap_desc *)(unmap + 1);

	len = _2btol(unmap->desc_length) / sizeof(*d);
	for (i = 0; i < len; i++) {
		d = &descs[i];
		if (_4btol(d->logical_blocks) > ATA_DSM_TRIM_MAX_LEN)
			goto fail;

		trims[i] = htole64(ATA_DSM_TRIM_DESC(_8btol(d->logical_addr),
		    _4btol(d->logical_blocks)));
	}

	xa->data = trims;
	xa->datalen = 512;
	xa->flags = ATA_F_WRITE;
	xa->pmp_port = ap->ap_pmp_port;
	xa->complete = atascsi_disk_unmap_done;
	xa->atascsi_private = xs;
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;

	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = ATA_C_DSM;
	fis->features = ATA_DSM_TRIM;
	fis->sector_count = 1;

	ata_exec(as, xa);
	return;

fail:
	dma_free(trims, 512);
	atascsi_done(xs, XS_DRIVER_STUFFUP);
}

void
atascsi_disk_unmap_done(struct ata_xfer *xa)
{
	struct scsi_xfer	*xs = xa->atascsi_private;

	dma_free(xa->data, 512);

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;

	default:
		panic("atascsi_disk_unmap_done: "
		    "unexpected ata_xfer state (%d)", xa->state);
	}

	scsi_done(xs);
}

void
atascsi_disk_sync(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_port	*ap;
	struct ata_xfer		*xa = xs->io;

	if (xs->cmdlen != sizeof(struct scsi_synchronize_cache)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	xa->datalen = 0;
	xa->flags = ATA_F_READ;
	xa->complete = atascsi_disk_sync_done;
	/* Spec says flush cache can take >30 sec, so give it at least 45. */
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
	xa->atascsi_private = xs;
	xa->pmp_port = ap->ap_pmp_port;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = ATA_C_FLUSH_CACHE;
	xa->fis->device = 0;

	ata_exec(as, xa);
}

void
atascsi_disk_sync_done(struct ata_xfer *xa)
{
	struct scsi_xfer	*xs = xa->atascsi_private;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;

	case ATA_S_ERROR:
	case ATA_S_TIMEOUT:
		printf("atascsi_disk_sync_done: %s\n",
		    xa->state == ATA_S_TIMEOUT ? "timeout" : "error");
		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
		    XS_DRIVER_STUFFUP);
		break;

	default:
		panic("atascsi_disk_sync_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	scsi_done(xs);
}

u_int64_t
ata_identify_blocks(struct ata_identify *id)
{
	u_int64_t		blocks = 0;
	int			i;

	if (letoh16(id->cmdset83) & 0x0400) {
		/* LBA48 feature set supported */
		for (i = 3; i >= 0; --i) {
			blocks <<= 16;
			blocks += letoh16(id->addrsecxt[i]);
		}
	} else {
		blocks = letoh16(id->addrsec[1]);
		blocks <<= 16;
		blocks += letoh16(id->addrsec[0]);
	}

	return (blocks - 1);
}

u_int
ata_identify_blocksize(struct ata_identify *id)
{
	u_int			blocksize = 512;
	u_int16_t		p2l_sect = letoh16(id->p2l_sect);

	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SIZESET)) {
		blocksize = letoh16(id->words_lsec[1]);
		blocksize <<= 16;
		blocksize += letoh16(id->words_lsec[0]);
		blocksize <<= 1;
	}

	return (blocksize);
}

u_int
ata_identify_block_l2p_exp(struct ata_identify *id)
{
	u_int			exponent = 0;
	u_int16_t		p2l_sect = letoh16(id->p2l_sect);

	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET)) {
		exponent = (p2l_sect & ATA_ID_P2L_SECT_SIZE);
	}

	return (exponent);
}

u_int
ata_identify_block_logical_align(struct ata_identify *id)
{
	u_int			align = 0;
	u_int16_t		p2l_sect = letoh16(id->p2l_sect);
	u_int16_t		logical_align = letoh16(id->logical_align);

	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET) &&
	    (logical_align & ATA_ID_LALIGN_MASK) == ATA_ID_LALIGN_VALID)
		align = logical_align & ATA_ID_LALIGN;

	return (align);
}

void
atascsi_disk_capacity(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;
	struct scsi_read_cap_data rcd;
	u_int64_t		capacity;

	ap = atascsi_lookup_port(link);
	if (xs->cmdlen != sizeof(struct scsi_read_capacity)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&rcd, sizeof(rcd));
	capacity = ata_identify_blocks(&ap->ap_identify);
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}
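/*
 * Worked example (illustrative only): a 4 TB drive reports roughly
 * 7814037168 sectors, which does not fit in the 32-bit READ CAPACITY(10)
 * address field, so the clamp above returns 0xffffffff and the initiator
 * is expected to retry with READ CAPACITY(16), handled below.
 */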

void
atascsi_disk_capacity16(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi_port	*ap;
	struct scsi_read_cap_data_16 rcd;
	u_int			align;
	u_int16_t		lowest_aligned = 0;

	ap = atascsi_lookup_port(link);
	if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&rcd, sizeof(rcd));

	_lto8b(ata_identify_blocks(&ap->ap_identify), rcd.addr);
	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
	rcd.logical_per_phys = ata_identify_block_l2p_exp(&ap->ap_identify);
	align = ata_identify_block_logical_align(&ap->ap_identify);
	if (align > 0)
		lowest_aligned = (1 << rcd.logical_per_phys) - align;

	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		SET(lowest_aligned, READ_CAP_16_TPE);

		if (ISSET(letoh16(ap->ap_identify.add_support),
		    ATA_ID_ADD_SUPPORT_DRT))
			SET(lowest_aligned, READ_CAP_16_TPRZ);
	}
	_lto2b(lowest_aligned, rcd.lowest_aligned);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

int
atascsi_passthru_map(struct scsi_xfer *xs, u_int8_t count_proto, u_int8_t flags)
{
	struct ata_xfer		*xa = xs->io;

	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->timeout = xs->timeout;
	xa->flags = 0;
	if (xs->flags & SCSI_DATA_IN)
		xa->flags |= ATA_F_READ;
	if (xs->flags & SCSI_DATA_OUT)
		xa->flags |= ATA_F_WRITE;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	switch (count_proto & ATA_PASSTHRU_PROTO_MASK) {
	case ATA_PASSTHRU_PROTO_NON_DATA:
	case ATA_PASSTHRU_PROTO_PIO_DATAIN:
	case ATA_PASSTHRU_PROTO_PIO_DATAOUT:
		xa->flags |= ATA_F_PIO;
		break;
	default:
		/* we don't support this yet */
		return (1);
	}

	xa->atascsi_private = xs;
	xa->complete = atascsi_passthru_done;

	return (0);
}

void
atascsi_passthru_12(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_port	*ap;
	struct ata_xfer		*xa = xs->io;
	struct scsi_ata_passthru_12 *cdb;
	struct ata_fis_h2d	*fis;

	if (xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	cdb = (struct scsi_ata_passthru_12 *)&xs->cmd;
	/* validate cdb */

	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = cdb->command;
	fis->features = cdb->features;
	fis->lba_low = cdb->lba_low;
	fis->lba_mid = cdb->lba_mid;
	fis->lba_high = cdb->lba_high;
	fis->device = cdb->device;
	fis->sector_count = cdb->sector_count;
	xa->pmp_port = ap->ap_pmp_port;

	ata_exec(as, xa);
}

void
atascsi_passthru_16(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_port	*ap;
	struct ata_xfer		*xa = xs->io;
	struct scsi_ata_passthru_16 *cdb;
	struct ata_fis_h2d	*fis;

	if (xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	cdb = (struct scsi_ata_passthru_16 *)&xs->cmd;
	/* validate cdb */

	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = cdb->command;
	fis->features = cdb->features[1];
	fis->lba_low = cdb->lba_low[1];
	fis->lba_mid = cdb->lba_mid[1];
	fis->lba_high = cdb->lba_high[1];
	fis->device = cdb->device;
	fis->lba_low_exp = cdb->lba_low[0];
	fis->lba_mid_exp = cdb->lba_mid[0];
	fis->lba_high_exp = cdb->lba_high[0];
	fis->features_exp = cdb->features[0];
	fis->sector_count = cdb->sector_count[1];
	fis->sector_count_exp = cdb->sector_count[0];
	xa->pmp_port = ap->ap_pmp_port;

	ata_exec(as, xa);
}
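/*
 * Example mapping (illustrative only): an ATA PASS-THROUGH(16) CDB using
 * the PIO data-in protocol (for instance carrying IDENTIFY DEVICE) is
 * translated above so that cdb->lba_low[1]/[0] become
 * fis->lba_low/lba_low_exp and cdb->sector_count[1]/[0] become
 * fis->sector_count/sector_count_exp, i.e. index 1 holds the low byte
 * and index 0 the "ext" byte of each two-byte CDB field.
 */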

void
atascsi_passthru_done(struct ata_xfer *xa)
{
	struct scsi_xfer	*xs = xa->atascsi_private;

	/*
	 * XXX need to generate sense if cdb wants it
	 */

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		printf("atascsi_passthru_done, timeout\n");
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_passthru_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;

	scsi_done(xs);
}

void
atascsi_disk_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data	*sd = (struct scsi_sense_data *)xs->data;

	bzero(xs->data, xs->datalen);
	/* check datalen > sizeof(struct scsi_sense_data)? */
	sd->error_code = SSD_ERRCODE_CURRENT;
	sd->flags = SKEY_NO_SENSE;

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_start_stop(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_port	*ap;
	struct ata_xfer		*xa = xs->io;
	struct scsi_start_stop	*ss = (struct scsi_start_stop *)&xs->cmd;

	if (xs->cmdlen != sizeof(*ss)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (ss->how != SSS_STOP) {
		atascsi_done(xs, XS_NOERROR);
		return;
	}

	/*
	 * A SCSI START STOP UNIT command with the START bit set to
	 * zero gets translated into an ATA FLUSH CACHE command
	 * followed by an ATA STANDBY IMMEDIATE command.
	 */
	ap = atascsi_lookup_port(link);
	xa->datalen = 0;
	xa->flags = ATA_F_READ;
	xa->complete = atascsi_disk_start_stop_done;
	/* Spec says flush cache can take >30 sec, so give it at least 45. */
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = ATA_C_FLUSH_CACHE;
	xa->fis->device = 0;

	ata_exec(as, xa);
}

void
atascsi_disk_start_stop_done(struct ata_xfer *xa)
{
	struct scsi_xfer	*xs = xa->atascsi_private;
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_port	*ap;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		break;

	case ATA_S_ERROR:
	case ATA_S_TIMEOUT:
		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
		    XS_DRIVER_STUFFUP);
		xs->resid = xa->resid;
		scsi_done(xs);
		return;

	default:
		panic("atascsi_disk_start_stop_done: "
		    "unexpected ata_xfer state (%d)", xa->state);
	}

	/*
	 * The FLUSH CACHE command completed successfully; now issue
	 * the STANDBY IMMEDIATE command.
	 */
	ap = atascsi_lookup_port(link);
	xa->datalen = 0;
	xa->flags = ATA_F_READ;
	xa->state = ATA_S_SETUP;
	xa->complete = atascsi_disk_cmd_done;
	/* Spec says flush cache can take >30 sec, so give it at least 45. */
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = ATA_C_STANDBY_IMMED;
	xa->fis->device = 0;

	ata_exec(as, xa);
}

void
atascsi_atapi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->bus->sb_adapter_softc;
	struct atascsi_port	*ap;
	struct ata_xfer		*xa = xs->io;
	struct ata_fis_h2d	*fis;

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xa->flags = ATA_F_PACKET | ATA_F_READ;
		break;
	case SCSI_DATA_OUT:
		xa->flags = ATA_F_PACKET | ATA_F_WRITE;
		break;
	default:
		xa->flags = ATA_F_PACKET;
	}
	xa->flags |= ATA_F_GET_RFIS;

	ap = atascsi_lookup_port(link);
	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->complete = atascsi_atapi_cmd_done;
	xa->timeout = xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = ATA_C_PACKET;
	fis->device = 0;
	fis->sector_count = xa->tag << 3;
	fis->features = ATA_H2D_FEATURES_DMA | ((xa->flags & ATA_F_WRITE) ?
	    ATA_H2D_FEATURES_DIR_WRITE : ATA_H2D_FEATURES_DIR_READ);
	fis->lba_mid = 0x00;
	fis->lba_high = 0x20;

	/* Copy SCSI command into ATAPI packet. */
	memcpy(xa->packetcmd, &xs->cmd, xs->cmdlen);

	ata_exec(as, xa);
}

void
atascsi_atapi_cmd_done(struct ata_xfer *xa)
{
	struct scsi_xfer	*xs = xa->atascsi_private;
	struct scsi_sense_data	*sd = &xs->sense;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		/* Return PACKET sense data */
		sd->error_code = SSD_ERRCODE_CURRENT;
		sd->flags = (xa->rfis.error & 0xf0) >> 4;
		if (xa->rfis.error & 0x04)
			sd->flags = SKEY_ILLEGAL_REQUEST;
		if (xa->rfis.error & 0x02)
			sd->flags |= SSD_EOM;
		if (xa->rfis.error & 0x01)
			sd->flags |= SSD_ILI;
		xs->error = XS_SENSE;
		break;
	case ATA_S_TIMEOUT:
		printf("atascsi_atapi_cmd_done, timeout\n");
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_atapi_cmd_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;

	scsi_done(xs);
}

void
atascsi_pmp_cmd(struct scsi_xfer *xs)
{
	switch (xs->cmd.opcode) {
	case REQUEST_SENSE:
		atascsi_pmp_sense(xs);
		return;
	case INQUIRY:
		atascsi_pmp_inq(xs);
		return;

	case TEST_UNIT_READY:
	case PREVENT_ALLOW:
		atascsi_done(xs, XS_NOERROR);
		return;

	default:
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}
}

void
atascsi_pmp_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data	*sd = (struct scsi_sense_data *)xs->data;

	bzero(xs->data, xs->datalen);
	sd->error_code = SSD_ERRCODE_CURRENT;
	sd->flags = SKEY_NO_SENSE;

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_pmp_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry_data inq;
	struct scsi_inquiry	*in_inq = (struct scsi_inquiry *)&xs->cmd;

	if (ISSET(in_inq->flags, SI_EVPD)) {
		/* any evpd pages we need to support here? */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inq, sizeof(inq));
	inq.device = 0x1E;	/* "well known logical unit" seems reasonable */
	inq.version = SCSI_REV_SPC3;
	inq.response_format = SID_SCSI2_RESPONSE;
	inq.additional_length = SID_SCSI2_ALEN;
	inq.flags |= SID_CmdQue;
	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));

	/* should use the data from atascsi_pmp_identify here?
	 * not sure how useful the chip id is, but maybe it'd be
	 * nice to include the number of ports.
	 */
	bcopy("Port Multiplier", inq.product, sizeof(inq.product));
	bcopy("    ", inq.revision, sizeof(inq.revision));

	scsi_copy_internal_data(xs, &inq, sizeof(inq));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

void
ata_exec(struct atascsi *as, struct ata_xfer *xa)
{
	as->as_methods->ata_cmd(xa);
}

void *
atascsi_io_get(void *cookie)
{
	struct atascsi_host_port *ahp = cookie;
	struct atascsi		*as = ahp->ahp_as;
	struct ata_xfer		*xa;

	xa = as->as_methods->ata_get_xfer(as->as_cookie, ahp->ahp_port);
	if (xa != NULL)
		xa->fis->type = ATA_FIS_TYPE_H2D;

	return (xa);
}

void
atascsi_io_put(void *cookie, void *io)
{
	struct atascsi_host_port *ahp = cookie;
	struct atascsi		*as = ahp->ahp_as;
	struct ata_xfer		*xa = io;

	xa->state = ATA_S_COMPLETE; /* XXX this state machine is dumb */
	as->as_methods->ata_put_xfer(xa);
}

void
ata_polled_complete(struct ata_xfer *xa)
{
	/* do nothing */
}

int
ata_polled(struct ata_xfer *xa)
{
	int			rv;

	if (!ISSET(xa->flags, ATA_F_DONE))
		panic("ata_polled: xa isn't complete");

	switch (xa->state) {
	case ATA_S_COMPLETE:
		rv = 0;
		break;
	case ATA_S_ERROR:
	case ATA_S_TIMEOUT:
		rv = EIO;
		break;
	default:
		panic("ata_polled: xa state (%d)",
		    xa->state);
	}

	scsi_io_put(xa->atascsi_private, xa);

	return (rv);
}

void
ata_complete(struct ata_xfer *xa)
{
	SET(xa->flags, ATA_F_DONE);
	xa->complete(xa);
}

void
ata_swapcopy(void *src, void *dst, size_t len)
{
	u_int16_t		*s = src, *d = dst;
	int			i;

	len /= 2;

	for (i = 0; i < len; i++)
		d[i] = swap16(s[i]);
}
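/*
 * Example (illustrative): ATA IDENTIFY strings are stored as big-endian
 * 16-bit words, so raw model bytes reading "eGenir cidks  " come out as
 * "Generic disk  " once ata_swapcopy() above swaps each byte pair.
 */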

int
atascsi_port_identify(struct atascsi_port *ap, struct ata_identify *identify)
{
	struct atascsi		*as = ap->ap_as;
	struct atascsi_host_port *ahp = ap->ap_host_port;
	struct ata_xfer		*xa;

	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
	if (xa == NULL)
		panic("no free xfers on a new port");
	xa->pmp_port = ap->ap_pmp_port;
	xa->data = identify;
	xa->datalen = sizeof(*identify);
	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = (ap->ap_type == ATA_PORT_T_DISK) ?
	    ATA_C_IDENTIFY : ATA_C_IDENTIFY_PACKET;
	xa->fis->device = 0;
	xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL;
	xa->timeout = 1000;
	xa->complete = ata_polled_complete;
	xa->atascsi_private = &ahp->ahp_iopool;
	ata_exec(as, xa);
	return (ata_polled(xa));
}

int
atascsi_port_set_features(struct atascsi_port *ap, int subcommand, int arg)
{
	struct atascsi		*as = ap->ap_as;
	struct atascsi_host_port *ahp = ap->ap_host_port;
	struct ata_xfer		*xa;

	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
	if (xa == NULL)
		panic("no free xfers on a new port");
	xa->fis->command = ATA_C_SET_FEATURES;
	xa->fis->features = subcommand;
	xa->fis->sector_count = arg;
	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->flags = ATA_F_POLL;
	xa->timeout = 1000;
	xa->complete = ata_polled_complete;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = &ahp->ahp_iopool;
	ata_exec(as, xa);
	return (ata_polled(xa));
}