/*	$OpenBSD: atascsi.c,v 1.128 2016/03/15 18:04:57 jca Exp $ */

/*
 * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2010 Conformal Systems LLC <info@conformal.com>
 * Copyright (c) 2010 Jonathan Matthew <jonathan@d14n.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/pool.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <dev/ata/atascsi.h>
#include <dev/ata/pmreg.h>

struct atascsi_port;

struct atascsi {
	struct device *as_dev;
	void *as_cookie;

	struct atascsi_host_port **as_host_ports;

	struct atascsi_methods *as_methods;
	struct scsi_adapter as_switch;
	struct scsi_link as_link;
	struct scsibus_softc *as_scsibus;

	int as_capability;
	int as_ncqdepth;
};

/*
 * atascsi_host_port is a port attached to the host controller, and
 * only holds the details relevant to the host controller.
 * atascsi_port is any port, including ports on port multipliers, and
 * it holds details of the device attached to the port.
 *
 * When there is a port multiplier attached to a port, the ahp_ports
 * array in the atascsi_host_port struct contains one atascsi_port for
 * each port, and one for the control port (port 15).  The index into
 * the array is the LUN used to address the port.  For the control port,
 * the LUN is 0, and for the port multiplier ports, the LUN is the
 * port number plus one.
 *
 * When there is no port multiplier attached to a port, the ahp_ports
 * array contains a single entry for the device.  The LUN and port number
 * for this entry are both 0.
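 *
 * For example, a port multiplier with four device ports is addressed
 * through LUN 0 (the PMP control port) and LUNs 1-4 (PMP ports 0-3),
 * while a directly attached device is simply target N, LUN 0.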
 */

struct atascsi_host_port {
	struct scsi_iopool ahp_iopool;
	struct atascsi *ahp_as;
	int ahp_port;
	int ahp_nports;

	struct atascsi_port **ahp_ports;
};

struct atascsi_port {
	struct ata_identify ap_identify;
	struct atascsi_host_port *ap_host_port;
	struct atascsi *ap_as;
	int ap_pmp_port;
	int ap_type;
	int ap_ncqdepth;
	int ap_features;
#define ATA_PORT_F_NCQ	0x1
#define ATA_PORT_F_TRIM	0x2
};

void atascsi_cmd(struct scsi_xfer *);
int atascsi_probe(struct scsi_link *);
void atascsi_free(struct scsi_link *);

/* template */
struct scsi_adapter atascsi_switch = {
	atascsi_cmd,		/* scsi_cmd */
	scsi_minphys,		/* scsi_minphys */
	atascsi_probe,		/* dev_probe */
	atascsi_free,		/* dev_free */
	NULL,			/* ioctl */
};

void ata_swapcopy(void *, void *, size_t);

void atascsi_disk_cmd(struct scsi_xfer *);
void atascsi_disk_cmd_done(struct ata_xfer *);
void atascsi_disk_inq(struct scsi_xfer *);
void atascsi_disk_inquiry(struct scsi_xfer *);
void atascsi_disk_vpd_supported(struct scsi_xfer *);
void atascsi_disk_vpd_serial(struct scsi_xfer *);
void atascsi_disk_vpd_ident(struct scsi_xfer *);
void atascsi_disk_vpd_ata(struct scsi_xfer *);
void atascsi_disk_vpd_limits(struct scsi_xfer *);
void atascsi_disk_vpd_info(struct scsi_xfer *);
void atascsi_disk_vpd_thin(struct scsi_xfer *);
void atascsi_disk_write_same_16(struct scsi_xfer *);
void atascsi_disk_write_same_16_done(struct ata_xfer *);
void atascsi_disk_unmap(struct scsi_xfer *);
void atascsi_disk_unmap_task(void *);
void atascsi_disk_unmap_done(struct ata_xfer *);
void atascsi_disk_capacity(struct scsi_xfer *);
void atascsi_disk_capacity16(struct scsi_xfer *);
void atascsi_disk_sync(struct scsi_xfer *);
void atascsi_disk_sync_done(struct ata_xfer *);
void atascsi_disk_sense(struct scsi_xfer *);
void atascsi_disk_start_stop(struct scsi_xfer *);
void atascsi_disk_start_stop_done(struct ata_xfer *);

void atascsi_atapi_cmd(struct scsi_xfer *);
void atascsi_atapi_cmd_done(struct ata_xfer *);

void atascsi_pmp_cmd(struct scsi_xfer *);
void atascsi_pmp_cmd_done(struct ata_xfer *);
void atascsi_pmp_sense(struct scsi_xfer *xs);
void atascsi_pmp_inq(struct scsi_xfer *xs);


void atascsi_passthru_12(struct scsi_xfer *);
void atascsi_passthru_16(struct scsi_xfer *);
int atascsi_passthru_map(struct scsi_xfer *, u_int8_t, u_int8_t);
void atascsi_passthru_done(struct ata_xfer *);

void atascsi_done(struct scsi_xfer *, int);

void ata_exec(struct atascsi *, struct ata_xfer *);

void ata_polled_complete(struct ata_xfer *);
int ata_polled(struct ata_xfer *);

u_int64_t ata_identify_blocks(struct ata_identify *);
u_int ata_identify_blocksize(struct ata_identify *);
u_int ata_identify_block_l2p_exp(struct ata_identify *);
u_int ata_identify_block_logical_align(struct ata_identify *);

void *atascsi_io_get(void *);
void atascsi_io_put(void *, void *);
struct atascsi_port *atascsi_lookup_port(struct scsi_link *);

int atascsi_port_identify(struct atascsi_port *,
    struct ata_identify *);
int atascsi_port_set_features(struct atascsi_port *, int, int);


struct atascsi *
atascsi_attach(struct device *self, struct atascsi_attach_args *aaa)
{
	struct scsibus_attach_args saa;
	struct atascsi *as;

	as = malloc(sizeof(*as), M_DEVBUF, M_WAITOK | M_ZERO);

	as->as_dev = self;
	as->as_cookie = aaa->aaa_cookie;
	as->as_methods = aaa->aaa_methods;
	as->as_capability = aaa->aaa_capability;
	as->as_ncqdepth = aaa->aaa_ncmds;

	/* copy from template and modify for ourselves */
	as->as_switch = atascsi_switch;
	if (aaa->aaa_minphys != NULL)
		as->as_switch.scsi_minphys = aaa->aaa_minphys;

	/* fill in our scsi_link */
	as->as_link.adapter = &as->as_switch;
	as->as_link.adapter_softc = as;
	as->as_link.adapter_buswidth = aaa->aaa_nports;
	as->as_link.luns = SATA_PMP_MAX_PORTS;
	as->as_link.adapter_target = aaa->aaa_nports;
	as->as_link.openings = 1;

	as->as_host_ports = mallocarray(aaa->aaa_nports,
	    sizeof(struct atascsi_host_port *), M_DEVBUF, M_WAITOK | M_ZERO);

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &as->as_link;

	/* stash the scsibus so we can do hotplug on it */
	as->as_scsibus = (struct scsibus_softc *)config_found(self, &saa,
	    scsiprint);

	return (as);
}

int
atascsi_detach(struct atascsi *as, int flags)
{
	int rv;

	rv = config_detach((struct device *)as->as_scsibus, flags);
	if (rv != 0)
		return (rv);

	free(as->as_host_ports, M_DEVBUF, 0);
	free(as, M_DEVBUF, sizeof(*as));

	return (0);
}

int
atascsi_probe_dev(struct atascsi *as, int port, int lun)
{
	if (lun == 0) {
		return (scsi_probe_target(as->as_scsibus, port));
	} else {
		return (scsi_probe_lun(as->as_scsibus, port, lun));
	}
}

int
atascsi_detach_dev(struct atascsi *as, int port, int lun, int flags)
{
	if (lun == 0) {
		return (scsi_detach_target(as->as_scsibus, port, flags));
	} else {
		return (scsi_detach_lun(as->as_scsibus, port, lun, flags));
	}
}

struct atascsi_port *
atascsi_lookup_port(struct scsi_link *link)
{
	struct atascsi *as = link->adapter_softc;
	struct atascsi_host_port *ahp;

	if (link->target >= as->as_link.adapter_buswidth)
		return (NULL);

	ahp = as->as_host_ports[link->target];
	if (link->lun >= ahp->ahp_nports)
		return (NULL);

	return (ahp->ahp_ports[link->lun]);
}

int
atascsi_probe(struct scsi_link *link)
{
	struct atascsi *as = link->adapter_softc;
	struct atascsi_host_port *ahp;
	struct atascsi_port *ap;
	struct ata_xfer *xa;
	struct ata_identify *identify;
	int port, type, qdepth;
	int rv;
	u_int16_t cmdset;
	u_int16_t validinfo, ultradma;
	int i, xfermode = -1;

	port = link->target;
	if (port >= as->as_link.adapter_buswidth)
		return (ENXIO);

	/* if this is a PMP port, check it's valid */
	if (link->lun > 0) {
		if (link->lun >= as->as_host_ports[port]->ahp_nports)
			return (ENXIO);
	}

	type = as->as_methods->ata_probe(as->as_cookie, port, link->lun);
	switch (type) {
	case ATA_PORT_T_DISK:
		break;
	case ATA_PORT_T_ATAPI:
		link->flags |= SDEV_ATAPI;
		link->quirks |= SDEV_ONLYBIG;
		break;
	case ATA_PORT_T_PM:
		if (link->lun != 0) {
			printf("%s.%d.%d: Port multipliers cannot be nested\n",
			    as->as_dev->dv_xname, port, link->lun);
			rv = ENODEV;
			goto unsupported;
		}
		break;
	default:
		rv = ENODEV;
		goto unsupported;
	}

	ap = malloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
	ap->ap_as = as;

	if (link->lun == 0) {
		ahp = malloc(sizeof(*ahp), M_DEVBUF, M_WAITOK | M_ZERO);
		ahp->ahp_as = as;
		ahp->ahp_port = port;

		scsi_iopool_init(&ahp->ahp_iopool, ahp, atascsi_io_get,
		    atascsi_io_put);

		as->as_host_ports[port] = ahp;

		if (type == ATA_PORT_T_PM) {
			ahp->ahp_nports = SATA_PMP_MAX_PORTS;
			ap->ap_pmp_port = SATA_PMP_CONTROL_PORT;
		} else {
			ahp->ahp_nports = 1;
			ap->ap_pmp_port = 0;
		}
		ahp->ahp_ports = mallocarray(ahp->ahp_nports,
		    sizeof(struct atascsi_port *), M_DEVBUF, M_WAITOK | M_ZERO);
	} else {
		ahp = as->as_host_ports[port];
		ap->ap_pmp_port = link->lun - 1;
	}

	ap->ap_host_port = ahp;
	ap->ap_type = type;

	link->pool = &ahp->ahp_iopool;

	/* fetch the device info, except for port multipliers */
	if (type != ATA_PORT_T_PM) {

		/* devices attached to port multipliers tend not to be
		 * spun up at this point, and sometimes this prevents
		 * identification from working, so we retry a few times
		 * with a fairly long delay.
		 */

		identify = dma_alloc(sizeof(*identify), PR_WAITOK | PR_ZERO);

		int count = (link->lun > 0) ? 6 : 2;
		while (count--) {
			rv = atascsi_port_identify(ap, identify);
			if (rv == 0) {
				ap->ap_identify = *identify;
				break;
			}
			if (count > 0)
				delay(5000000);
		}

		dma_free(identify, sizeof(*identify));

		if (rv != 0) {
			goto error;
		}
	}

	ahp->ahp_ports[link->lun] = ap;

	if (type != ATA_PORT_T_DISK)
		return (0);

	/*
	 * Early SATA drives (as well as PATA drives) need to have
	 * their transfer mode set properly, otherwise commands that
	 * use DMA will time out.
	 */
	validinfo = letoh16(ap->ap_identify.validinfo);
	if (ISSET(validinfo, ATA_ID_VALIDINFO_ULTRADMA)) {
		ultradma = letoh16(ap->ap_identify.ultradma);
		for (i = 7; i >= 0; i--) {
			if (ultradma & (1 << i)) {
				xfermode = ATA_SF_XFERMODE_UDMA | i;
				break;
			}
		}
	}
	if (xfermode != -1)
		(void)atascsi_port_set_features(ap, ATA_SF_XFERMODE, xfermode);

	if (as->as_capability & ASAA_CAP_NCQ &&
	    ISSET(letoh16(ap->ap_identify.satacap), ATA_SATACAP_NCQ) &&
	    (link->lun == 0 || as->as_capability & ASAA_CAP_PMP_NCQ)) {
		ap->ap_ncqdepth = ATA_QDEPTH(letoh16(ap->ap_identify.qdepth));
		qdepth = MIN(ap->ap_ncqdepth, as->as_ncqdepth);
		if (ISSET(as->as_capability, ASAA_CAP_NEEDS_RESERVED))
			qdepth--;

		if (qdepth > 1) {
			SET(ap->ap_features, ATA_PORT_F_NCQ);

			/* Raise the number of openings */
			link->openings = qdepth;

			/*
			 * XXX for directly attached devices, throw away any xfers
			 * that have tag numbers higher than what the device supports.
			 */
			if (link->lun == 0) {
				while (qdepth--) {
					xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
					if (xa->tag < link->openings) {
						xa->state = ATA_S_COMPLETE;
						scsi_io_put(&ahp->ahp_iopool, xa);
					}
				}
			}
		}
	}

	if (ISSET(letoh16(ap->ap_identify.data_set_mgmt),
	    ATA_ID_DATA_SET_MGMT_TRIM))
		SET(ap->ap_features, ATA_PORT_F_TRIM);

	cmdset = letoh16(ap->ap_identify.cmdset82);

	/* Enable write cache if supported */
	if (ISSET(cmdset, ATA_IDENTIFY_WRITECACHE)) {
		/* We don't care if it fails. */
		(void)atascsi_port_set_features(ap, ATA_SF_WRITECACHE_EN, 0);
	}

	/* Enable read lookahead if supported */
	if (ISSET(cmdset, ATA_IDENTIFY_LOOKAHEAD)) {
		/* We don't care if it fails. */
		(void)atascsi_port_set_features(ap, ATA_SF_LOOKAHEAD_EN, 0);
	}

	/*
	 * FREEZE LOCK the device so malicious users can't lock it on us.
	 * As there is no harm in issuing this to devices that don't
	 * support the security feature set we just send it, and don't bother
	 * checking if the device sends a command abort to tell us it doesn't
	 * support it.
	 */
	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
	if (xa == NULL)
		panic("no free xfers on a new port");
	xa->fis->command = ATA_C_SEC_FREEZE_LOCK;
	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->flags = ATA_F_POLL;
	xa->timeout = 1000;
	xa->complete = ata_polled_complete;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = &ahp->ahp_iopool;
	ata_exec(as, xa);
	ata_polled(xa); /* we don't care if it doesn't work */

	return (0);
error:
	free(ap, M_DEVBUF, sizeof(*ap));
unsupported:

	as->as_methods->ata_free(as->as_cookie, port, link->lun);
	return (rv);
}

void
atascsi_free(struct scsi_link *link)
{
	struct atascsi *as = link->adapter_softc;
	struct atascsi_host_port *ahp;
	struct atascsi_port *ap;
	int port;

	port = link->target;
	if (port >= as->as_link.adapter_buswidth)
		return;

	ahp = as->as_host_ports[port];
	if (ahp == NULL)
		return;

	if (link->lun >= ahp->ahp_nports)
		return;

	ap = ahp->ahp_ports[link->lun];
	free(ap, M_DEVBUF, sizeof(*ap));
	ahp->ahp_ports[link->lun] = NULL;

	as->as_methods->ata_free(as->as_cookie, port, link->lun);

	if (link->lun == ahp->ahp_nports - 1) {
		/* we've already freed all of ahp->ahp_ports, now
		 * free ahp itself.  this relies on the order luns are
		 * detached in scsi_detach_target().
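		 * (luns are expected to be detached in ascending order,
		 * so this lun, ahp_nports - 1, is the last one to go away)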
		 */
		free(ahp, M_DEVBUF, sizeof(*ahp));
		as->as_host_ports[port] = NULL;
	}
}

void
atascsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;

	ap = atascsi_lookup_port(link);
	if (ap == NULL) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	switch (ap->ap_type) {
	case ATA_PORT_T_DISK:
		atascsi_disk_cmd(xs);
		break;
	case ATA_PORT_T_ATAPI:
		atascsi_atapi_cmd(xs);
		break;
	case ATA_PORT_T_PM:
		atascsi_pmp_cmd(xs);
		break;

	case ATA_PORT_T_NONE:
	default:
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		break;
	}
}

void
atascsi_disk_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	int flags = 0;
	struct ata_fis_h2d *fis;
	u_int64_t lba;
	u_int32_t sector_count;

	ap = atascsi_lookup_port(link);

	switch (xs->cmd->opcode) {
	case READ_COMMAND:
	case READ_BIG:
	case READ_12:
	case READ_16:
		flags = ATA_F_READ;
		break;
	case WRITE_COMMAND:
	case WRITE_BIG:
	case WRITE_12:
	case WRITE_16:
		flags = ATA_F_WRITE;
		/* deal with io outside the switch */
		break;

	case WRITE_SAME_16:
		atascsi_disk_write_same_16(xs);
		return;
	case UNMAP:
		atascsi_disk_unmap(xs);
		return;

	case SYNCHRONIZE_CACHE:
		atascsi_disk_sync(xs);
		return;
	case REQUEST_SENSE:
		atascsi_disk_sense(xs);
		return;
	case INQUIRY:
		atascsi_disk_inq(xs);
		return;
	case READ_CAPACITY:
		atascsi_disk_capacity(xs);
		return;
	case READ_CAPACITY_16:
		atascsi_disk_capacity16(xs);
		return;

	case ATA_PASSTHRU_12:
		atascsi_passthru_12(xs);
		return;
	case ATA_PASSTHRU_16:
		atascsi_passthru_16(xs);
		return;

	case START_STOP:
		atascsi_disk_start_stop(xs);
		return;

	case TEST_UNIT_READY:
	case PREVENT_ALLOW:
		atascsi_done(xs, XS_NOERROR);
		return;

	default:
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	xa->flags = flags;
	scsi_cmd_rw_decode(xs->cmd, &lba, &sector_count);
	if ((lba >> 48) != 0 || (sector_count >> 16) != 0) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	fis = xa->fis;

	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->lba_low = lba & 0xff;
	fis->lba_mid = (lba >> 8) & 0xff;
	fis->lba_high = (lba >> 16) & 0xff;

	if (ISSET(ap->ap_features, ATA_PORT_F_NCQ) &&
	    (xa->tag < ap->ap_ncqdepth) &&
	    !(xs->flags & SCSI_POLL)) {
		/* Use NCQ */
		xa->flags |= ATA_F_NCQ;
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA;
		fis->device = ATA_H2D_DEVICE_LBA;
		fis->lba_low_exp = (lba >> 24) & 0xff;
		fis->lba_mid_exp = (lba >> 32) & 0xff;
		fis->lba_high_exp = (lba >> 40) & 0xff;
		fis->sector_count = xa->tag << 3;
		fis->features = sector_count & 0xff;
		fis->features_exp = (sector_count >> 8) & 0xff;
	} else if (sector_count > 0x100 || lba > 0xfffffff) {
		/* Use LBA48 */
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITEDMA_EXT : ATA_C_READDMA_EXT;
		fis->device = ATA_H2D_DEVICE_LBA;
		fis->lba_low_exp = (lba >> 24) & 0xff;
		fis->lba_mid_exp = (lba >> 32) & 0xff;
		fis->lba_high_exp = (lba >> 40) & 0xff;
		fis->sector_count = sector_count & 0xff;
		fis->sector_count_exp = (sector_count >> 8) & 0xff;
	} else {
		/* Use LBA */
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITEDMA : ATA_C_READDMA;
		fis->device = ATA_H2D_DEVICE_LBA | ((lba >> 24) & 0x0f);
		fis->sector_count = sector_count & 0xff;
	}

	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->complete = atascsi_disk_cmd_done;
	xa->timeout = xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	ata_exec(as, xa);
}

void
atascsi_disk_cmd_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		/* fake sense? */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_disk_cmd_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;

	scsi_done(xs);
}

void
atascsi_disk_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;

	if (xs->cmdlen != sizeof(*inq)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (ISSET(inq->flags, SI_EVPD)) {
		switch (inq->pagecode) {
		case SI_PG_SUPPORTED:
			atascsi_disk_vpd_supported(xs);
			break;
		case SI_PG_SERIAL:
			atascsi_disk_vpd_serial(xs);
			break;
		case SI_PG_DEVID:
			atascsi_disk_vpd_ident(xs);
			break;
		case SI_PG_ATA:
			atascsi_disk_vpd_ata(xs);
			break;
		case SI_PG_DISK_LIMITS:
			atascsi_disk_vpd_limits(xs);
			break;
		case SI_PG_DISK_INFO:
			atascsi_disk_vpd_info(xs);
			break;
		case SI_PG_DISK_THIN:
			atascsi_disk_vpd_thin(xs);
			break;
		default:
			atascsi_done(xs, XS_DRIVER_STUFFUP);
			break;
		}
	} else
		atascsi_disk_inquiry(xs);
}

void
atascsi_disk_inquiry(struct scsi_xfer *xs)
{
	struct scsi_inquiry_data inq;
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;

	ap = atascsi_lookup_port(link);

	bzero(&inq, sizeof(inq));

	inq.device = T_DIRECT;
	inq.version = 0x05; /* SPC-3 */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags |= SID_CmdQue;
	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));
	ata_swapcopy(ap->ap_identify.model, inq.product,
	    sizeof(inq.product));
	ata_swapcopy(ap->ap_identify.firmware, inq.revision,
	    sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_supported(struct scsi_xfer *xs)
{
	struct {
		struct scsi_vpd_hdr hdr;
		u_int8_t list[7];
	} pg;
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	int fat;

	ap = atascsi_lookup_port(link);
	fat = ISSET(ap->ap_features, ATA_PORT_F_TRIM) ? 0 : 1;
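	/* when TRIM is unsupported, the THIN page is left off the end of the list */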

	bzero(&pg, sizeof(pg));

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_SUPPORTED;
	_lto2b(sizeof(pg.list) - fat, pg.hdr.page_length);
	pg.list[0] = SI_PG_SUPPORTED;
	pg.list[1] = SI_PG_SERIAL;
	pg.list[2] = SI_PG_DEVID;
	pg.list[3] = SI_PG_ATA;
	pg.list[4] = SI_PG_DISK_LIMITS;
	pg.list[5] = SI_PG_DISK_INFO;
	pg.list[6] = SI_PG_DISK_THIN; /* "trimmed" if fat. get it? tehe. */

	bcopy(&pg, xs->data, MIN(sizeof(pg) - fat, xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_serial(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_vpd_serial pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_SERIAL;
	_lto2b(sizeof(ap->ap_identify.serial), pg.hdr.page_length);
	ata_swapcopy(ap->ap_identify.serial, pg.serial,
	    sizeof(ap->ap_identify.serial));

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_ident(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct {
		struct scsi_vpd_hdr hdr;
		struct scsi_vpd_devid_hdr devid_hdr;
		u_int8_t devid[68];
	} pg;
	u_int8_t *p;
	size_t pg_len;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));
	if (letoh16(ap->ap_identify.features87) & ATA_ID_F87_WWN) {
		pg_len = 8;

		pg.devid_hdr.pi_code = VPD_DEVID_CODE_BINARY;
		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_NAA;

		ata_swapcopy(&ap->ap_identify.naa_ieee_oui, pg.devid, pg_len);
	} else {
		pg_len = 68;

		pg.devid_hdr.pi_code = VPD_DEVID_CODE_ASCII;
		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_T10;

		p = pg.devid;
		bcopy("ATA     ", p, 8);
		p += 8;
		ata_swapcopy(ap->ap_identify.model, p,
		    sizeof(ap->ap_identify.model));
		p += sizeof(ap->ap_identify.model);
		ata_swapcopy(ap->ap_identify.serial, p,
		    sizeof(ap->ap_identify.serial));
	}

	pg.devid_hdr.len = pg_len;
	pg_len += sizeof(pg.devid_hdr);

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DEVID;
	_lto2b(pg_len, pg.hdr.page_length);
	pg_len += sizeof(pg.hdr);

	bcopy(&pg, xs->data, MIN(pg_len, xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_ata(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_vpd_ata pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_ATA;
	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);

	memset(pg.sat_vendor, ' ', sizeof(pg.sat_vendor));
	memcpy(pg.sat_vendor, "OpenBSD",
	    MIN(strlen("OpenBSD"), sizeof(pg.sat_vendor)));
	memset(pg.sat_product, ' ', sizeof(pg.sat_product));
	memcpy(pg.sat_product, "atascsi",
	    MIN(strlen("atascsi"), sizeof(pg.sat_product)));
	memset(pg.sat_revision, ' ', sizeof(pg.sat_revision));
	memcpy(pg.sat_revision, osrelease,
	    MIN(strlen(osrelease), sizeof(pg.sat_revision)));

	/* XXX device signature */

	switch (ap->ap_type) {
	case ATA_PORT_T_DISK:
		pg.command_code = VPD_ATA_COMMAND_CODE_ATA;
		break;
	case ATA_PORT_T_ATAPI:
		pg.command_code = VPD_ATA_COMMAND_CODE_ATAPI;
		break;
	}

	memcpy(pg.identify, &ap->ap_identify, sizeof(pg.identify));

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_limits(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_vpd_disk_limits pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));
	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DISK_LIMITS;
	_lto2b(SI_PG_DISK_LIMITS_LEN_THIN, pg.hdr.page_length);

	_lto2b(1 << ata_identify_block_l2p_exp(&ap->ap_identify),
	    pg.optimal_xfer_granularity);

	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		/*
		 * ATA only supports 65535 blocks per TRIM descriptor, so
		 * avoid having to split UNMAP descriptors and overflow the page
		 * limit by using that as a max.
		 */
		_lto4b(ATA_DSM_TRIM_MAX_LEN, pg.max_unmap_lba_count);
		_lto4b(512 / 8, pg.max_unmap_desc_count);
	}

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_info(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_vpd_disk_info pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));
	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DISK_INFO;
	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);

	_lto2b(letoh16(ap->ap_identify.rpm), pg.rpm);
	pg.form_factor = letoh16(ap->ap_identify.form) & ATA_ID_FORM_MASK;

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_thin(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_vpd_disk_thin pg;

	ap = atascsi_lookup_port(link);
	if (!ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&pg, sizeof(pg));
	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DISK_THIN;
	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);

	pg.flags = VPD_DISK_THIN_TPU | VPD_DISK_THIN_TPWS;

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_write_same_16(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct scsi_write_same_16 *cdb;
	struct ata_xfer *xa = xs->io;
	struct ata_fis_h2d *fis;
	u_int64_t lba;
	u_int32_t length;
	u_int64_t desc;

	if (xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	cdb = (struct scsi_write_same_16 *)xs->cmd;

	if (!ISSET(cdb->flags, WRITE_SAME_F_UNMAP) ||
	    !ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		/* generate sense data */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (xs->datalen < 512) {
		/* generate sense data */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	lba = _8btol(cdb->lba);
	length = _4btol(cdb->length);

	if (length > ATA_DSM_TRIM_MAX_LEN) {
		/* XXX we don't support requests over 65535 blocks */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	xa->data = xs->data;
	xa->datalen = 512;
	xa->flags = ATA_F_WRITE;
	xa->pmp_port = ap->ap_pmp_port;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;
	xa->complete = atascsi_disk_write_same_16_done;
	xa->atascsi_private = xs;
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;

	/* TRIM sends a list of blocks to discard in the databuf. */
	memset(xa->data, 0, xa->datalen);
	desc = htole64(ATA_DSM_TRIM_DESC(lba, length));
	memcpy(xa->data, &desc, sizeof(desc));

	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = ATA_C_DSM;
	fis->features = ATA_DSM_TRIM;
	fis->sector_count = 1;

	ata_exec(as, xa);
}

void
atascsi_disk_write_same_16_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;

	default:
		panic("atascsi_disk_write_same_16_done: "
		    "unexpected ata_xfer state (%d)", xa->state);
	}

	scsi_done(xs);
}

void
atascsi_disk_unmap(struct scsi_xfer *xs)
{
	struct ata_xfer *xa = xs->io;
	struct scsi_unmap *cdb;
	struct scsi_unmap_data *unmap;
	u_int len;

	if (ISSET(xs->flags, SCSI_POLL) || xs->cmdlen != sizeof(*cdb))
		atascsi_done(xs, XS_DRIVER_STUFFUP);

	cdb = (struct scsi_unmap *)xs->cmd;
	len = _2btol(cdb->list_len);
	if (xs->datalen != len || len < sizeof(*unmap)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	unmap = (struct scsi_unmap_data *)xs->data;
	if (_2btol(unmap->data_length) != len) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	len = _2btol(unmap->desc_length);
	if (len != xs->datalen - sizeof(*unmap)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (len < sizeof(struct scsi_unmap_desc)) {
		/* no work, no error according to sbc3 */
		atascsi_done(xs, XS_NOERROR);
	}

	if (len > sizeof(struct scsi_unmap_desc) * 64) {
		/* more work than we advertised */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/* let's go */
	if (ISSET(xs->flags, SCSI_NOSLEEP)) {
		task_set(&xa->task, atascsi_disk_unmap_task, xs);
		task_add(systq, &xa->task);
	} else {
		/* we can already sleep for memory */
		atascsi_disk_unmap_task(xs);
	}
}

void
atascsi_disk_unmap_task(void *xxs)
{
	struct scsi_xfer *xs = xxs;
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	struct ata_fis_h2d *fis;
	struct scsi_unmap_data *unmap;
	struct scsi_unmap_desc *descs, *d;
	u_int64_t *trims;
	u_int len, i;

	trims = dma_alloc(512, PR_WAITOK | PR_ZERO);

	ap = atascsi_lookup_port(link);
	unmap = (struct scsi_unmap_data *)xs->data;
	descs = (struct scsi_unmap_desc *)(unmap + 1);

	len = _2btol(unmap->desc_length) / sizeof(*d);
	for (i = 0; i < len; i++) {
		d = &descs[i];
		if (_4btol(d->logical_blocks) > ATA_DSM_TRIM_MAX_LEN)
			goto fail;

		trims[i] = htole64(ATA_DSM_TRIM_DESC(_8btol(d->logical_addr),
		    _4btol(d->logical_blocks)));
	}

	xa->data = trims;
	xa->datalen = 512;
	xa->flags = ATA_F_WRITE;
	xa->pmp_port = ap->ap_pmp_port;
	xa->complete = atascsi_disk_unmap_done;
	xa->atascsi_private = xs;
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;

	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = ATA_C_DSM;
	fis->features = ATA_DSM_TRIM;
	fis->sector_count = 1;

	ata_exec(as, xa);
	return;

fail:
	dma_free(xa->data, 512);
	atascsi_done(xs, XS_DRIVER_STUFFUP);
}

void
atascsi_disk_unmap_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;

	dma_free(xa->data, 512);

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;

	default:
		panic("atascsi_disk_unmap_done: "
		    "unexpected ata_xfer state (%d)", xa->state);
	}

	scsi_done(xs);
}

void
atascsi_disk_sync(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;

	if (xs->cmdlen != sizeof(struct scsi_synchronize_cache)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	xa->datalen = 0;
	xa->flags = ATA_F_READ;
	xa->complete = atascsi_disk_sync_done;
	/* Spec says flush cache can take >30 sec, so give it at least 45. */
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
	xa->atascsi_private = xs;
	xa->pmp_port = ap->ap_pmp_port;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = ATA_C_FLUSH_CACHE;
	xa->fis->device = 0;

	ata_exec(as, xa);
}

void
atascsi_disk_sync_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;

	case ATA_S_ERROR:
	case ATA_S_TIMEOUT:
		printf("atascsi_disk_sync_done: %s\n",
		    xa->state == ATA_S_TIMEOUT ? "timeout" : "error");
		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
		    XS_DRIVER_STUFFUP);
		break;

	default:
		panic("atascsi_disk_sync_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	scsi_done(xs);
}

u_int64_t
ata_identify_blocks(struct ata_identify *id)
{
	u_int64_t blocks = 0;
	int i;

	if (letoh16(id->cmdset83) & 0x0400) {
		/* LBA48 feature set supported */
		for (i = 3; i >= 0; --i) {
			blocks <<= 16;
			blocks += letoh16(id->addrsecxt[i]);
		}
	} else {
		blocks = letoh16(id->addrsec[1]);
		blocks <<= 16;
		blocks += letoh16(id->addrsec[0]);
	}

	return (blocks - 1);
}

u_int
ata_identify_blocksize(struct ata_identify *id)
{
	u_int blocksize = 512;
	u_int16_t p2l_sect = letoh16(id->p2l_sect);

	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SIZESET)) {
		blocksize = letoh16(id->words_lsec[1]);
		blocksize <<= 16;
		blocksize += letoh16(id->words_lsec[0]);
		blocksize <<= 1;
	}

	return (blocksize);
}

u_int
ata_identify_block_l2p_exp(struct ata_identify *id)
{
	u_int exponent = 0;
	u_int16_t p2l_sect = letoh16(id->p2l_sect);

	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET)) {
		exponent = (p2l_sect & ATA_ID_P2L_SECT_SIZE);
	}

	return (exponent);
}

u_int
ata_identify_block_logical_align(struct ata_identify *id)
{
	u_int align = 0;
	u_int16_t p2l_sect = letoh16(id->p2l_sect);
	u_int16_t logical_align = letoh16(id->logical_align);

	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET) &&
	    (logical_align & ATA_ID_LALIGN_MASK) == ATA_ID_LALIGN_VALID)
		align = logical_align & ATA_ID_LALIGN;

	return (align);
}

void
atascsi_disk_capacity(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_read_cap_data rcd;
	u_int64_t capacity;

	ap = atascsi_lookup_port(link);
	if (xs->cmdlen != sizeof(struct scsi_read_capacity)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&rcd, sizeof(rcd));
	capacity = ata_identify_blocks(&ap->ap_identify);
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_capacity16(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_read_cap_data_16 rcd;
	u_int align;
	u_int16_t lowest_aligned = 0;

	ap = atascsi_lookup_port(link);
	if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&rcd, sizeof(rcd));

	_lto8b(ata_identify_blocks(&ap->ap_identify), rcd.addr);
	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
	rcd.logical_per_phys = ata_identify_block_l2p_exp(&ap->ap_identify);
	align = ata_identify_block_logical_align(&ap->ap_identify);
	if (align > 0)
		lowest_aligned = (1 << rcd.logical_per_phys) - align;

	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		SET(lowest_aligned, READ_CAP_16_TPE);

		if (ISSET(letoh16(ap->ap_identify.add_support),
		    ATA_ID_ADD_SUPPORT_DRT))
			SET(lowest_aligned, READ_CAP_16_TPRZ);
	}
	_lto2b(lowest_aligned, rcd.lowest_aligned);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

int
atascsi_passthru_map(struct scsi_xfer *xs, u_int8_t count_proto, u_int8_t flags)
{
	struct ata_xfer *xa = xs->io;

	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->timeout = xs->timeout;
	xa->flags = 0;
	if (xs->flags & SCSI_DATA_IN)
		xa->flags |= ATA_F_READ;
	if (xs->flags & SCSI_DATA_OUT)
		xa->flags |= ATA_F_WRITE;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	switch (count_proto & ATA_PASSTHRU_PROTO_MASK) {
	case ATA_PASSTHRU_PROTO_NON_DATA:
	case ATA_PASSTHRU_PROTO_PIO_DATAIN:
	case ATA_PASSTHRU_PROTO_PIO_DATAOUT:
		xa->flags |= ATA_F_PIO;
		break;
	default:
		/* we don't support this yet */
		return (1);
	}

	xa->atascsi_private = xs;
	xa->complete = atascsi_passthru_done;

	return (0);
}

void
atascsi_passthru_12(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	struct scsi_ata_passthru_12 *cdb;
	struct ata_fis_h2d *fis;

	if (xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	cdb = (struct scsi_ata_passthru_12 *)xs->cmd;
	/* validate cdb */

	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = cdb->command;
	fis->features = cdb->features;
	fis->lba_low = cdb->lba_low;
	fis->lba_mid = cdb->lba_mid;
	fis->lba_high = cdb->lba_high;
	fis->device = cdb->device;
	fis->sector_count = cdb->sector_count;
	xa->pmp_port = ap->ap_pmp_port;

	ata_exec(as, xa);
}

void
atascsi_passthru_16(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	struct scsi_ata_passthru_16 *cdb;
	struct ata_fis_h2d *fis;

	if (xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	cdb = (struct scsi_ata_passthru_16 *)xs->cmd;
	/* validate cdb */

	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = cdb->command;
	fis->features = cdb->features[1];
	fis->lba_low = cdb->lba_low[1];
	fis->lba_mid = cdb->lba_mid[1];
	fis->lba_high = cdb->lba_high[1];
	fis->device = cdb->device;
	fis->lba_low_exp = cdb->lba_low[0];
	fis->lba_mid_exp = cdb->lba_mid[0];
	fis->lba_high_exp = cdb->lba_high[0];
	fis->features_exp = cdb->features[0];
	fis->sector_count = cdb->sector_count[1];
	fis->sector_count_exp = cdb->sector_count[0];
	xa->pmp_port = ap->ap_pmp_port;

	ata_exec(as, xa);
}

void
atascsi_passthru_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;

	/*
	 * XXX need to generate sense if cdb wants it
	 */

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		printf("atascsi_passthru_done, timeout\n");
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_passthru_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;

	scsi_done(xs);
}

void
atascsi_disk_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data *sd = (struct scsi_sense_data *)xs->data;

	bzero(xs->data, xs->datalen);
	/* check datalen > sizeof(struct scsi_sense_data)? */
	sd->error_code = SSD_ERRCODE_CURRENT;
	sd->flags = SKEY_NO_SENSE;

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_start_stop(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	struct scsi_start_stop *ss = (struct scsi_start_stop *)xs->cmd;

	if (xs->cmdlen != sizeof(*ss)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (ss->how != SSS_STOP) {
		atascsi_done(xs, XS_NOERROR);
		return;
	}

	/*
	 * A SCSI START STOP UNIT command with the START bit set to
	 * zero gets translated into an ATA FLUSH CACHE command
	 * followed by an ATA STANDBY IMMEDIATE command.
	 */
	ap = atascsi_lookup_port(link);
	xa->datalen = 0;
	xa->flags = ATA_F_READ;
	xa->complete = atascsi_disk_start_stop_done;
	/* Spec says flush cache can take >30 sec, so give it at least 45. */
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = ATA_C_FLUSH_CACHE;
	xa->fis->device = 0;

	ata_exec(as, xa);
}

void
atascsi_disk_start_stop_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		break;

	case ATA_S_ERROR:
	case ATA_S_TIMEOUT:
		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
		    XS_DRIVER_STUFFUP);
		xs->resid = xa->resid;
		scsi_done(xs);
		return;

	default:
		panic("atascsi_disk_start_stop_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	/*
	 * The FLUSH CACHE command completed successfully; now issue
	 * the STANDBY IMMEDIATE command.
	 */
	ap = atascsi_lookup_port(link);
	xa->datalen = 0;
	xa->flags = ATA_F_READ;
	xa->state = ATA_S_SETUP;
	xa->complete = atascsi_disk_cmd_done;
	/* Spec says flush cache can take >30 sec, so give it at least 45. */
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = ATA_C_STANDBY_IMMED;
	xa->fis->device = 0;

	ata_exec(as, xa);
}

void
atascsi_atapi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	struct ata_fis_h2d *fis;

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xa->flags = ATA_F_PACKET | ATA_F_READ;
		break;
	case SCSI_DATA_OUT:
		xa->flags = ATA_F_PACKET | ATA_F_WRITE;
		break;
	default:
		xa->flags = ATA_F_PACKET;
	}
	xa->flags |= ATA_F_GET_RFIS;

	ap = atascsi_lookup_port(link);
	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->complete = atascsi_atapi_cmd_done;
	xa->timeout = xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = ATA_C_PACKET;
	fis->device = 0;
	fis->sector_count = xa->tag << 3;
	fis->features = ATA_H2D_FEATURES_DMA | ((xa->flags & ATA_F_WRITE) ?
	    ATA_H2D_FEATURES_DIR_WRITE : ATA_H2D_FEATURES_DIR_READ);
	fis->lba_mid = 0x00;
	fis->lba_high = 0x20;

	/* Copy SCSI command into ATAPI packet. */
	memcpy(xa->packetcmd, xs->cmd, xs->cmdlen);

	ata_exec(as, xa);
}

void
atascsi_atapi_cmd_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;
	struct scsi_sense_data *sd = &xs->sense;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		/* Return PACKET sense data */
		sd->error_code = SSD_ERRCODE_CURRENT;
		sd->flags = (xa->rfis.error & 0xf0) >> 4;
		if (xa->rfis.error & 0x04)
			sd->flags = SKEY_ILLEGAL_REQUEST;
		if (xa->rfis.error & 0x02)
			sd->flags |= SSD_EOM;
		if (xa->rfis.error & 0x01)
			sd->flags |= SSD_ILI;
		xs->error = XS_SENSE;
		break;
	case ATA_S_TIMEOUT:
		printf("atascsi_atapi_cmd_done, timeout\n");
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_atapi_cmd_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;

	scsi_done(xs);
}

void
atascsi_pmp_cmd(struct scsi_xfer *xs)
{
	switch (xs->cmd->opcode) {
	case REQUEST_SENSE:
		atascsi_pmp_sense(xs);
		return;
	case INQUIRY:
		atascsi_pmp_inq(xs);
		return;

	case TEST_UNIT_READY:
	case PREVENT_ALLOW:
		atascsi_done(xs, XS_NOERROR);
		return;

	default:
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}
}

void
atascsi_pmp_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data *sd = (struct scsi_sense_data *)xs->data;

	bzero(xs->data, xs->datalen);
	sd->error_code = SSD_ERRCODE_CURRENT;
	sd->flags = SKEY_NO_SENSE;

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_pmp_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry_data inq;
	struct scsi_inquiry *in_inq = (struct scsi_inquiry *)xs->cmd;

	if (ISSET(in_inq->flags, SI_EVPD)) {
		/* any evpd pages we need to support here? */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inq, sizeof(inq));
	inq.device = 0x1E;	/* "well known logical unit" seems reasonable */
	inq.version = 0x05;	/* SPC-3? */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags |= SID_CmdQue;
	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));

	/* should use the data from atascsi_pmp_identify here?
	 * not sure how useful the chip id is, but maybe it'd be
	 * nice to include the number of ports.
	 */
	bcopy("Port Multiplier", inq.product, sizeof(inq.product));
	bcopy("    ", inq.revision, sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));
	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

void
ata_exec(struct atascsi *as, struct ata_xfer *xa)
{
	as->as_methods->ata_cmd(xa);
}

void *
atascsi_io_get(void *cookie)
{
	struct atascsi_host_port *ahp = cookie;
	struct atascsi *as = ahp->ahp_as;
	struct ata_xfer *xa;

	xa = as->as_methods->ata_get_xfer(as->as_cookie, ahp->ahp_port);
	if (xa != NULL)
		xa->fis->type = ATA_FIS_TYPE_H2D;

	return (xa);
}

void
atascsi_io_put(void *cookie, void *io)
{
	struct atascsi_host_port *ahp = cookie;
	struct atascsi *as = ahp->ahp_as;
	struct ata_xfer *xa = io;

	xa->state = ATA_S_COMPLETE; /* XXX this state machine is dumb */
	as->as_methods->ata_put_xfer(xa);
}

void
ata_polled_complete(struct ata_xfer *xa)
{
	/* do nothing */
}

int
ata_polled(struct ata_xfer *xa)
{
	int rv;

	if (!ISSET(xa->flags, ATA_F_DONE))
		panic("ata_polled: xa isnt complete");

	switch (xa->state) {
	case ATA_S_COMPLETE:
		rv = 0;
		break;
	case ATA_S_ERROR:
	case ATA_S_TIMEOUT:
		rv = EIO;
		break;
	default:
		panic("ata_polled: xa state (%d)",
		    xa->state);
	}

	scsi_io_put(xa->atascsi_private, xa);

	return (rv);
}

void
ata_complete(struct ata_xfer *xa)
{
	SET(xa->flags, ATA_F_DONE);
	xa->complete(xa);
}

void
ata_swapcopy(void *src, void *dst, size_t len)
{
	u_int16_t *s = src, *d = dst;
	int i;

	len /= 2;

	for (i = 0; i < len; i++)
		d[i] = swap16(s[i]);
}

int
atascsi_port_identify(struct atascsi_port *ap, struct ata_identify *identify)
{
	struct atascsi *as = ap->ap_as;
	struct atascsi_host_port *ahp = ap->ap_host_port;
	struct ata_xfer *xa;

	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
	if (xa == NULL)
		panic("no free xfers on a new port");
	xa->pmp_port = ap->ap_pmp_port;
	xa->data = identify;
	xa->datalen = sizeof(*identify);
	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = (ap->ap_type == ATA_PORT_T_DISK) ?
	    ATA_C_IDENTIFY : ATA_C_IDENTIFY_PACKET;
	xa->fis->device = 0;
	xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL;
	xa->timeout = 1000;
	xa->complete = ata_polled_complete;
	xa->atascsi_private = &ahp->ahp_iopool;
	ata_exec(as, xa);
	return (ata_polled(xa));
}

int
atascsi_port_set_features(struct atascsi_port *ap, int subcommand, int arg)
{
	struct atascsi *as = ap->ap_as;
	struct atascsi_host_port *ahp = ap->ap_host_port;
	struct ata_xfer *xa;

	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
	if (xa == NULL)
		panic("no free xfers on a new port");
	xa->fis->command = ATA_C_SET_FEATURES;
	xa->fis->features = subcommand;
	xa->fis->sector_count = arg;
	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->flags = ATA_F_POLL;
	xa->timeout = 1000;
	xa->complete = ata_polled_complete;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = &ahp->ahp_iopool;
	ata_exec(as, xa);
	return (ata_polled(xa));
}