/*
 * (MPSAFE)
 *
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $OpenBSD: atascsi.c,v 1.64 2009/02/16 21:19:06 miod Exp $
 */
/*
 * Implement each SATA port as its own SCSI bus on CAM.  This way we can
 * implement future port multiplier features as individual devices on the
 * bus.
 *
 * Much of the cdb<->xa conversion code was taken from OpenBSD, the rest
 * was written natively for DragonFly.
 */

#include "ahci.h"

static void ahci_xpt_action(struct cam_sim *sim, union ccb *ccb);
static void ahci_xpt_poll(struct cam_sim *sim);
static void ahci_xpt_scsi_disk_io(struct ahci_port *ap,
			struct ata_port *at, union ccb *ccb);
static void ahci_xpt_scsi_atapi_io(struct ahci_port *ap,
			struct ata_port *at, union ccb *ccb);
static void ahci_xpt_page_inquiry(struct ahci_port *ap,
			struct ata_port *at, union ccb *ccb);

static void ahci_ata_complete_disk_rw(struct ata_xfer *xa);
static void ahci_ata_complete_disk_synchronize_cache(struct ata_xfer *xa);
static void ahci_atapi_complete_cmd(struct ata_xfer *xa);
static void ahci_ata_dummy_sense(struct scsi_sense_data *sense_data);
static void ahci_ata_atapi_sense(struct ata_fis_d2h *rfis,
			struct scsi_sense_data *sense_data);

static int ahci_cam_probe_disk(struct ahci_port *ap, struct ata_port *at);
static int ahci_cam_probe_atapi(struct ahci_port *ap, struct ata_port *at);
static int ahci_set_xfer(struct ahci_port *ap, struct ata_port *atx);
static void ahci_ata_dummy_done(struct ata_xfer *xa);
static void ata_fix_identify(struct ata_identify *id);
static void ahci_cam_rescan(struct ahci_port *ap);
static void ahci_strip_string(const char **basep, int *lenp);

int
ahci_cam_attach(struct ahci_port *ap)
{
	struct cam_devq *devq;
	struct cam_sim *sim;
	int error;
	int unit;

	/*
	 * We want at least one ccb to be available for error processing
	 * so don't let CAM use more than ncmds - 1.
	 */
	unit = device_get_unit(ap->ap_sc->sc_dev);
	if (ap->ap_sc->sc_ncmds > 1)
		devq = cam_simq_alloc(ap->ap_sc->sc_ncmds - 1);
	else
		devq = cam_simq_alloc(ap->ap_sc->sc_ncmds);
	if (devq == NULL) {
		return (ENOMEM);
	}

	/*
	 * Give the devq enough room to run with 32 max_dev_transactions,
	 * but set the overall max tags to 1 until NCQ is negotiated.
	 */
	sim = cam_sim_alloc(ahci_xpt_action, ahci_xpt_poll, "ahci",
			    (void *)ap, unit, &ap->ap_sim_lock,
			    32, 1, devq);
	cam_simq_release(devq);
	if (sim == NULL) {
		return (ENOMEM);
	}
	ap->ap_sim = sim;
	ahci_os_unlock_port(ap);
	lockmgr(&ap->ap_sim_lock, LK_EXCLUSIVE);
	error = xpt_bus_register(ap->ap_sim, ap->ap_num);
	lockmgr(&ap->ap_sim_lock, LK_RELEASE);
	ahci_os_lock_port(ap);
	if (error != CAM_SUCCESS) {
		ahci_cam_detach(ap);
		return (EINVAL);
	}
	ap->ap_flags |= AP_F_BUS_REGISTERED;

	if (ap->ap_probe == ATA_PROBE_NEED_IDENT)
		error = ahci_cam_probe(ap, NULL);
	else
		error = 0;
	if (error) {
		ahci_cam_detach(ap);
		return (EIO);
	}
	ap->ap_flags |= AP_F_CAM_ATTACHED;

	return(0);
}

/*
 * The state of the port has changed.
 *
 * If atx is NULL the physical port has changed state.
 * If atx is non-NULL a particular target behind a PM has changed state.
 *
 * If found is -1 the target state must be queued to a non-interrupt context.
 * (only works with at == NULL).
 *
 * If found is 0 the target was removed.
 * If found is 1 the target was inserted.
 */
void
ahci_cam_changed(struct ahci_port *ap, struct ata_port *atx, int found)
{
	struct cam_path *tmppath;
	int status;
	int target;

	target = atx ?
		 atx->at_target : CAM_TARGET_WILDCARD;

	if (ap->ap_sim == NULL)
		return;
	if (found == CAM_TARGET_WILDCARD) {
		status = xpt_create_path(&tmppath, NULL,
					 cam_sim_path(ap->ap_sim),
					 target, CAM_LUN_WILDCARD);
		if (status != CAM_REQ_CMP)
			return;
		ahci_cam_rescan(ap);
	} else {
		status = xpt_create_path(&tmppath, NULL,
					 cam_sim_path(ap->ap_sim),
					 target,
					 CAM_LUN_WILDCARD);
		if (status != CAM_REQ_CMP)
			return;
#if 0
		/*
		 * This confuses CAM
		 */
		if (found)
			xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
		else
			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
#endif
	}
	xpt_free_path(tmppath);
}

void
ahci_cam_detach(struct ahci_port *ap)
{
	int error;

	if ((ap->ap_flags & AP_F_CAM_ATTACHED) == 0)
		return;
	lockmgr(&ap->ap_sim_lock, LK_EXCLUSIVE);
	if (ap->ap_sim) {
		xpt_freeze_simq(ap->ap_sim, 1);
	}
	if (ap->ap_flags & AP_F_BUS_REGISTERED) {
		error = xpt_bus_deregister(cam_sim_path(ap->ap_sim));
		KKASSERT(error == CAM_REQ_CMP);
		ap->ap_flags &= ~AP_F_BUS_REGISTERED;
	}
	if (ap->ap_sim) {
		cam_sim_free(ap->ap_sim);
		ap->ap_sim = NULL;
	}
	lockmgr(&ap->ap_sim_lock, LK_RELEASE);
	ap->ap_flags &= ~AP_F_CAM_ATTACHED;
}

/*
 * Once the AHCI port has been attached we need to probe for a device or
 * devices on the port and setup various options.
 *
 * If atx is NULL we are probing the direct-attached device on the port,
 * which may or may not be a port multiplier.
 */
int
ahci_cam_probe(struct ahci_port *ap, struct ata_port *atx)
{
	struct ata_port *at;
	struct ata_xfer *xa;
	u_int64_t capacity;
	u_int64_t capacity_bytes;
	int model_len;
	int firmware_len;
	int serial_len;
	int error;
	int devncqdepth;
	int i;
	const char *model_id;
	const char *firmware_id;
	const char *serial_id;
	const char *wcstr;
	const char *rastr;
	const char *scstr;
	const char *type;

	error = EIO;

	/*
	 * Delayed CAM attachment for initial probe, sim may be NULL
	 */
	if (ap->ap_sim == NULL)
		return(0);

	/*
	 * A NULL atx indicates a probe of the directly connected device.
	 * A non-NULL atx indicates a device connected via a port multiplier.
	 * We need to preserve atx for calls to ahci_ata_get_xfer().
	 *
	 * at is always non-NULL.  For directly connected devices we supply
	 * an (at) pointing to target 0.
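	 *
	 * For a device behind a port multiplier (at) and (atx) both point
	 * at the target's ata_port.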
	 */
	if (atx == NULL) {
		at = ap->ap_ata[0];	/* direct attached - device 0 */
		if (ap->ap_type == ATA_PORT_T_PM) {
			kprintf("%s: Found Port Multiplier\n",
				ATANAME(ap, atx));
			return (0);
		}
		at->at_type = ap->ap_type;
	} else {
		at = atx;
		if (atx->at_type == ATA_PORT_T_PM) {
			kprintf("%s: Bogus device, reducing port count to %d\n",
				ATANAME(ap, atx), atx->at_target);
			if (ap->ap_pmcount > atx->at_target)
				ap->ap_pmcount = atx->at_target;
			goto err;
		}
	}
	if (ap->ap_type == ATA_PORT_T_NONE)
		goto err;
	if (at->at_type == ATA_PORT_T_NONE)
		goto err;

	/*
	 * Issue identify, saving the result
	 */
	xa = ahci_ata_get_xfer(ap, atx);
	xa->complete = ahci_ata_dummy_done;
	xa->data = &at->at_identify;
	xa->datalen = sizeof(at->at_identify);
	xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL;
	xa->fis->flags = ATA_H2D_FLAGS_CMD | at->at_target;

	switch(at->at_type) {
	case ATA_PORT_T_DISK:
		xa->fis->command = ATA_C_IDENTIFY;
		type = "DISK";
		break;
	case ATA_PORT_T_ATAPI:
		xa->fis->command = ATA_C_ATAPI_IDENTIFY;
		xa->flags |= ATA_F_AUTOSENSE;
		type = "ATAPI";
		break;
	default:
		xa->fis->command = ATA_C_ATAPI_IDENTIFY;
		type = "UNKNOWN(ATAPI?)";
		break;
	}
	xa->fis->features = 0;
	xa->fis->device = 0;
	xa->timeout = 1000;

	if (ahci_ata_cmd(xa) != ATA_S_COMPLETE) {
		kprintf("%s: Detected %s device but unable to IDENTIFY\n",
			ATANAME(ap, atx), type);
		ahci_ata_put_xfer(xa);
		goto err;
	}
	ahci_ata_put_xfer(xa);

	ata_fix_identify(&at->at_identify);

	/*
	 * Read capacity using SATA probe info.
	 */
	if (le16toh(at->at_identify.cmdset83) & 0x0400) {
		/* LBA48 feature set supported */
		capacity = 0;
		for (i = 3; i >= 0; --i) {
			capacity <<= 16;
			capacity +=
			    le16toh(at->at_identify.addrsecxt[i]);
		}
	} else {
		capacity = le16toh(at->at_identify.addrsec[1]);
		capacity <<= 16;
		capacity += le16toh(at->at_identify.addrsec[0]);
	}
	if (capacity == 0)
		capacity = 1024 * 1024 / 512;
	at->at_capacity = capacity;
	if (atx == NULL)
		ap->ap_probe = ATA_PROBE_GOOD;

	capacity_bytes = capacity * 512;

	/*
	 * Negotiate NCQ, throw away any ata_xfer's beyond the negotiated
	 * number of slots and limit the number of CAM ccb's to one less
	 * so we always have a slot available for recovery.
	 *
	 * NCQ is not used if ap_ncqdepth is 1 or the host controller does
	 * not support it, and in that case the driver can handle extra
	 * ccb's.
	 *
	 * NCQ is currently used only with direct-attached disks.  It is
	 * not used with port multipliers or direct-attached ATAPI devices.
	 *
	 * Remember at least one extra CCB needs to be reserved for the
	 * error ccb.
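	 *
	 * The IDENTIFY queue depth field encodes (maximum queue depth - 1)
	 * in its low 5 bits, so decode it and clamp the result to the
	 * number of command slots the controller actually implements.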
	 */
	if ((ap->ap_sc->sc_cap & AHCI_REG_CAP_SNCQ) &&
	    ap->ap_type == ATA_PORT_T_DISK &&
	    (le16toh(at->at_identify.satacap) & (1 << 8))) {
		at->at_ncqdepth = (le16toh(at->at_identify.qdepth) & 0x1F) + 1;
		devncqdepth = at->at_ncqdepth;
		if (at->at_ncqdepth > ap->ap_sc->sc_ncmds)
			at->at_ncqdepth = ap->ap_sc->sc_ncmds;
		if (at->at_ncqdepth > 1) {
			for (i = 0; i < ap->ap_sc->sc_ncmds; ++i) {
				xa = ahci_ata_get_xfer(ap, atx);
				if (xa->tag < at->at_ncqdepth) {
					xa->state = ATA_S_COMPLETE;
					ahci_ata_put_xfer(xa);
				}
			}
			if (at->at_ncqdepth >= ap->ap_sc->sc_ncmds) {
				cam_sim_set_max_tags(ap->ap_sim,
						     at->at_ncqdepth - 1);
			}
		}
	} else {
		devncqdepth = 0;
	}

	model_len = sizeof(at->at_identify.model);
	model_id = at->at_identify.model;
	ahci_strip_string(&model_id, &model_len);

	firmware_len = sizeof(at->at_identify.firmware);
	firmware_id = at->at_identify.firmware;
	ahci_strip_string(&firmware_id, &firmware_len);

	serial_len = sizeof(at->at_identify.serial);
	serial_id = at->at_identify.serial;
	ahci_strip_string(&serial_id, &serial_len);

	/*
	 * Generate informative strings.
	 *
	 * NOTE: We do not automatically set write caching, lookahead,
	 *	 or the security state for ATAPI devices.
	 */
	if (at->at_identify.cmdset82 & ATA_IDENTIFY_WRITECACHE) {
		if (at->at_identify.features85 & ATA_IDENTIFY_WRITECACHE)
			wcstr = "enabled";
		else if (at->at_type == ATA_PORT_T_ATAPI)
			wcstr = "disabled";
		else
			wcstr = "enabling";
	} else {
		wcstr = "notsupp";
	}

	if (at->at_identify.cmdset82 & ATA_IDENTIFY_LOOKAHEAD) {
		if (at->at_identify.features85 & ATA_IDENTIFY_LOOKAHEAD)
			rastr = "enabled";
		else if (at->at_type == ATA_PORT_T_ATAPI)
			rastr = "disabled";
		else
			rastr = "enabling";
	} else {
		rastr = "notsupp";
	}

	if (at->at_identify.cmdset82 & ATA_IDENTIFY_SECURITY) {
		if (at->at_identify.securestatus & ATA_SECURE_FROZEN)
			scstr = "frozen";
		else if (at->at_type == ATA_PORT_T_ATAPI)
			scstr = "unfrozen";
		else if (AhciNoFeatures & (1 << ap->ap_num))
			scstr = "<disabled>";
		else
			scstr = "freezing";
	} else {
		scstr = "notsupp";
	}

	kprintf("%s: Found %s \"%*.*s %*.*s\" serial=\"%*.*s\"\n"
		"%s: tags=%d/%d satacap=%04x satafea=%04x NCQ=%s "
		"capacity=%lld.%02dMB\n",

		ATANAME(ap, atx),
		type,
		model_len, model_len, model_id,
		firmware_len, firmware_len, firmware_id,
		serial_len, serial_len, serial_id,

		ATANAME(ap, atx),
		devncqdepth, ap->ap_sc->sc_ncmds,
		at->at_identify.satacap,
		at->at_identify.satafsup,
		(at->at_ncqdepth > 1 ?
"YES" : "NO"), 456 (long long)capacity_bytes / (1024 * 1024), 457 (int)(capacity_bytes % (1024 * 1024)) * 100 / (1024 * 1024) 458 ); 459 kprintf("%s: f85=%04x f86=%04x f87=%04x WC=%s RA=%s SEC=%s\n", 460 ATANAME(ap, atx), 461 at->at_identify.features85, 462 at->at_identify.features86, 463 at->at_identify.features87, 464 wcstr, 465 rastr, 466 scstr 467 ); 468 469 /* 470 * Additional type-specific probing 471 */ 472 switch(at->at_type) { 473 case ATA_PORT_T_DISK: 474 error = ahci_cam_probe_disk(ap, atx); 475 break; 476 case ATA_PORT_T_ATAPI: 477 error = ahci_cam_probe_atapi(ap, atx); 478 break; 479 default: 480 error = EIO; 481 break; 482 } 483 err: 484 if (error) { 485 at->at_probe = ATA_PROBE_FAILED; 486 if (atx == NULL) 487 ap->ap_probe = at->at_probe; 488 } else { 489 at->at_probe = ATA_PROBE_GOOD; 490 if (atx == NULL) 491 ap->ap_probe = at->at_probe; 492 } 493 return (error); 494 } 495 496 /* 497 * DISK-specific probe after initial ident 498 */ 499 static int 500 ahci_cam_probe_disk(struct ahci_port *ap, struct ata_port *atx) 501 { 502 struct ata_port *at; 503 struct ata_xfer *xa; 504 505 at = atx ? atx : ap->ap_ata[0]; 506 507 /* 508 * Set dummy xfer mode 509 */ 510 ahci_set_xfer(ap, atx); 511 512 /* 513 * Enable write cache if supported 514 * 515 * NOTE: "WD My Book" external disk devices have a very poor 516 * daughter board between the the ESATA and the HD. Sending 517 * any ATA_C_SET_FEATURES commands will break the hardware port 518 * with a fatal protocol error. However, this device also 519 * indicates that WRITECACHE is already on and READAHEAD is 520 * not supported so we avoid the issue. 521 */ 522 if ((at->at_identify.cmdset82 & ATA_IDENTIFY_WRITECACHE) && 523 (at->at_identify.features85 & ATA_IDENTIFY_WRITECACHE) == 0) { 524 xa = ahci_ata_get_xfer(ap, atx); 525 xa->complete = ahci_ata_dummy_done; 526 xa->fis->command = ATA_C_SET_FEATURES; 527 xa->fis->features = ATA_SF_WRITECACHE_EN; 528 /* xa->fis->features = ATA_SF_LOOKAHEAD_EN; */ 529 xa->fis->flags = ATA_H2D_FLAGS_CMD | at->at_target; 530 xa->fis->device = 0; 531 xa->flags = ATA_F_PIO | ATA_F_POLL; 532 xa->timeout = 1000; 533 xa->datalen = 0; 534 if (ahci_ata_cmd(xa) == ATA_S_COMPLETE) 535 at->at_features |= ATA_PORT_F_WCACHE; 536 else 537 kprintf("%s: Unable to enable write-caching\n", 538 ATANAME(ap, atx)); 539 ahci_ata_put_xfer(xa); 540 } 541 542 /* 543 * Enable readahead if supported 544 */ 545 if ((at->at_identify.cmdset82 & ATA_IDENTIFY_LOOKAHEAD) && 546 (at->at_identify.features85 & ATA_IDENTIFY_LOOKAHEAD) == 0) { 547 xa = ahci_ata_get_xfer(ap, atx); 548 xa->complete = ahci_ata_dummy_done; 549 xa->fis->command = ATA_C_SET_FEATURES; 550 xa->fis->features = ATA_SF_LOOKAHEAD_EN; 551 xa->fis->flags = ATA_H2D_FLAGS_CMD | at->at_target; 552 xa->fis->device = 0; 553 xa->flags = ATA_F_PIO | ATA_F_POLL; 554 xa->timeout = 1000; 555 xa->datalen = 0; 556 if (ahci_ata_cmd(xa) == ATA_S_COMPLETE) 557 at->at_features |= ATA_PORT_F_RAHEAD; 558 else 559 kprintf("%s: Unable to enable read-ahead\n", 560 ATANAME(ap, atx)); 561 ahci_ata_put_xfer(xa); 562 } 563 564 /* 565 * FREEZE LOCK the device so malicious users can't lock it on us. 
	 * As there is no harm in issuing this to devices that don't
	 * support the security feature set, we just send it and don't
	 * bother checking if the device sends a command abort to tell us
	 * it doesn't support it.
	 */
	if ((at->at_identify.cmdset82 & ATA_IDENTIFY_SECURITY) &&
	    (at->at_identify.securestatus & ATA_SECURE_FROZEN) == 0 &&
	    (AhciNoFeatures & (1 << ap->ap_num)) == 0) {
		xa = ahci_ata_get_xfer(ap, atx);
		xa->complete = ahci_ata_dummy_done;
		xa->fis->command = ATA_C_SEC_FREEZE_LOCK;
		xa->fis->flags = ATA_H2D_FLAGS_CMD | at->at_target;
		xa->flags = ATA_F_PIO | ATA_F_POLL;
		xa->timeout = 1000;
		xa->datalen = 0;
		if (ahci_ata_cmd(xa) == ATA_S_COMPLETE)
			at->at_features |= ATA_PORT_F_FRZLCK;
		else
			kprintf("%s: Unable to set security freeze\n",
				ATANAME(ap, atx));
		ahci_ata_put_xfer(xa);
	}

	return (0);
}

/*
 * ATAPI-specific probe after initial ident
 */
static int
ahci_cam_probe_atapi(struct ahci_port *ap, struct ata_port *atx)
{
	ahci_set_xfer(ap, atx);
	return(0);
}

/*
 * Setting the transfer mode is irrelevant for the SATA transport
 * but some (atapi) devices seem to need it anyway.  In addition
 * if we are running through a SATA->PATA converter for some reason
 * beyond my comprehension we might have to set the mode.
 *
 * We only support DMA modes for SATA attached devices, so don't bother
 * with legacy modes.
 */
static int
ahci_set_xfer(struct ahci_port *ap, struct ata_port *atx)
{
	struct ata_port *at;
	struct ata_xfer *xa;
	u_int16_t mode;
	u_int16_t mask;

	at = atx ? atx : ap->ap_ata[0];

	/*
	 * Figure out the supported UDMA mode.  Ignore other legacy modes.
	 */
	mask = le16toh(at->at_identify.ultradma);
	if ((mask & 0xFF) == 0 || mask == 0xFFFF)
		return(0);
	mask &= 0xFF;
	mode = 0x4F;
	while ((mask & 0x8000) == 0) {
		mask <<= 1;
		--mode;
	}

	/*
	 * SATA atapi devices often still report a dma mode, even though
	 * it is irrelevant for SATA transport.  It is also possible that
	 * we are running through a SATA->PATA converter and seeing the
	 * PATA dma mode.
	 *
	 * In this case the device may require a (dummy) SETXFER to be
	 * sent before it will work properly.
	 */
	xa = ahci_ata_get_xfer(ap, atx);
	xa->complete = ahci_ata_dummy_done;
	xa->fis->command = ATA_C_SET_FEATURES;
	xa->fis->features = ATA_SF_SETXFER;
	xa->fis->flags = ATA_H2D_FLAGS_CMD | at->at_target;
	xa->fis->sector_count = mode;
	xa->flags = ATA_F_PIO | ATA_F_POLL;
	xa->timeout = 1000;
	xa->datalen = 0;
	if (ahci_ata_cmd(xa) != ATA_S_COMPLETE) {
		kprintf("%s: Unable to set dummy xfer mode\n",
			ATANAME(ap, atx));
	} else if (bootverbose) {
		kprintf("%s: Set dummy xfer mode to %02x\n",
			ATANAME(ap, atx), mode);
	}
	ahci_ata_put_xfer(xa);
	return(0);
}

/*
 * Fix byte ordering so buffers can be accessed as
 * strings.
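 *
 * The IDENTIFY string fields are arrays of 16-bit words holding two
 * characters each in swapped order, so byte-swap every word in place.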
 */
static void
ata_fix_identify(struct ata_identify *id)
{
	u_int16_t *swap;
	int i;

	swap = (u_int16_t *)id->serial;
	for (i = 0; i < sizeof(id->serial) / sizeof(u_int16_t); i++)
		swap[i] = bswap16(swap[i]);

	swap = (u_int16_t *)id->firmware;
	for (i = 0; i < sizeof(id->firmware) / sizeof(u_int16_t); i++)
		swap[i] = bswap16(swap[i]);

	swap = (u_int16_t *)id->model;
	for (i = 0; i < sizeof(id->model) / sizeof(u_int16_t); i++)
		swap[i] = bswap16(swap[i]);
}

/*
 * Dummy done callback for xa.
 */
static void
ahci_ata_dummy_done(struct ata_xfer *xa)
{
}

/*
 * Use an engineering request to initiate a target scan for devices
 * behind a port multiplier.
 *
 * An asynchronous bus scan is used to avoid reentrancy issues.
 */
static void
ahci_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
	struct ahci_port *ap = ccb->ccb_h.sim_priv.entries[0].ptr;

	if (ccb->ccb_h.func_code == XPT_SCAN_BUS) {
		ap->ap_flags &= ~AP_F_SCAN_RUNNING;
		if (ap->ap_flags & AP_F_SCAN_REQUESTED) {
			ap->ap_flags &= ~AP_F_SCAN_REQUESTED;
			ahci_cam_rescan(ap);
		}
		ap->ap_flags |= AP_F_SCAN_COMPLETED;
		wakeup(&ap->ap_flags);
	}
	xpt_free_ccb(ccb);
}

static void
ahci_cam_rescan(struct ahci_port *ap)
{
	struct cam_path *path;
	union ccb *ccb;
	int status;
	int i;

	if (ap->ap_flags & AP_F_SCAN_RUNNING) {
		ap->ap_flags |= AP_F_SCAN_REQUESTED;
		return;
	}
	ap->ap_flags |= AP_F_SCAN_RUNNING;
	for (i = 0; i < AHCI_MAX_PMPORTS; ++i) {
		ap->ap_ata[i]->at_features |= ATA_PORT_F_RESCAN;
	}

	status = xpt_create_path(&path, xpt_periph, cam_sim_path(ap->ap_sim),
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP)
		return;

	ccb = xpt_alloc_ccb();
	xpt_setup_ccb(&ccb->ccb_h, path, 5);	/* 5 = low priority */
	ccb->ccb_h.func_code = XPT_ENG_EXEC;
	ccb->ccb_h.cbfcnp = ahci_cam_rescan_callback;
	ccb->ccb_h.sim_priv.entries[0].ptr = ap;
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action_async(ccb);
}

static void
ahci_xpt_rescan(struct ahci_port *ap)
{
	struct cam_path *path;
	union ccb *ccb;
	int status;

	status = xpt_create_path(&path, xpt_periph, cam_sim_path(ap->ap_sim),
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP)
		return;

	ccb = xpt_alloc_ccb();
	xpt_setup_ccb(&ccb->ccb_h, path, 5);	/* 5 = low priority */
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = ahci_cam_rescan_callback;
	ccb->ccb_h.sim_priv.entries[0].ptr = ap;
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action_async(ccb);
}

/*
 * Action function - dispatch command
 */
static
void
ahci_xpt_action(struct cam_sim *sim, union ccb *ccb)
{
	struct ahci_port *ap;
	struct ata_port *at, *atx;
	struct ccb_hdr *ccbh;
	int unit;

	/* XXX lock */
	ap = cam_sim_softc(sim);
	atx = NULL;
	KKASSERT(ap != NULL);
	ccbh = &ccb->ccb_h;
	unit = cam_sim_unit(sim);

	/*
	 * Early failure checks.  These checks do not apply to XPT_PATH_INQ,
	 * otherwise the bus rescan will not remove the dead devices when
	 * unplugging a PM.
	 *
	 * For non-wildcards we have one target (0) and one lun (0),
	 * unless we have a port multiplier.
	 *
	 * A wildcard target indicates only the general bus is being
	 * probed.
	 *
	 * Calculate at and atx.  at is always non-NULL.  atx is only
	 * NULL for direct-attached devices.  It will be non-NULL for
	 * devices behind a port multiplier.
	 *
	 * XXX What do we do with a LUN wildcard?
	 */
	if (ccbh->target_id != CAM_TARGET_WILDCARD &&
	    ccbh->func_code != XPT_PATH_INQ) {
		if (ap->ap_type == ATA_PORT_T_NONE) {
			ccbh->status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			return;
		}
		if (ccbh->target_id < 0 || ccbh->target_id >= ap->ap_pmcount) {
			ccbh->status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			return;
		}
		at = ap->ap_ata[ccbh->target_id];
		if (ap->ap_type == ATA_PORT_T_PM)
			atx = at;

		if (ccbh->target_lun != CAM_LUN_WILDCARD && ccbh->target_lun) {
			ccbh->status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			return;
		}
	} else {
		at = ap->ap_ata[0];
	}

	/*
	 * Switch on the meta XPT command
	 */
	switch(ccbh->func_code) {
	case XPT_ENG_EXEC:
		/*
		 * This routine is called after a port multiplier has been
		 * probed.
		 */
		ccbh->status = CAM_REQ_CMP;
		ahci_os_lock_port(ap);
		ahci_port_state_machine(ap, 0);
		ahci_os_unlock_port(ap);
		xpt_done(ccb);
		ahci_xpt_rescan(ap);
		break;
	case XPT_PATH_INQ:
		/*
		 * This command always succeeds, otherwise the bus scan
		 * will not detach dead devices.
		 */
		ccb->cpi.version_num = 1;
		ccb->cpi.hba_inquiry = 0;
		ccb->cpi.target_sprt = 0;
		ccb->cpi.hba_misc = PIM_SEQSCAN;
		ccb->cpi.hba_eng_cnt = 0;
		bzero(ccb->cpi.vuhba_flags, sizeof(ccb->cpi.vuhba_flags));
		ccb->cpi.max_target = AHCI_MAX_PMPORTS - 1;
		ccb->cpi.max_lun = 0;
		ccb->cpi.async_flags = 0;
		ccb->cpi.hpath_id = 0;
		ccb->cpi.initiator_id = AHCI_MAX_PMPORTS - 1;
		ccb->cpi.unit_number = cam_sim_unit(sim);
		ccb->cpi.bus_id = cam_sim_bus(sim);
		ccb->cpi.base_transfer_speed = 150000;
		ccb->cpi.transport = XPORT_SATA;
		ccb->cpi.transport_version = 1;
		ccb->cpi.protocol = PROTO_SCSI;
		ccb->cpi.protocol_version = SCSI_REV_2;

		ccbh->status = CAM_REQ_CMP;
		if (ccbh->target_id == CAM_TARGET_WILDCARD) {
			ahci_os_lock_port(ap);
			ahci_port_state_machine(ap, 0);
			ahci_os_unlock_port(ap);
		} else {
			switch(ahci_pread(ap, AHCI_PREG_SSTS) &
			       AHCI_PREG_SSTS_SPD) {
			case AHCI_PREG_SSTS_SPD_GEN1:
				ccb->cpi.base_transfer_speed = 150000;
				break;
			case AHCI_PREG_SSTS_SPD_GEN2:
				ccb->cpi.base_transfer_speed = 300000;
				break;
			case AHCI_PREG_SSTS_SPD_GEN3:
				ccb->cpi.base_transfer_speed = 600000;
				break;
			default:
				/* unknown */
				ccb->cpi.base_transfer_speed = 1000;
				break;
			}
#if 0
			if (ap->ap_type == ATA_PORT_T_NONE)
				ccbh->status = CAM_DEV_NOT_THERE;
#endif
		}
		xpt_done(ccb);
		break;
	case XPT_RESET_DEV:
		ahci_os_lock_port(ap);
		if (ap->ap_type == ATA_PORT_T_NONE) {
			ccbh->status = CAM_DEV_NOT_THERE;
		} else {
			ahci_port_reset(ap, atx, 0);
			ccbh->status = CAM_REQ_CMP;
		}
		ahci_os_unlock_port(ap);
		xpt_done(ccb);
		break;
	case XPT_RESET_BUS:
		ahci_os_lock_port(ap);
		ahci_port_reset(ap, NULL, 1);
		ahci_os_unlock_port(ap);
		ccbh->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
		ccb->cts.protocol = PROTO_SCSI;
		ccb->cts.protocol_version = SCSI_REV_2;
		ccb->cts.transport = XPORT_SATA;
		ccb->cts.transport_version =
			XPORT_VERSION_UNSPECIFIED;
		ccb->cts.proto_specific.valid = 0;
		ccb->cts.xport_specific.valid = 0;
		ccbh->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;
	case XPT_SCSI_IO:
		/*
		 * Our parallel startup code might have only probed through
		 * to the IDENT, so do the last step if necessary.
		 */
		if (at->at_probe == ATA_PROBE_NEED_IDENT)
			ahci_cam_probe(ap, atx);
		if (at->at_probe != ATA_PROBE_GOOD) {
			ccbh->status = CAM_DEV_NOT_THERE;
			xpt_done(ccb);
			break;
		}
		switch(at->at_type) {
		case ATA_PORT_T_DISK:
			ahci_xpt_scsi_disk_io(ap, atx, ccb);
			break;
		case ATA_PORT_T_ATAPI:
			ahci_xpt_scsi_atapi_io(ap, atx, ccb);
			break;
		default:
			ccbh->status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
		break;
	case XPT_TRIM:
	{
		scsi_cdb_t cdb;
		struct ccb_scsiio *csio;
		csio = &ccb->csio;
		cdb = (void *)((ccbh->flags & CAM_CDB_POINTER) ?
				csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
		cdb->generic.opcode = TRIM;
		ahci_xpt_scsi_disk_io(ap, atx, ccb);
		break;
	}
	default:
		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/*
 * Poll function.
 *
 * Generally this function gets called heavily when interrupts might be
 * non-operational, during a halt/reboot or panic.
 */
static
void
ahci_xpt_poll(struct cam_sim *sim)
{
	struct ahci_port *ap;

	ap = cam_sim_softc(sim);
	crit_enter();
	ahci_os_lock_port(ap);
	ahci_port_intr(ap, 1);
	ahci_os_unlock_port(ap);
	crit_exit();
}

/*
 * Convert the SCSI command in ccb to an ata_xfer command in xa
 * for ATA_PORT_T_DISK operations.  Set the completion function
 * to convert the response back, then dispatch to the OpenBSD AHCI
 * layer.
 *
 * AHCI DISK commands only support a limited command set, and we
 * fake additional commands to make it play nice with the CAM subsystem.
 */
static
void
ahci_xpt_scsi_disk_io(struct ahci_port *ap, struct ata_port *atx,
		      union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct ata_xfer *xa;
	struct ata_port *at;
	struct ata_fis_h2d *fis;
	struct ata_pass_12 *atp12;
	struct ata_pass_16 *atp16;
	scsi_cdb_t cdb;
	union scsi_data *rdata;
	int rdata_len;
	u_int64_t capacity;
	u_int64_t lba;
	u_int32_t count;

	ccbh = &ccb->csio.ccb_h;
	csio = &ccb->csio;
	at = atx ? atx : ap->ap_ata[0];

	/*
	 * XXX not passing NULL at for direct attach!
	 */
	xa = ahci_ata_get_xfer(ap, atx);
	rdata = (void *)csio->data_ptr;
	rdata_len = csio->dxfer_len;

	/*
	 * Build the FIS or process the csio to completion.
	 */
	cdb = (void *)((ccbh->flags & CAM_CDB_POINTER) ?
			csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);

	switch(cdb->generic.opcode) {
	case REQUEST_SENSE:
		/*
		 * Auto-sense everything, so explicit sense requests
		 * return no-sense.
		 */
		ccbh->status = CAM_SCSI_STATUS_ERROR;
		break;
	case INQUIRY:
		/*
		 * Inquiry supported features
		 *
		 * [opcode, byte2, page_code, length, control]
		 */
		if (cdb->inquiry.byte2 & SI_EVPD) {
			ahci_xpt_page_inquiry(ap, at, ccb);
		} else {
			bzero(rdata, rdata_len);
			if (rdata_len < SHORT_INQUIRY_LENGTH) {
				ccbh->status = CAM_CCB_LEN_ERR;
				break;
			}
			if (rdata_len > sizeof(rdata->inquiry_data))
				rdata_len = sizeof(rdata->inquiry_data);
			rdata->inquiry_data.device = T_DIRECT;
			rdata->inquiry_data.version = SCSI_REV_SPC2;
			rdata->inquiry_data.response_format = 2;
			rdata->inquiry_data.additional_length = 32;
			bcopy("SATA    ", rdata->inquiry_data.vendor, 8);
			bcopy(at->at_identify.model,
			      rdata->inquiry_data.product,
			      sizeof(rdata->inquiry_data.product));
			bcopy(at->at_identify.firmware,
			      rdata->inquiry_data.revision,
			      sizeof(rdata->inquiry_data.revision));
			ccbh->status = CAM_REQ_CMP;
		}

		/*
		 * Use the vendor specific area to set the TRIM status
		 * for scsi_da
		 */
		if (at->at_identify.support_dsm) {
			rdata->inquiry_data.vendor_specific1[0] =
			    at->at_identify.support_dsm & ATA_SUPPORT_DSM_TRIM;
			rdata->inquiry_data.vendor_specific1[1] =
			    at->at_identify.max_dsm_blocks;
		}
		break;
	case READ_CAPACITY_16:
		if (cdb->read_capacity_16.service_action != SRC16_SERVICE_ACTION) {
			ccbh->status = CAM_REQ_INVALID;
			break;
		}
		if (rdata_len < sizeof(rdata->read_capacity_data_16)) {
			ccbh->status = CAM_CCB_LEN_ERR;
			break;
		}
		/* fall through */
	case READ_CAPACITY:
		if (rdata_len < sizeof(rdata->read_capacity_data)) {
			ccbh->status = CAM_CCB_LEN_ERR;
			break;
		}

		capacity = at->at_capacity;

		bzero(rdata, rdata_len);
		if (cdb->generic.opcode == READ_CAPACITY) {
			rdata_len = sizeof(rdata->read_capacity_data);
			if (capacity > 0xFFFFFFFFU)
				capacity = 0xFFFFFFFFU;
			bzero(&rdata->read_capacity_data, rdata_len);
			scsi_ulto4b((u_int32_t)capacity - 1,
				    rdata->read_capacity_data.addr);
			scsi_ulto4b(512, rdata->read_capacity_data.length);
		} else {
			rdata_len = sizeof(rdata->read_capacity_data_16);
			bzero(&rdata->read_capacity_data_16, rdata_len);
			scsi_u64to8b(capacity - 1,
				     rdata->read_capacity_data_16.addr);
			scsi_ulto4b(512, rdata->read_capacity_data_16.length);
		}
		ccbh->status = CAM_REQ_CMP;
		break;
	case SYNCHRONIZE_CACHE:
		/*
		 * Synchronize cache.  Specification says this can take
		 * greater than 30 seconds so give it at least 45.
		 */
		fis = xa->fis;
		fis->flags = ATA_H2D_FLAGS_CMD;
		fis->command = ATA_C_FLUSH_CACHE;
		fis->device = 0;
		if (xa->timeout < 45000)
			xa->timeout = 45000;
		xa->datalen = 0;
		xa->flags = 0;
		xa->complete = ahci_ata_complete_disk_synchronize_cache;
		break;
	case TRIM:
		fis = xa->fis;
		fis->command = ATA_C_DATA_SET_MANAGEMENT;
		fis->features = (u_int8_t)ATA_SF_DSM_TRIM;
		fis->features_exp = (u_int8_t)(ATA_SF_DSM_TRIM >> 8);

		xa->flags = ATA_F_WRITE;
		fis->flags = ATA_H2D_FLAGS_CMD;

		xa->data = csio->data_ptr;
		xa->datalen = csio->dxfer_len;
		xa->timeout = ccbh->timeout * 50;	/* milliseconds */

		fis->sector_count = (u_int8_t)(xa->datalen / 512);
		fis->sector_count_exp = (u_int8_t)((xa->datalen / 512) >> 8);

		lba = 0;
		fis->lba_low = (u_int8_t)lba;
		fis->lba_mid = (u_int8_t)(lba >> 8);
		fis->lba_high = (u_int8_t)(lba >> 16);
		fis->lba_low_exp = (u_int8_t)(lba >> 24);
		fis->lba_mid_exp = (u_int8_t)(lba >> 32);
		fis->lba_high_exp = (u_int8_t)(lba >> 40);

		fis->device = ATA_H2D_DEVICE_LBA;
		xa->data = csio->data_ptr;

		xa->complete = ahci_ata_complete_disk_rw;
		ccbh->status = CAM_REQ_INPROG;
		break;
	case TEST_UNIT_READY:
	case START_STOP_UNIT:
	case PREVENT_ALLOW:
		/*
		 * Just silently return success
		 */
		ccbh->status = CAM_REQ_CMP;
		rdata_len = 0;
		break;
	case ATA_PASS_12:
		atp12 = &cdb->ata_pass_12;
		fis = xa->fis;
		/*
		 * Figure out the flags to be used, depending on the
		 * direction of the CAM request.
		 */
		switch (ccbh->flags & CAM_DIR_MASK) {
		case CAM_DIR_IN:
			xa->flags = ATA_F_READ;
			break;
		case CAM_DIR_OUT:
			xa->flags = ATA_F_WRITE;
			break;
		default:
			xa->flags = 0;
		}
		xa->flags |= ATA_F_POLL | ATA_F_EXCLUSIVE;
		xa->data = csio->data_ptr;
		xa->datalen = csio->dxfer_len;
		xa->complete = ahci_ata_complete_disk_rw;
		xa->timeout = ccbh->timeout;

		/*
		 * Populate the fis from the information we received
		 * through CAM ATA passthrough.
		 */
		fis->flags = ATA_H2D_FLAGS_CMD;	/* maybe also atp12->flags ? */
		fis->features = atp12->features;
		fis->sector_count = atp12->sector_count;
		fis->lba_low = atp12->lba_low;
		fis->lba_mid = atp12->lba_mid;
		fis->lba_high = atp12->lba_high;
		fis->device = atp12->device;	/* maybe always 0? */
		fis->command = atp12->command;
		fis->control = atp12->control;

		/*
		 * Mark as in progress so it is sent to the device.
		 */
		ccbh->status = CAM_REQ_INPROG;
		break;
	case ATA_PASS_16:
		atp16 = &cdb->ata_pass_16;
		fis = xa->fis;
		/*
		 * Figure out the flags to be used, depending on the
		 * direction of the CAM request.
		 */
		switch (ccbh->flags & CAM_DIR_MASK) {
		case CAM_DIR_IN:
			xa->flags = ATA_F_READ;
			break;
		case CAM_DIR_OUT:
			xa->flags = ATA_F_WRITE;
			break;
		default:
			xa->flags = 0;
		}
		xa->flags |= ATA_F_POLL | ATA_F_EXCLUSIVE;
		xa->data = csio->data_ptr;
		xa->datalen = csio->dxfer_len;
		xa->complete = ahci_ata_complete_disk_rw;
		xa->timeout = ccbh->timeout;

		/*
		 * Populate the fis from the information we received
		 * through CAM ATA passthrough.
		 */
		fis->flags = ATA_H2D_FLAGS_CMD;	/* maybe also atp16->flags ? */
		fis->features = atp16->features;
		fis->features_exp = atp16->features_ext;
		fis->sector_count = atp16->sector_count;
		fis->sector_count_exp = atp16->sector_count_ext;
		fis->lba_low = atp16->lba_low;
		fis->lba_low_exp = atp16->lba_low_ext;
		fis->lba_mid = atp16->lba_mid;
		fis->lba_mid_exp = atp16->lba_mid_ext;
		fis->lba_high = atp16->lba_high;
		fis->lba_high_exp = atp16->lba_high_ext;
		fis->device = atp16->device;	/* maybe always 0? */
		fis->command = atp16->command;

		/*
		 * Mark as in progress so it is sent to the device.
		 */
		ccbh->status = CAM_REQ_INPROG;
		break;
	default:
		switch(cdb->generic.opcode) {
		case READ_6:
			lba = scsi_3btoul(cdb->rw_6.addr) & 0x1FFFFF;
			count = cdb->rw_6.length ? cdb->rw_6.length : 0x100;
			xa->flags = ATA_F_READ;
			break;
		case READ_10:
			lba = scsi_4btoul(cdb->rw_10.addr);
			count = scsi_2btoul(cdb->rw_10.length);
			xa->flags = ATA_F_READ;
			break;
		case READ_12:
			lba = scsi_4btoul(cdb->rw_12.addr);
			count = scsi_4btoul(cdb->rw_12.length);
			xa->flags = ATA_F_READ;
			break;
		case READ_16:
			lba = scsi_8btou64(cdb->rw_16.addr);
			count = scsi_4btoul(cdb->rw_16.length);
			xa->flags = ATA_F_READ;
			break;
		case WRITE_6:
			lba = scsi_3btoul(cdb->rw_6.addr) & 0x1FFFFF;
			count = cdb->rw_6.length ? cdb->rw_6.length : 0x100;
			xa->flags = ATA_F_WRITE;
			break;
		case WRITE_10:
			lba = scsi_4btoul(cdb->rw_10.addr);
			count = scsi_2btoul(cdb->rw_10.length);
			xa->flags = ATA_F_WRITE;
			break;
		case WRITE_12:
			lba = scsi_4btoul(cdb->rw_12.addr);
			count = scsi_4btoul(cdb->rw_12.length);
			xa->flags = ATA_F_WRITE;
			break;
		case WRITE_16:
			lba = scsi_8btou64(cdb->rw_16.addr);
			count = scsi_4btoul(cdb->rw_16.length);
			xa->flags = ATA_F_WRITE;
			break;
		default:
			ccbh->status = CAM_REQ_INVALID;
			break;
		}
		if (ccbh->status != CAM_REQ_INPROG)
			break;

		fis = xa->fis;
		fis->flags = ATA_H2D_FLAGS_CMD;
		fis->lba_low = (u_int8_t)lba;
		fis->lba_mid = (u_int8_t)(lba >> 8);
		fis->lba_high = (u_int8_t)(lba >> 16);
		fis->device = ATA_H2D_DEVICE_LBA;

		/*
		 * NCQ only for direct-attached disks, do not currently
		 * try to use NCQ with port multipliers.
		 */
		if (at->at_ncqdepth > 1 &&
		    ap->ap_type == ATA_PORT_T_DISK &&
		    (ap->ap_sc->sc_cap & AHCI_REG_CAP_SNCQ) &&
		    (ccbh->flags & CAM_POLLED) == 0) {
			/*
			 * Use NCQ - always uses 48 bit addressing
			 */
			xa->flags |= ATA_F_NCQ;
			fis->command = (xa->flags & ATA_F_WRITE) ?
					ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA;
			fis->lba_low_exp = (u_int8_t)(lba >> 24);
			fis->lba_mid_exp = (u_int8_t)(lba >> 32);
			fis->lba_high_exp = (u_int8_t)(lba >> 40);
			fis->sector_count = xa->tag << 3;
			fis->features = (u_int8_t)count;
			fis->features_exp = (u_int8_t)(count >> 8);
		} else if (count > 0x100 || lba > 0x0FFFFFFFU) {
			/*
			 * Use LBA48
			 */
			fis->command = (xa->flags & ATA_F_WRITE) ?
					ATA_C_WRITEDMA_EXT : ATA_C_READDMA_EXT;
			fis->lba_low_exp = (u_int8_t)(lba >> 24);
			fis->lba_mid_exp = (u_int8_t)(lba >> 32);
			fis->lba_high_exp = (u_int8_t)(lba >> 40);
			fis->sector_count = (u_int8_t)count;
			fis->sector_count_exp = (u_int8_t)(count >> 8);
		} else {
			/*
			 * Use LBA
			 *
			 * NOTE: 256 sectors is supported, stored as 0.
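			 *
			 * The upper 4 bits of the 28-bit LBA are carried
			 * in the device register.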
			 */
			fis->command = (xa->flags & ATA_F_WRITE) ?
					ATA_C_WRITEDMA : ATA_C_READDMA;
			fis->device |= (u_int8_t)(lba >> 24) & 0x0F;
			fis->sector_count = (u_int8_t)count;
		}

		xa->data = csio->data_ptr;
		xa->datalen = csio->dxfer_len;
		xa->complete = ahci_ata_complete_disk_rw;
		xa->timeout = ccbh->timeout;	/* milliseconds */
#if 0
		if (xa->timeout > 10000)	/* XXX - debug */
			xa->timeout = 10000;
#endif
		if (ccbh->flags & CAM_POLLED)
			xa->flags |= ATA_F_POLL;
		break;
	}

	/*
	 * If the request is still in progress the xa and FIS have
	 * been set up (except for the PM target), and must be dispatched.
	 * Otherwise the request was completed.
	 */
	if (ccbh->status == CAM_REQ_INPROG) {
		KKASSERT(xa->complete != NULL);
		xa->atascsi_private = ccb;
		ccb->ccb_h.sim_priv.entries[0].ptr = ap;
		ahci_os_lock_port(ap);
		xa->fis->flags |= at->at_target;
		ahci_ata_cmd(xa);
		ahci_os_unlock_port(ap);
	} else {
		ahci_ata_put_xfer(xa);
		xpt_done(ccb);
	}
}

/*
 * Convert the SCSI command in ccb to an ata_xfer command in xa
 * for ATA_PORT_T_ATAPI operations.  Set the completion function
 * to convert the response back, then dispatch to the OpenBSD AHCI
 * layer.
 */
static
void
ahci_xpt_scsi_atapi_io(struct ahci_port *ap, struct ata_port *atx,
		       union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct ata_xfer *xa;
	struct ata_fis_h2d *fis;
	scsi_cdb_t cdbs;
	scsi_cdb_t cdbd;
	int flags;
	struct ata_port *at;

	ccbh = &ccb->csio.ccb_h;
	csio = &ccb->csio;
	at = atx ? atx : ap->ap_ata[0];

	switch (ccbh->flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		flags = ATA_F_PACKET | ATA_F_READ;
		break;
	case CAM_DIR_OUT:
		flags = ATA_F_PACKET | ATA_F_WRITE;
		break;
	case CAM_DIR_NONE:
		flags = ATA_F_PACKET;
		break;
	default:
		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
		/* NOT REACHED */
	}

	/*
	 * Special handling to get the rfis back into host memory while
	 * still allowing the chip to run commands in parallel to
	 * ATAPI devices behind a PM.
	 */
	flags |= ATA_F_AUTOSENSE;

	/*
	 * The command has to fit in the packet command buffer.
	 */
	if (csio->cdb_len < 6 || csio->cdb_len > 16) {
		ccbh->status = CAM_CCB_LEN_ERR;
		xpt_done(ccb);
		return;
	}

	/*
	 * Initialize the XA and FIS.  It is unclear how much of
	 * this has to mimic the equivalent ATA command.
	 *
	 * XXX not passing NULL at for direct attach!
	 */
	xa = ahci_ata_get_xfer(ap, atx);
	fis = xa->fis;

	fis->flags = ATA_H2D_FLAGS_CMD | at->at_target;
	fis->command = ATA_C_PACKET;
	fis->device = ATA_H2D_DEVICE_LBA;
	fis->sector_count = xa->tag << 3;
	if (flags & (ATA_F_READ | ATA_F_WRITE)) {
		if (flags & ATA_F_WRITE) {
			fis->features = ATA_H2D_FEATURES_DMA |
					ATA_H2D_FEATURES_DIR_WRITE;
		} else {
			fis->features = ATA_H2D_FEATURES_DMA |
					ATA_H2D_FEATURES_DIR_READ;
		}
	} else {
		fis->lba_mid = 0;
		fis->lba_high = 0;
	}
	fis->control = ATA_FIS_CONTROL_4BIT;

	xa->flags = flags;
	xa->data = csio->data_ptr;
	xa->datalen = csio->dxfer_len;
	xa->timeout = ccbh->timeout;	/* milliseconds */

	if (ccbh->flags & CAM_POLLED)
		xa->flags |= ATA_F_POLL;

	/*
	 * Copy the cdb to the packetcmd buffer in the FIS using a
	 * convenient pointer in the xa.
	 *
	 * Zero-out any trailing bytes in case the ATAPI device cares.
	 */
	cdbs = (void *)((ccbh->flags & CAM_CDB_POINTER) ?
			csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
	bcopy(cdbs, xa->packetcmd, csio->cdb_len);
	if (csio->cdb_len < 16)
		bzero(xa->packetcmd + csio->cdb_len, 16 - csio->cdb_len);

#if 0
	kprintf("opcode %d cdb_len %d dxfer_len %d\n",
		cdbs->generic.opcode,
		csio->cdb_len, csio->dxfer_len);
#endif

	/*
	 * Some ATAPI commands do not actually follow the SCSI standard.
	 */
	cdbd = (void *)xa->packetcmd;

	switch(cdbd->generic.opcode) {
	case REQUEST_SENSE:
		/*
		 * Force SENSE requests to the ATAPI sense length.
		 *
		 * It is unclear if this is needed or not.
		 */
		if (cdbd->sense.length == SSD_FULL_SIZE) {
			if (bootverbose) {
				kprintf("%s: Shortening sense request\n",
					PORTNAME(ap));
			}
			cdbd->sense.length = offsetof(struct scsi_sense_data,
						      extra_bytes[0]);
		}
		break;
	case INQUIRY:
		/*
		 * Some ATAPI devices can't handle long inquiry lengths;
		 * don't ask me why.  Truncate the inquiry length.
		 */
		if (cdbd->inquiry.page_code == 0 &&
		    cdbd->inquiry.length > SHORT_INQUIRY_LENGTH) {
			cdbd->inquiry.length = SHORT_INQUIRY_LENGTH;
		}
		break;
	case READ_6:
	case WRITE_6:
		/*
		 * Convert *_6 to *_10 commands.  Most ATAPI devices
		 * cannot handle the SCSI READ_6 and WRITE_6 commands.
		 */
		cdbd->rw_10.opcode |= 0x20;
		cdbd->rw_10.byte2 = 0;
		cdbd->rw_10.addr[0] = cdbs->rw_6.addr[0] & 0x1F;
		cdbd->rw_10.addr[1] = cdbs->rw_6.addr[1];
		cdbd->rw_10.addr[2] = cdbs->rw_6.addr[2];
		cdbd->rw_10.addr[3] = 0;
		cdbd->rw_10.reserved = 0;
		cdbd->rw_10.length[0] = 0;
		cdbd->rw_10.length[1] = cdbs->rw_6.length;
		cdbd->rw_10.control = cdbs->rw_6.control;
		break;
	default:
		break;
	}

	/*
	 * And dispatch
	 */
	xa->complete = ahci_atapi_complete_cmd;
	xa->atascsi_private = ccb;
	ccb->ccb_h.sim_priv.entries[0].ptr = ap;
	ahci_os_lock_port(ap);
	ahci_ata_cmd(xa);
	ahci_os_unlock_port(ap);
}

/*
 * Simulate page inquiries for disk attachments.
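 *
 * Only the supported-page list and the unit serial number page are
 * synthesized here; SVPD_UNIT_DEVID currently falls through to
 * CAM_FUNC_NOTAVAIL.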
 */
static
void
ahci_xpt_page_inquiry(struct ahci_port *ap, struct ata_port *at, union ccb *ccb)
{
	union {
		struct scsi_vpd_supported_page_list	list;
		struct scsi_vpd_unit_serial_number	serno;
		struct scsi_vpd_unit_devid		devid;
		char					buf[256];
	} *page;
	scsi_cdb_t cdb;
	int i;
	int j;
	int len;

	page = kmalloc(sizeof(*page), M_DEVBUF, M_WAITOK | M_ZERO);

	cdb = (void *)((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
			ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes);

	switch(cdb->inquiry.page_code) {
	case SVPD_SUPPORTED_PAGE_LIST:
		i = 0;
		page->list.device = T_DIRECT;
		page->list.page_code = SVPD_SUPPORTED_PAGE_LIST;
		page->list.list[i++] = SVPD_SUPPORTED_PAGE_LIST;
		page->list.list[i++] = SVPD_UNIT_SERIAL_NUMBER;
		page->list.list[i++] = SVPD_UNIT_DEVID;
		page->list.length = i;
		len = offsetof(struct scsi_vpd_supported_page_list, list[3]);
		break;
	case SVPD_UNIT_SERIAL_NUMBER:
		i = 0;
		j = sizeof(at->at_identify.serial);
		for (i = 0; i < j && at->at_identify.serial[i] == ' '; ++i)
			;
		while (j > i && at->at_identify.serial[j-1] == ' ')
			--j;
		page->serno.device = T_DIRECT;
		page->serno.page_code = SVPD_UNIT_SERIAL_NUMBER;
		page->serno.length = j - i;
		bcopy(at->at_identify.serial + i,
		      page->serno.serial_num, j - i);
		len = offsetof(struct scsi_vpd_unit_serial_number,
			       serial_num[j-i]);
		break;
	case SVPD_UNIT_DEVID:
		/* fall through for now */
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		len = 0;
		break;
	}
	if (ccb->ccb_h.status == CAM_REQ_INPROG) {
		if (len <= ccb->csio.dxfer_len) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			bzero(ccb->csio.data_ptr, ccb->csio.dxfer_len);
			bcopy(page, ccb->csio.data_ptr, len);
			ccb->csio.resid = ccb->csio.dxfer_len - len;
		} else {
			ccb->ccb_h.status = CAM_CCB_LEN_ERR;
		}
	}
	kfree(page, M_DEVBUF);
}

/*
 * Completion function for ATA_PORT_T_DISK cache synchronization.
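 *
 * Note that the port lock is dropped around the call to xpt_done(),
 * as it is in the other completion callbacks below.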
 */
static
void
ahci_ata_complete_disk_synchronize_cache(struct ata_xfer *xa)
{
	union ccb *ccb = xa->atascsi_private;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	struct ahci_port *ap = ccb->ccb_h.sim_priv.entries[0].ptr;

	switch(xa->state) {
	case ATA_S_COMPLETE:
		ccbh->status = CAM_REQ_CMP;
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		break;
	case ATA_S_ERROR:
		kprintf("%s: synchronize_cache: error\n",
			ATANAME(ap, xa->at));
		ccbh->status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
		ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		ahci_ata_dummy_sense(&ccb->csio.sense_data);
		break;
	case ATA_S_TIMEOUT:
		kprintf("%s: synchronize_cache: timeout\n",
			ATANAME(ap, xa->at));
		ccbh->status = CAM_CMD_TIMEOUT;
		break;
	default:
		kprintf("%s: synchronize_cache: unknown state %d\n",
			ATANAME(ap, xa->at), xa->state);
		panic("%s: Unknown state", ATANAME(ap, xa->at));
		ccbh->status = CAM_REQ_CMP_ERR;
		break;
	}
	ahci_ata_put_xfer(xa);
	ahci_os_unlock_port(ap);
	xpt_done(ccb);
	ahci_os_lock_port(ap);
}

/*
 * Completion function for ATA_PORT_T_DISK I/O
 */
static
void
ahci_ata_complete_disk_rw(struct ata_xfer *xa)
{
	union ccb *ccb = xa->atascsi_private;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	struct ahci_port *ap = ccb->ccb_h.sim_priv.entries[0].ptr;

	switch(xa->state) {
	case ATA_S_COMPLETE:
		ccbh->status = CAM_REQ_CMP;
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		break;
	case ATA_S_ERROR:
		kprintf("%s: disk_rw: error\n", ATANAME(ap, xa->at));
		ccbh->status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
		ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		ahci_ata_dummy_sense(&ccb->csio.sense_data);
		break;
	case ATA_S_TIMEOUT:
		kprintf("%s: disk_rw: timeout\n", ATANAME(ap, xa->at));
		ccbh->status = CAM_CMD_TIMEOUT;
		ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		ahci_ata_dummy_sense(&ccb->csio.sense_data);
		break;
	default:
		kprintf("%s: disk_rw: unknown state %d\n",
			ATANAME(ap, xa->at), xa->state);
		panic("%s: Unknown state", ATANAME(ap, xa->at));
		ccbh->status = CAM_REQ_CMP_ERR;
		break;
	}
	ccb->csio.resid = xa->resid;
	ahci_ata_put_xfer(xa);
	ahci_os_unlock_port(ap);
	xpt_done(ccb);
	ahci_os_lock_port(ap);
}

/*
 * Completion function for ATA_PORT_T_ATAPI I/O
 *
 * Sense data is returned in the rfis.
 */
static
void
ahci_atapi_complete_cmd(struct ata_xfer *xa)
{
	union ccb *ccb = xa->atascsi_private;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	struct ahci_port *ap = ccb->ccb_h.sim_priv.entries[0].ptr;
	scsi_cdb_t cdb;

	cdb = (void *)((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
			ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes);

	switch(xa->state) {
	case ATA_S_COMPLETE:
		ccbh->status = CAM_REQ_CMP;
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		break;
	case ATA_S_ERROR:
		ccbh->status = CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		ahci_ata_atapi_sense(&xa->rfis, &ccb->csio.sense_data);
		break;
	case ATA_S_TIMEOUT:
		kprintf("%s: cmd %d: timeout\n",
			PORTNAME(ap), cdb->generic.opcode);
		ccbh->status = CAM_CMD_TIMEOUT;
		ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		ahci_ata_dummy_sense(&ccb->csio.sense_data);
		break;
	default:
		kprintf("%s: cmd %d: unknown state %d\n",
			PORTNAME(ap), cdb->generic.opcode, xa->state);
		panic("%s: Unknown state", PORTNAME(ap));
		ccbh->status = CAM_REQ_CMP_ERR;
		break;
	}
	ccb->csio.resid = xa->resid;
	ahci_ata_put_xfer(xa);
	ahci_os_unlock_port(ap);
	xpt_done(ccb);
	ahci_os_lock_port(ap);
}

/*
 * Construct dummy sense data for errors on DISKs
 */
static
void
ahci_ata_dummy_sense(struct scsi_sense_data *sense_data)
{
	sense_data->error_code = SSD_ERRCODE_VALID | SSD_CURRENT_ERROR;
	sense_data->segment = 0;
	sense_data->flags = SSD_KEY_MEDIUM_ERROR;
	sense_data->info[0] = 0;
	sense_data->info[1] = 0;
	sense_data->info[2] = 0;
	sense_data->info[3] = 0;
	sense_data->extra_len = 0;
}

/*
 * Construct atapi sense data for errors on ATAPI
 *
 * The ATAPI sense data is stored in the passed rfis and must be converted
 * to SCSI sense data.
 */
static
void
ahci_ata_atapi_sense(struct ata_fis_d2h *rfis,
		     struct scsi_sense_data *sense_data)
{
	sense_data->error_code = SSD_ERRCODE_VALID | SSD_CURRENT_ERROR;
	sense_data->segment = 0;
	sense_data->flags = (rfis->error & 0xF0) >> 4;
	if (rfis->error & 0x04)
		sense_data->flags |= SSD_KEY_ILLEGAL_REQUEST;
	if (rfis->error & 0x02)
		sense_data->flags |= SSD_EOM;
	if (rfis->error & 0x01)
		sense_data->flags |= SSD_ILI;
	sense_data->info[0] = 0;
	sense_data->info[1] = 0;
	sense_data->info[2] = 0;
	sense_data->info[3] = 0;
	sense_data->extra_len = 0;
}

static
void
ahci_strip_string(const char **basep, int *lenp)
{
	const char *base = *basep;
	int len = *lenp;

	while (len && (*base == 0 || *base == ' ')) {
		--len;
		++base;
	}
	while (len && (base[len-1] == 0 || base[len-1] == ' '))
		--len;
	*basep = base;
	*lenp = len;
}