/*-
 * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ata/ata-disk.c,v 1.199 2006/09/14 19:12:29 sos Exp $
 */

#include "opt_ata.h"

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/nata.h>
#include <sys/systm.h>

#include <vm/pmap.h>

#include <machine/md_var.h>

#include "ata-all.h"
#include "ata-disk.h"
#include "ata_if.h"

/* local implementation, to trigger a warning */
static inline void
biofinish(struct bio *bp, struct bio *x __unused, int error)
{
    struct buf *bbp = bp->bio_buf;

    bbp->b_flags |= B_ERROR;
    bbp->b_error = error;
    biodone(bp);
}

/* device structure */
static d_open_t ad_open;
static d_close_t ad_close;
static d_ioctl_t ad_ioctl;
static d_strategy_t ad_strategy;
static d_dump_t ad_dump;
static struct dev_ops ad_ops = {
    { "ad", 0, D_DISK },
    .d_open = ad_open,
    .d_close = ad_close,
    .d_read = physread,
    .d_write = physwrite,
    .d_ioctl = ad_ioctl,
    .d_strategy = ad_strategy,
    .d_dump = ad_dump,
};

/* prototypes */
static void ad_init(device_t);
static void ad_done(struct ata_request *);
static void ad_describe(device_t dev);
static int ad_version(u_int16_t);

/* local vars */
static MALLOC_DEFINE(M_AD, "ad_driver", "ATA disk driver");

static int
ad_probe(device_t dev)
{
    struct ata_device *atadev = device_get_softc(dev);

    if (!(atadev->param.config & ATA_PROTO_ATAPI) ||
	(atadev->param.config == ATA_CFA_MAGIC1) ||
	(atadev->param.config == ATA_CFA_MAGIC2) ||
	(atadev->param.config == ATA_CFA_MAGIC3))
	return 0;
    else
	return ENXIO;
}

static int
ad_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    struct disk_info info;
    struct ad_softc *adp;
    cdev_t cdev;
    u_int32_t lbasize;
    u_int64_t lbasize48;

    /* check that we have a virgin disk to attach */
    if (device_get_ivars(dev))
	return EEXIST;

    adp = kmalloc(sizeof(struct ad_softc), M_AD, M_INTWAIT | M_ZERO);
    device_set_ivars(dev, adp);

    if ((atadev->param.atavalid & ATA_FLAG_54_58) &&
	atadev->param.current_heads && atadev->param.current_sectors) {
	adp->heads = atadev->param.current_heads;
	adp->sectors = atadev->param.current_sectors;
	adp->total_secs = (u_int32_t)atadev->param.current_size_1 |
			  ((u_int32_t)atadev->param.current_size_2 << 16);
    }
    else {
	adp->heads = atadev->param.heads;
	adp->sectors = atadev->param.sectors;
	adp->total_secs = atadev->param.cylinders * adp->heads * adp->sectors;
    }
    lbasize = (u_int32_t)atadev->param.lba_size_1 |
	      ((u_int32_t)atadev->param.lba_size_2 << 16);

    /* does this device need oldstyle CHS addressing */
    if (!ad_version(atadev->param.version_major) || !lbasize)
	atadev->flags |= ATA_D_USE_CHS;

    /* use the 28bit LBA size if valid or bigger than the CHS mapping */
    if (atadev->param.cylinders == 16383 || adp->total_secs < lbasize)
	adp->total_secs = lbasize;

    /* use the 48bit LBA size if valid */
    lbasize48 = ((u_int64_t)atadev->param.lba_size48_1) |
		((u_int64_t)atadev->param.lba_size48_2 << 16) |
		((u_int64_t)atadev->param.lba_size48_3 << 32) |
		((u_int64_t)atadev->param.lba_size48_4 << 48);
    if ((atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) &&
	lbasize48 > ATA_MAX_28BIT_LBA)
	adp->total_secs = lbasize48;

    /* init device parameters */
    ad_init(dev);

    /* create the disk device */
    /* XXX TGEN Maybe use DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
       DEVSTAT_PRIORITY_MAX. */
    devstat_add_entry(&adp->stats, "ad", device_get_unit(dev), DEV_BSIZE,
		      DEVSTAT_NO_ORDERED_TAGS,
		      DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_IDE,
		      DEVSTAT_PRIORITY_DISK);
    cdev = disk_create(device_get_unit(dev), &adp->disk, &ad_ops);
    cdev->si_drv1 = dev;
    if (ch->dma)
	cdev->si_iosize_max = ch->dma->max_iosize;
    else
	cdev->si_iosize_max = min(MAXPHYS, 64 * 1024);
    adp->cdev = cdev;

    bzero(&info, sizeof(info));
    info.d_media_blksize = DEV_BSIZE;		/* mandatory */
    info.d_media_blocks = adp->total_secs;

    info.d_secpertrack = adp->sectors;		/* optional */
    info.d_nheads = adp->heads;
    info.d_ncylinders = adp->total_secs / (adp->heads * adp->sectors);
    info.d_secpercyl = adp->sectors * adp->heads;
    info.d_serialno = atadev->param.serial;

    device_add_child(dev, "subdisk", device_get_unit(dev));
    bus_generic_attach(dev);

    /* announce we are here */
    ad_describe(dev);

    disk_setdiskinfo(&adp->disk, &info);

#if defined(__DragonFly__)
    callout_init_mp(&atadev->spindown_timer);
#else
    callout_init(&atadev->spindown_timer, 1);
#endif
    return 0;
}

static int
ad_detach(device_t dev)
{
    struct ad_softc *adp = device_get_ivars(dev);
    struct ata_device *atadev = device_get_softc(dev);
    device_t *children;
    int nchildren, i;

    /* check that we have a valid disk to detach */
    if (!adp)
	return ENXIO;

    /* destroy the power timeout */
    callout_drain(&atadev->spindown_timer);

    /* detach & delete all children */
    if (!device_get_children(dev, &children, &nchildren)) {
	for (i = 0; i < nchildren; i++)
	    if (children[i])
		device_delete_child(dev, children[i]);
	kfree(children, M_TEMP);
    }

    /* destroy the disk in the system so we don't get any further requests */
    disk_invalidate(&adp->disk);
    disk_destroy(&adp->disk);

    /* fail requests on the queue and any that are "in flight" for this device */
    ata_fail_requests(dev);

    /* don't leave anything behind */
    /* disk_destroy() already took care of the dev_ops */
    devstat_remove_entry(&adp->stats);
    device_set_ivars(dev, NULL);
    kfree(adp, M_AD);
    return 0;
}

static void
ad_shutdown(device_t dev)
{
    struct ata_device *atadev = device_get_softc(dev);

    if (atadev->param.support.command2 & ATA_SUPPORT_FLUSHCACHE)
	ata_controlcmd(dev, ATA_FLUSHCACHE, 0, 0, 0);
}

static int
ad_reinit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);

    /* if detach pending, return error */
    if (((atadev->unit == ATA_MASTER) && !(ch->devices & ATA_ATA_MASTER)) ||
	((atadev->unit == ATA_SLAVE) && !(ch->devices & ATA_ATA_SLAVE))) {
	return 1;
    }
    ad_init(dev);
    return 0;
}

static void
ad_power_callback(struct ata_request *request)
{
    device_printf(request->dev, "drive spun down.\n");
    ata_free_request(request);
}

static void
ad_spindown(void *priv)
{
    device_t dev = priv;
    struct ata_device *atadev = device_get_softc(dev);
    struct ata_request *request;

    if (!atadev->spindown)
	return;
    device_printf(dev, "Idle, spin down\n");
    atadev->spindown_state = 1;
    if (!(request = ata_alloc_request())) {
	device_printf(dev, "FAILURE - out of memory in ad_spindown\n");
	return;
    }
    request->dev = dev;
    request->flags = ATA_R_CONTROL;
    request->timeout = ATA_DEFAULT_TIMEOUT;
    request->retries = 1;
    request->callback = ad_power_callback;
    request->u.ata.command = ATA_STANDBY_IMMEDIATE;
    ata_queue_request(request);
}

static int
ad_open(struct dev_open_args *ap)
{
    device_t dev = ap->a_head.a_dev->si_drv1;
    struct ad_softc *adp = device_get_ivars(dev);

    if (!adp || adp->cdev == NULL)
	return ENXIO;
    if (!device_is_attached(dev))
	return EBUSY;

    return 0;
}

static int
ad_close(struct dev_close_args *ap)
{
    return 0;
}

static int
ad_strategy(struct dev_strategy_args *ap)
{
    device_t dev = ap->a_head.a_dev->si_drv1;
    struct bio *bp = ap->a_bio;
    struct buf *bbp = bp->bio_buf;
    struct ata_device *atadev = device_get_softc(dev);
    struct ata_request *request;
    struct ad_softc *adp = device_get_ivars(dev);

    if (atadev->spindown)
	callout_reset(&atadev->spindown_timer, hz * atadev->spindown,
		      ad_spindown, dev);

    if (!(request = ata_alloc_request())) {
	device_printf(dev, "FAILURE - out of memory in strategy\n");
	biofinish(bp, NULL, ENOMEM);
	return(0);
    }

    /* setup request */
    request->dev = dev;
    request->bio = bp;
    request->callback = ad_done;
    if (atadev->spindown_state) {
	device_printf(dev, "request while spun down, starting.\n");
	atadev->spindown_state = 0;
	request->timeout = MAX(ATA_DEFAULT_TIMEOUT, 31);
    } else {
	request->timeout = ATA_DEFAULT_TIMEOUT;
    }
    request->retries = 2;
    request->data = bbp->b_data;
    request->bytecount = bbp->b_bcount;
    /* lba is block granularity, convert byte granularity bio_offset */
    request->u.ata.lba = (u_int64_t)(bp->bio_offset >> DEV_BSHIFT);
    request->u.ata.count = request->bytecount / DEV_BSIZE;
    request->transfersize = min(bbp->b_bcount, atadev->max_iosize);

    switch (bbp->b_cmd) {
    case BUF_CMD_READ:
	request->flags = ATA_R_READ;
	if (atadev->mode >= ATA_DMA) {
	    request->u.ata.command = ATA_READ_DMA;
	    request->flags |= ATA_R_DMA;
	}
	else if (request->transfersize > DEV_BSIZE)
	    request->u.ata.command = ATA_READ_MUL;
	else
	    request->u.ata.command = ATA_READ;
	break;
    case BUF_CMD_WRITE:
	request->flags = ATA_R_WRITE;
	if (atadev->mode >= ATA_DMA) {
	    request->u.ata.command = ATA_WRITE_DMA;
	    request->flags |= ATA_R_DMA;
	}
	else if (request->transfersize > DEV_BSIZE)
	    request->u.ata.command = ATA_WRITE_MUL;
	else
	    request->u.ata.command = ATA_WRITE;
	break;
    case BUF_CMD_FLUSH:
	request->u.ata.lba = 0;
	request->u.ata.count = 0;
	request->u.ata.feature = 0;
	request->bytecount = 0;
	request->transfersize = 0;
	request->flags = ATA_R_CONTROL;
	request->u.ata.command = ATA_FLUSHCACHE;
	/* ATA FLUSHCACHE requests may take up to 30 sec to timeout */
	request->timeout = 30;
	break;
    default:
	device_printf(dev, "FAILURE - unknown BUF operation\n");
	ata_free_request(request);
	biofinish(bp, NULL, EIO);
	return(0);
    }
    request->flags |= ATA_R_ORDERED;
    devstat_start_transaction(&adp->stats);
    ata_queue_request(request);
    return(0);
}

static void
ad_done(struct ata_request *request)
{
    struct ad_softc *adp = device_get_ivars(request->dev);
    struct bio *bp = request->bio;
    struct buf *bbp = bp->bio_buf;

    /* finish up transfer */
    if ((bbp->b_error = request->result))
	bbp->b_flags |= B_ERROR;
    bbp->b_resid = bbp->b_bcount - request->donecount;
    devstat_end_transaction_buf(&adp->stats, bbp);
    biodone(bp);
    ata_free_request(request);
}

static int
ad_ioctl(struct dev_ioctl_args *ap)
{
    return ata_device_ioctl(ap->a_head.a_dev->si_drv1, ap->a_cmd, ap->a_data);
}

static int
ad_dump(struct dev_dump_args *ap)
{
    device_t dev = ap->a_head.a_dev->si_drv1;
    struct ata_device *atadev = device_get_softc(dev);
    struct ata_request request;

    ata_drop_requests(dev);
    /*
     * 0 length means flush buffers and return
     */
    if (ap->a_length == 0) {
	/* flush buffers to media */
	if (atadev->param.support.command2 & ATA_SUPPORT_FLUSHCACHE)
	    return ata_controlcmd(dev, ATA_FLUSHCACHE, 0, 0, 0);
	else
	    return ENXIO;
    }

    bzero(&request, sizeof(struct ata_request));
    request.dev = dev;

    request.data = ap->a_virtual;
    request.bytecount = ap->a_length;
    request.transfersize = min(request.bytecount, atadev->max_iosize);
    request.flags = ATA_R_WRITE;

    if (atadev->mode >= ATA_DMA) {
	request.u.ata.command = ATA_WRITE_DMA;
	request.flags |= ATA_R_DMA;
    } else if (request.transfersize > DEV_BSIZE)
	request.u.ata.command = ATA_WRITE_MUL;
    else
	request.u.ata.command = ATA_WRITE;
    request.u.ata.lba = ap->a_offset / DEV_BSIZE;
    request.u.ata.count = request.bytecount / DEV_BSIZE;

    request.timeout = ATA_DEFAULT_TIMEOUT;
    request.retries = 2;

    ata_queue_request(&request);
    return request.result;
}

static void
ad_init(device_t dev)
{
    struct ata_device *atadev = device_get_softc(dev);

    ATA_SETMODE(device_get_parent(dev), dev);

    /* enable readahead caching */
    if (atadev->param.support.command1 & ATA_SUPPORT_LOOKAHEAD)
	ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_ENAB_RCACHE, 0, 0);

    /* enable write caching if supported and configured */
    if (atadev->param.support.command1 & ATA_SUPPORT_WRITECACHE) {
	if (ata_wc)
	    ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_ENAB_WCACHE, 0, 0);
	else
	    ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_DIS_WCACHE, 0, 0);
    }

    /* use multiple sectors/interrupt if device supports it */
    if (ad_version(atadev->param.version_major)) {
	int secsperint = max(1, min(atadev->param.sectors_intr & 0xff, 16));

	if (!ata_controlcmd(dev, ATA_SET_MULTI, 0, 0, secsperint))
	    atadev->max_iosize = secsperint * DEV_BSIZE;
    }
    else
	atadev->max_iosize = DEV_BSIZE;
}

static void
ad_describe(device_t dev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    struct ad_softc *adp = device_get_ivars(dev);
    u_int8_t *marker, vendor[64], product[64];

    /* try to separate the ATA model string into vendor and model parts */
    if ((marker = index(atadev->param.model, ' ')) ||
	(marker = index(atadev->param.model, '-'))) {
	int len = (marker - atadev->param.model);

	strncpy(vendor, atadev->param.model, len);
	vendor[len++] = 0;
	strcat(vendor, " ");
	strncpy(product, atadev->param.model + len, 40 - len);
	product[40 - len] = 0;
    }
    else {
	if (!strncmp(atadev->param.model, "ST", 2))
	    strcpy(vendor, "Seagate ");
	else if (!strncmp(atadev->param.model, "HDS", 3))
	    strcpy(vendor, "Hitachi ");
	else
	    strcpy(vendor, "");
	strncpy(product, atadev->param.model, 40);
    }

    device_printf(dev, "%juMB <%s%s %.8s> at ata%d-%s %s%s\n",
		  adp->total_secs / (1048576 / DEV_BSIZE),
		  vendor, product, atadev->param.revision,
		  device_get_unit(ch->dev),
		  (atadev->unit == ATA_MASTER) ? "master" : "slave",
		  (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
		  ata_mode2str(atadev->mode));
    if (bootverbose) {
	device_printf(dev, "%ju sectors [%juC/%dH/%dS] "
		      "%d sectors/interrupt %d depth queue\n", adp->total_secs,
		      adp->total_secs / (adp->heads * adp->sectors),
		      adp->heads, adp->sectors, atadev->max_iosize / DEV_BSIZE,
		      adp->num_tags + 1);
    }
}

/* return the highest supported ATA major version bit, or 0 if not reported */
static int
ad_version(u_int16_t version)
{
    int bit;

    if (version == 0xffff)
	return 0;
    for (bit = 15; bit >= 0; bit--)
	if (version & (1 << bit))
	    return bit;
    return 0;
}

static device_method_t ad_methods[] = {
    /* device interface */
    DEVMETHOD(device_probe,     ad_probe),
    DEVMETHOD(device_attach,    ad_attach),
    DEVMETHOD(device_detach,    ad_detach),
    DEVMETHOD(device_shutdown,  ad_shutdown),

    /* ATA methods */
    DEVMETHOD(ata_reinit,       ad_reinit),

    DEVMETHOD_END
};

static driver_t ad_driver = {
    "ad",
    ad_methods,
    0,
};

devclass_t ad_devclass;

DRIVER_MODULE(ad, ata, ad_driver, ad_devclass, NULL, NULL);
MODULE_VERSION(ad, 1);
MODULE_DEPEND(ad, ata, 1, 1, 1);