/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>

/*
 * Virtual device vector for disks.
 */

extern ldi_ident_t zfs_li;

typedef struct vdev_disk_buf {
        buf_t   vdb_buf;
        zio_t   *vdb_io;
} vdev_disk_buf_t;

static int
vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
{
        vdev_disk_t *dvd;
        struct dk_minfo dkm;
        int error;
        dev_t dev;
        char *physpath, *minorname;
        int otyp;

        /*
         * We must have a pathname, and it must be absolute.
         */
        if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (EINVAL);
        }

        dvd = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

        /*
         * When opening a disk device, we want to preserve the user's original
         * intent.  We always want to open the device by the path the user gave
         * us, even if it is one of multiple paths to the same device.  But we
         * also want to be able to survive disks being removed/recabled.
         * Therefore the sequence of opening devices is:
         *
         * 1. Try opening the device by path.  For legacy pools without the
         *    'whole_disk' property, attempt to fix the path by appending 's0'.
         *
         * 2. If the devid of the device matches the stored value, return
         *    success.
         *
         * 3. Otherwise, the device may have moved.  Try opening the device
         *    by the devid instead.
         *
         */
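        /*
         * Decode the stored devid string into its devid and minor-name
         * components so it can be compared against, and used to reopen,
         * whatever device we end up opening below.
         */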
        if (vd->vdev_devid != NULL) {
                if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
                    &dvd->vd_minor) != 0) {
                        vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                        return (EINVAL);
                }
        }

        error = EINVAL;         /* presume failure */

        if (vd->vdev_path != NULL) {
                ddi_devid_t devid;

                if (vd->vdev_wholedisk == -1ULL) {
                        size_t len = strlen(vd->vdev_path) + 3;
                        char *buf = kmem_alloc(len, KM_SLEEP);
                        ldi_handle_t lh;

                        (void) snprintf(buf, len, "%ss0", vd->vdev_path);

                        if (ldi_open_by_name(buf, spa_mode, kcred,
                            &lh, zfs_li) == 0) {
                                spa_strfree(vd->vdev_path);
                                vd->vdev_path = buf;
                                vd->vdev_wholedisk = 1ULL;
                                (void) ldi_close(lh, spa_mode, kcred);
                        } else {
                                kmem_free(buf, len);
                        }
                }

                error = ldi_open_by_name(vd->vdev_path, spa_mode, kcred,
                    &dvd->vd_lh, zfs_li);

                /*
                 * Compare the devid to the stored value.
                 */
                if (error == 0 && vd->vdev_devid != NULL &&
                    ldi_get_devid(dvd->vd_lh, &devid) == 0) {
                        if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
                                error = EINVAL;
                                (void) ldi_close(dvd->vd_lh, spa_mode, kcred);
                                dvd->vd_lh = NULL;
                        }
                        ddi_devid_free(devid);
                }

                /*
                 * If we succeeded in opening the device, but 'vdev_wholedisk'
                 * is not yet set, then this must be a slice.
                 */
                if (error == 0 && vd->vdev_wholedisk == -1ULL)
                        vd->vdev_wholedisk = 0;
        }

        /*
         * If we were unable to open by path, or the devid check failed, open
         * by devid instead.
         */
        if (error != 0 && vd->vdev_devid != NULL)
                error = ldi_open_by_devid(dvd->vd_devid, dvd->vd_minor,
                    spa_mode, kcred, &dvd->vd_lh, zfs_li);

        /*
         * If all else fails, then try opening by physical path (if available)
         * or the logical path (if we failed due to the devid check).  While
         * not as reliable as the devid, this will give us something, and the
         * higher-level vdev validation will prevent us from opening the wrong
         * device.
         */
        if (error) {
                if (vd->vdev_physpath != NULL &&
                    (dev = ddi_pathname_to_dev_t(vd->vdev_physpath)) != ENODEV)
                        error = ldi_open_by_dev(&dev, OTYP_BLK, spa_mode,
                            kcred, &dvd->vd_lh, zfs_li);

                /*
                 * Note that we don't support the legacy auto-wholedisk
                 * handling as above.  This hasn't been used in a very long
                 * time and we don't need to propagate its oddities to this
                 * edge condition.
                 */
                if (error && vd->vdev_path != NULL)
                        error = ldi_open_by_name(vd->vdev_path, spa_mode, kcred,
                            &dvd->vd_lh, zfs_li);
        }

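        /*
         * All of the open strategies above failed; record the failure so
         * that the higher-level vdev code reports the device as unopenable.
         */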
        if (error) {
                vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
                return (error);
        }

        /*
         * Once a device is opened, verify that the physical device path (if
         * available) is up to date.
         */
        if (ldi_get_dev(dvd->vd_lh, &dev) == 0 &&
            ldi_get_otyp(dvd->vd_lh, &otyp) == 0) {
                physpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
                minorname = NULL;
                if (ddi_dev_pathname(dev, otyp, physpath) == 0 &&
                    ldi_get_minor_name(dvd->vd_lh, &minorname) == 0 &&
                    (vd->vdev_physpath == NULL ||
                    strcmp(vd->vdev_physpath, physpath) != 0)) {
                        if (vd->vdev_physpath)
                                spa_strfree(vd->vdev_physpath);
                        (void) strlcat(physpath, ":", MAXPATHLEN);
                        (void) strlcat(physpath, minorname, MAXPATHLEN);
                        vd->vdev_physpath = spa_strdup(physpath);
                }
                if (minorname)
                        kmem_free(minorname, strlen(minorname) + 1);
                kmem_free(physpath, MAXPATHLEN);
        }

        /*
         * Determine the actual size of the device.
         */
        if (ldi_get_size(dvd->vd_lh, psize) != 0) {
                vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
                return (EINVAL);
        }

        /*
         * If we own the whole disk, try to enable disk write caching.
         * We ignore errors because it's OK if we can't do it.
         */
        if (vd->vdev_wholedisk == 1) {
                int wce = 1;
                (void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce,
                    FKIOCTL, kcred, NULL);
        }

        /*
         * Determine the device's minimum transfer size.
         * If the ioctl isn't supported, assume DEV_BSIZE.
         */
        if (ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFO, (intptr_t)&dkm,
            FKIOCTL, kcred, NULL) != 0)
                dkm.dki_lbsize = DEV_BSIZE;

        *ashift = highbit(MAX(dkm.dki_lbsize, SPA_MINBLOCKSIZE)) - 1;

        /*
         * Clear the nowritecache bit, so that on a vdev_reopen() we will
         * try again.
         */
        vd->vdev_nowritecache = B_FALSE;

        return (0);
}

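/*
 * Undo vdev_disk_open(): free the decoded devid and minor name and close
 * the LDI handle, if they were set up.
 */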
static void
vdev_disk_close(vdev_t *vd)
{
        vdev_disk_t *dvd = vd->vdev_tsd;

        if (dvd == NULL)
                return;

        if (dvd->vd_minor != NULL)
                ddi_devid_str_free(dvd->vd_minor);

        if (dvd->vd_devid != NULL)
                ddi_devid_free(dvd->vd_devid);

        if (dvd->vd_lh != NULL)
                (void) ldi_close(dvd->vd_lh, spa_mode, kcred);

        kmem_free(dvd, sizeof (vdev_disk_t));
        vd->vdev_tsd = NULL;
}

/*
 * I/O completion callback for bufs issued via ldi_strategy().
 */
static void
vdev_disk_io_intr(buf_t *bp)
{
        vdev_disk_buf_t *vdb = (vdev_disk_buf_t *)bp;
        zio_t *zio = vdb->vdb_io;

        if ((zio->io_error = geterror(bp)) == 0 && bp->b_resid != 0)
                zio->io_error = EIO;

        kmem_free(vdb, sizeof (vdev_disk_buf_t));

        zio_next_stage_async(zio);
}

/*
 * Completion callback for the asynchronous cache-flush ioctl issued in
 * vdev_disk_io_start().
 */
static void
vdev_disk_ioctl_done(void *zio_arg, int error)
{
        zio_t *zio = zio_arg;

        zio->io_error = error;

        zio_next_stage_async(zio);
}

static void
vdev_disk_io_start(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        vdev_disk_t *dvd = vd->vdev_tsd;
        vdev_disk_buf_t *vdb;
        buf_t *bp;
        int flags, error;

        if (zio->io_type == ZIO_TYPE_IOCTL) {
                zio_vdev_io_bypass(zio);

                /* XXPOLICY */
                if (vdev_is_dead(vd)) {
                        zio->io_error = ENXIO;
                        zio_next_stage_async(zio);
                        return;
                }

                switch (zio->io_cmd) {

                case DKIOCFLUSHWRITECACHE:

                        if (zfs_nocacheflush)
                                break;

                        if (vd->vdev_nowritecache) {
                                zio->io_error = ENOTSUP;
                                break;
                        }

                        zio->io_dk_callback.dkc_callback = vdev_disk_ioctl_done;
                        zio->io_dk_callback.dkc_cookie = zio;

                        error = ldi_ioctl(dvd->vd_lh, zio->io_cmd,
                            (uintptr_t)&zio->io_dk_callback,
                            FKIOCTL, kcred, NULL);

                        if (error == 0) {
                                /*
                                 * The ioctl will be done asynchronously,
                                 * and will call vdev_disk_ioctl_done()
                                 * upon completion.
                                 */
                                return;
                        } else if (error == ENOTSUP) {
                                /*
                                 * If we get ENOTSUP, we know that no future
                                 * attempts will ever succeed.  In this case we
                                 * set a persistent bit so that we don't bother
                                 * with the ioctl in the future.
                                 */
                                vd->vdev_nowritecache = B_TRUE;
                        }
                        zio->io_error = error;

                        break;

                default:
                        zio->io_error = ENOTSUP;
                }

                zio_next_stage_async(zio);
                return;
        }

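        /*
         * For read and write I/O: satisfy reads from the vdev cache when
         * possible, pass the zio through the vdev queue, then build a buf
         * and issue it to the device via ldi_strategy().
         */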
        if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0)
                return;

        if ((zio = vdev_queue_io(zio)) == NULL)
                return;

        flags = (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE);
        flags |= B_BUSY | B_NOCACHE;
        if (zio->io_flags & ZIO_FLAG_FAILFAST)
                flags |= B_FAILFAST;

        vdb = kmem_alloc(sizeof (vdev_disk_buf_t), KM_SLEEP);

        vdb->vdb_io = zio;
        bp = &vdb->vdb_buf;

        bioinit(bp);
        bp->b_flags = flags;
        bp->b_bcount = zio->io_size;
        bp->b_un.b_addr = zio->io_data;
        bp->b_lblkno = lbtodb(zio->io_offset);
        bp->b_bufsize = zio->io_size;
        bp->b_iodone = (int (*)())vdev_disk_io_intr;

        /* XXPOLICY */
        error = vdev_is_dead(vd) ? ENXIO : vdev_error_inject(vd, zio);
        if (error) {
                zio->io_error = error;
                bioerror(bp, error);
                bp->b_resid = bp->b_bcount;
                bp->b_iodone(bp);
                return;
        }

        error = ldi_strategy(dvd->vd_lh, bp);
        /* ldi_strategy() will return non-zero only on programming errors */
        ASSERT(error == 0);
}

static void
vdev_disk_io_done(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        vdev_disk_t *dvd = vd->vdev_tsd;
        int state;

        vdev_queue_io_done(zio);

        if (zio->io_type == ZIO_TYPE_WRITE)
                vdev_cache_write(zio);

        if (zio_injection_enabled && zio->io_error == 0)
                zio->io_error = zio_handle_device_injection(zio->io_vd, EIO);

        /*
         * If the device returned EIO, then attempt a DKIOCSTATE ioctl to see
         * if the device has been removed.  If this is the case, then we
         * trigger an asynchronous removal of the device.
         */
        if (zio->io_error == EIO) {
                state = DKIO_NONE;
                if (ldi_ioctl(dvd->vd_lh, DKIOCSTATE, (intptr_t)&state,
                    FKIOCTL, kcred, NULL) == 0 &&
                    state != DKIO_INSERTED) {
                        vd->vdev_remove_wanted = B_TRUE;
                        spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
                }
        }

        zio_next_stage(zio);
}

vdev_ops_t vdev_disk_ops = {
        vdev_disk_open,
        vdev_disk_close,
        vdev_default_asize,
        vdev_disk_io_start,
        vdev_disk_io_done,
        NULL,
        VDEV_TYPE_DISK,         /* name of this vdev type */
        B_TRUE                  /* leaf vdev */
};