/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
 */

#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_os.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <vm/vm_page.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <geom/geom_int.h>

#ifndef g_topology_locked
#define	g_topology_locked()	sx_xlocked(&topology_lock)
#endif

/*
 * Virtual device vector for GEOM.
 */

static g_attrchanged_t vdev_geom_attrchanged;
struct g_class zfs_vdev_class = {
	.name = "ZFS::VDEV",
	.version = G_VERSION,
	.attrchanged = vdev_geom_attrchanged,
};

struct consumer_vdev_elem {
	SLIST_ENTRY(consumer_vdev_elem)	elems;
	vdev_t				*vd;
};

SLIST_HEAD(consumer_priv_t, consumer_vdev_elem);
_Static_assert(
    sizeof (((struct g_consumer *)NULL)->private) ==
    sizeof (struct consumer_priv_t *),
    "consumer_priv_t* can't be stored in g_consumer.private");

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);

SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RWTUN,
    &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RWTUN,
    &vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");

/* Declare local functions */
static void vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read);

/*
 * Thread local storage used to indicate when a thread is probing geoms
 * for their guids. If NULL, this thread is not tasting geoms. If non-NULL,
 * it is looking for a replacement for the vdev_t* that is its value.
 */
uint_t zfs_geom_probe_vdev_key;
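/*
 * While the key is set (see vdev_geom_open()), code further down the
 * stack can tell that a GUID-tasting probe is in progress and that
 * zvols must not be accessed from this thread, which prevents a probe
 * from recursing into a zvol backed by the very pool being probed.
 */
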
static void
vdev_geom_set_physpath(vdev_t *vd, struct g_consumer *cp,
    boolean_t do_null_update)
{
	boolean_t needs_update = B_FALSE;
	char *physpath;
	int error, physpath_len;

	physpath_len = MAXPATHLEN;
	physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
	error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
	if (error == 0) {
		char *old_physpath;

		/* g_topology lock ensures that vdev has not been closed */
		g_topology_assert();
		old_physpath = vd->vdev_physpath;
		vd->vdev_physpath = spa_strdup(physpath);

		if (old_physpath != NULL) {
			needs_update = (strcmp(old_physpath,
			    vd->vdev_physpath) != 0);
			spa_strfree(old_physpath);
		} else
			needs_update = do_null_update;
	}
	g_free(physpath);

	/*
	 * If the physical path changed, update the config.
	 * Only request an update for previously unset physpaths if
	 * requested by the caller.
	 */
	if (needs_update)
		spa_async_request(vd->vdev_spa, SPA_ASYNC_CONFIG_UPDATE);
}

static void
vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
{
	struct consumer_priv_t *priv;
	struct consumer_vdev_elem *elem;

	priv = (struct consumer_priv_t *)&cp->private;
	if (SLIST_EMPTY(priv))
		return;

	SLIST_FOREACH(elem, priv, elems) {
		vdev_t *vd = elem->vd;
		if (strcmp(attr, "GEOM::physpath") == 0) {
			vdev_geom_set_physpath(vd, cp,
			    /* do_null_update */ B_TRUE);
			return;
		}
	}
}

static void
vdev_geom_resize(struct g_consumer *cp)
{
	struct consumer_priv_t *priv;
	struct consumer_vdev_elem *elem;
	spa_t *spa;
	vdev_t *vd;

	priv = (struct consumer_priv_t *)&cp->private;
	if (SLIST_EMPTY(priv))
		return;

	SLIST_FOREACH(elem, priv, elems) {
		vd = elem->vd;
		if (vd->vdev_state != VDEV_STATE_HEALTHY)
			continue;
		spa = vd->vdev_spa;
		if (!spa->spa_autoexpand)
			continue;
		vdev_online(spa, vd->vdev_guid, ZFS_ONLINE_EXPAND, NULL);
	}
}

static void
vdev_geom_orphan(struct g_consumer *cp)
{
	struct consumer_priv_t *priv;
	// cppcheck-suppress uninitvar
	struct consumer_vdev_elem *elem;

	g_topology_assert();

	priv = (struct consumer_priv_t *)&cp->private;
	if (SLIST_EMPTY(priv))
		/* Vdev close in progress.  Ignore the event. */
		return;

	/*
	 * Orphan callbacks occur from the GEOM event thread.
	 * Concurrent with this call, new I/O requests may be
	 * working their way through GEOM about to find out
	 * (only once executed by the g_down thread) that we've
	 * been orphaned from our disk provider.  These I/Os
	 * must be retired before we can detach our consumer.
	 * This is most easily achieved by acquiring the
	 * SPA ZIO configuration lock as a writer, but doing
	 * so with the GEOM topology lock held would cause
	 * a lock order reversal.  Instead, rely on the SPA's
	 * async removal support to invoke a close on this
	 * vdev once it is safe to do so.
	 */
	SLIST_FOREACH(elem, priv, elems) {
		// cppcheck-suppress uninitvar
		vdev_t *vd = elem->vd;

		vd->vdev_remove_wanted = B_TRUE;
		spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
	}
}
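
/*
 * Attach a consumer to the given provider, creating the shared
 * "zfs::vdev" geom on first use.  On success the consumer is opened
 * with one read and one exclusive reference; write access is acquired
 * separately in vdev_geom_open() when the pool is writable.  Returns
 * NULL if the provider fails the optional sanity checks or cannot be
 * attached and opened.
 */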
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp, vdev_t *vd, boolean_t sanity)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	g_topology_assert();

	ZFS_LOG(1, "Attaching to %s.", pp->name);

	if (sanity) {
		if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize)) {
			ZFS_LOG(1, "Failing attach of %s. "
			    "Incompatible sectorsize %d\n",
			    pp->name, pp->sectorsize);
			return (NULL);
		} else if (pp->mediasize < SPA_MINDEVSIZE) {
			ZFS_LOG(1, "Failing attach of %s. "
			    "Incompatible mediasize %ju\n",
			    pp->name, pp->mediasize);
			return (NULL);
		}
	}

	/* Do we have geom already? No? Create one. */
	LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
		if (gp->flags & G_GEOM_WITHER)
			continue;
		if (strcmp(gp->name, "zfs::vdev") != 0)
			continue;
		break;
	}
	if (gp == NULL) {
		gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
		gp->orphan = vdev_geom_orphan;
		gp->attrchanged = vdev_geom_attrchanged;
		gp->resize = vdev_geom_resize;
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error != 0) {
			ZFS_LOG(1, "%s(%d): g_attach failed: %d\n", __func__,
			    __LINE__, error);
			vdev_geom_detach(cp, B_FALSE);
			return (NULL);
		}
		error = g_access(cp, 1, 0, 1);
		if (error != 0) {
			ZFS_LOG(1, "%s(%d): g_access failed: %d\n", __func__,
			    __LINE__, error);
			vdev_geom_detach(cp, B_FALSE);
			return (NULL);
		}
		ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
	} else {
		/* Check if we are already connected to this provider. */
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			if (cp->provider == pp) {
				ZFS_LOG(1, "Found consumer for %s.", pp->name);
				break;
			}
		}
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			error = g_attach(cp, pp);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_attach failed: %d\n",
				    __func__, __LINE__, error);
				vdev_geom_detach(cp, B_FALSE);
				return (NULL);
			}
			error = g_access(cp, 1, 0, 1);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
				    __func__, __LINE__, error);
				vdev_geom_detach(cp, B_FALSE);
				return (NULL);
			}
			ZFS_LOG(1, "Created consumer for %s.", pp->name);
		} else {
			error = g_access(cp, 1, 0, 1);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
				    __func__, __LINE__, error);
				return (NULL);
			}
			ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
		}
	}

	if (vd != NULL)
		vd->vdev_tsd = cp;

	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	return (cp);
}
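
/*
 * Note on the g_access(cp, 1, 0, 1) calls above: g_access() takes signed
 * deltas for the consumer's read, write, and exclusive access counts, so
 * each successful attach path adds one read and one exclusive reference.
 * vdev_geom_detach() below drops the same pair with g_access(cp, -1, 0, -1).
 */
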
static void
vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read)
{
	struct g_geom *gp;

	g_topology_assert();

	ZFS_LOG(1, "Detaching from %s.",
	    cp->provider && cp->provider->name ? cp->provider->name : "NULL");

	gp = cp->geom;
	if (open_for_read)
		g_access(cp, -1, 0, -1);
	/* Destroy consumer on last close. */
	if (cp->acr == 0 && cp->ace == 0) {
		if (cp->acw > 0)
			g_access(cp, 0, -cp->acw, 0);
		if (cp->provider != NULL) {
			ZFS_LOG(1, "Destroying consumer for %s.",
			    cp->provider->name ? cp->provider->name : "NULL");
			g_detach(cp);
		}
		g_destroy_consumer(cp);
	}
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, ENXIO);
	}
}

static void
vdev_geom_close_locked(vdev_t *vd)
{
	struct g_consumer *cp;
	struct consumer_priv_t *priv;
	struct consumer_vdev_elem *elem, *elem_temp;

	g_topology_assert();

	cp = vd->vdev_tsd;
	vd->vdev_delayed_close = B_FALSE;
	if (cp == NULL)
		return;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	KASSERT(cp->private != NULL, ("%s: cp->private is NULL", __func__));
	priv = (struct consumer_priv_t *)&cp->private;
	vd->vdev_tsd = NULL;
	SLIST_FOREACH_SAFE(elem, priv, elems, elem_temp) {
		if (elem->vd == vd) {
			SLIST_REMOVE(priv, elem, consumer_vdev_elem, elems);
			g_free(elem);
		}
	}

	vdev_geom_detach(cp, B_TRUE);
}

/*
 * Issue one or more bios to the vdev in parallel.  cmds, datas, offsets,
 * errors, and sizes are arrays of length ncmds.  Each IO operation is
 * described by parallel entries from each array.  There may be more bios
 * actually issued than entries in the arrays.
 */
static void
vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
    off_t *sizes, int *errors, int ncmds)
{
	struct bio **bios;
	uint8_t *p;
	off_t off, maxio, s, end;
	int i, n_bios, j;
	size_t bios_size;

	maxio = maxphys - (maxphys % cp->provider->sectorsize);
	n_bios = 0;

	/* How many bios are required for all commands? */
	for (i = 0; i < ncmds; i++)
		n_bios += (sizes[i] + maxio - 1) / maxio;

	/* Allocate memory for the bios */
	bios_size = n_bios * sizeof (struct bio *);
	bios = kmem_zalloc(bios_size, KM_SLEEP);

	/* Prepare and issue all of the bios */
	for (i = j = 0; i < ncmds; i++) {
		off = offsets[i];
		p = datas[i];
		s = sizes[i];
		end = off + s;
		ASSERT0(off % cp->provider->sectorsize);
		ASSERT0(s % cp->provider->sectorsize);

		for (; off < end; off += maxio, p += maxio, s -= maxio, j++) {
			bios[j] = g_alloc_bio();
			bios[j]->bio_cmd = cmds[i];
			bios[j]->bio_done = NULL;
			bios[j]->bio_offset = off;
			bios[j]->bio_length = MIN(s, maxio);
			bios[j]->bio_data = (caddr_t)p;
			g_io_request(bios[j], cp);
		}
	}
	ASSERT3S(j, ==, n_bios);

	/* Wait for all of the bios to complete, and clean them up */
	for (i = j = 0; i < ncmds; i++) {
		off = offsets[i];
		s = sizes[i];
		end = off + s;

		for (; off < end; off += maxio, s -= maxio, j++) {
			errors[i] = biowait(bios[j], "vdev_geom_io") ||
			    errors[i];
			g_destroy_bio(bios[j]);
		}
	}
	kmem_free(bios, bios_size);
}
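
/*
 * For example, assuming maxphys is 1 MB (the default on recent FreeBSD)
 * and the provider uses 512-byte sectors, maxio works out to 1 MB, so a
 * single 2.5 MB command above is split into three bios (1 MB + 1 MB +
 * 0.5 MB), all issued before any of them is waited on.
 */
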
/*
 * Read the vdev config from a device.  Return the number of valid labels
 * that were found.  The vdev config will be returned in *configp if and
 * only if at least one valid label was found.
 */
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **configp)
{
	struct g_provider *pp;
	nvlist_t *config;
	vdev_phys_t *vdev_lists[VDEV_LABELS];
	char *buf;
	size_t buflen;
	uint64_t psize, state, txg;
	off_t offsets[VDEV_LABELS];
	off_t size;
	off_t sizes[VDEV_LABELS];
	int cmds[VDEV_LABELS];
	int errors[VDEV_LABELS];
	int l, nlabels;

	g_topology_assert_not();

	pp = cp->provider;
	ZFS_LOG(1, "Reading config from %s...", pp->name);

	psize = pp->mediasize;
	psize = P2ALIGN_TYPED(psize, sizeof (vdev_label_t), uint64_t);

	size = sizeof (*vdev_lists[0]) + pp->sectorsize -
	    ((sizeof (*vdev_lists[0]) - 1) % pp->sectorsize) - 1;

	buflen = sizeof (vdev_lists[0]->vp_nvlist);

	/* Create all of the IO requests */
	for (l = 0; l < VDEV_LABELS; l++) {
		cmds[l] = BIO_READ;
		vdev_lists[l] = kmem_alloc(size, KM_SLEEP);
		offsets[l] = vdev_label_offset(psize, l, 0) + VDEV_SKIP_SIZE;
		sizes[l] = size;
		errors[l] = 0;
		ASSERT0(offsets[l] % pp->sectorsize);
	}

	/* Issue the IO requests */
	vdev_geom_io(cp, cmds, (void**)vdev_lists, offsets, sizes, errors,
	    VDEV_LABELS);

	/* Parse the labels */
	config = *configp = NULL;
	nlabels = 0;
	for (l = 0; l < VDEV_LABELS; l++) {
		if (errors[l] != 0)
			continue;

		buf = vdev_lists[l]->vp_nvlist;

		if (nvlist_unpack(buf, buflen, &config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(config);
			continue;
		}

		if (state != POOL_STATE_SPARE &&
		    state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(config);
			continue;
		}

		if (*configp != NULL)
			nvlist_free(*configp);
		*configp = config;
		nlabels++;
	}

	/* Free the label storage */
	for (l = 0; l < VDEV_LABELS; l++)
		kmem_free(vdev_lists[l], size);

	return (nlabels);
}

static void
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
{
	nvlist_t **new_configs;
	uint64_t i;

	if (id < *count)
		return;
	new_configs = kmem_zalloc((id + 1) * sizeof (nvlist_t *),
	    KM_SLEEP);
	for (i = 0; i < *count; i++)
		new_configs[i] = (*configs)[i];
	if (*configs != NULL)
		kmem_free(*configs, *count * sizeof (void *));
	*configs = new_configs;
	*count = id + 1;
}
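
/*
 * Merge one just-read label config into the per-pool configs array,
 * which is indexed by top-level vdev id.  The config is kept only if it
 * names the requested pool and carries a newer txg than whatever already
 * occupies its slot; in every other case it is freed here, so the caller
 * transfers ownership of cfg unconditionally.
 */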
static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
    const char *name, uint64_t *known_pool_guid)
{
	nvlist_t *vdev_tree;
	uint64_t pool_guid;
	uint64_t vdev_guid;
	uint64_t id, txg, known_txg;
	const char *pname;

	if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
	    strcmp(pname, name) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
		goto ignore;

	if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
		goto ignore;

	txg = fnvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG);

	if (*known_pool_guid != 0) {
		if (pool_guid != *known_pool_guid)
			goto ignore;
	} else
		*known_pool_guid = pool_guid;

	resize_configs(configs, count, id);

	if ((*configs)[id] != NULL) {
		known_txg = fnvlist_lookup_uint64((*configs)[id],
		    ZPOOL_CONFIG_POOL_TXG);
		if (txg <= known_txg)
			goto ignore;
		nvlist_free((*configs)[id]);
	}

	(*configs)[id] = cfg;
	return;

ignore:
	nvlist_free(cfg);
}

int
vdev_geom_read_pool_label(const char *name,
    nvlist_t ***configs, uint64_t *count)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *zcp;
	nvlist_t *vdev_cfg;
	uint64_t pool_guid;
	int nlabels;

	DROP_GIANT();
	g_topology_lock();

	*configs = NULL;
	*count = 0;
	pool_guid = 0;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->flags & G_PF_WITHER)
					continue;
				zcp = vdev_geom_attach(pp, NULL, B_TRUE);
				if (zcp == NULL)
					continue;
				g_topology_unlock();
				nlabels = vdev_geom_read_config(zcp,
				    &vdev_cfg);
				g_topology_lock();
				vdev_geom_detach(zcp, B_TRUE);
				if (nlabels == 0)
					continue;
				ZFS_LOG(1, "successfully read vdev config");

				process_vdev_config(configs, count,
				    vdev_cfg, name, &pool_guid);
			}
		}
	}
	g_topology_unlock();
	PICKUP_GIANT();

	return (*count > 0 ? 0 : ENOENT);
}
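
/*
 * Ranking used when tasting providers for a particular vdev.  The values
 * are ordered so that a numerically larger result is always a better
 * candidate, which lets vdev_geom_attach_by_guids() simply keep the
 * maximum seen so far: a match on the top-level guid alone loses to any
 * number of direct vdev_guid label matches, and FULL_MATCH short-circuits
 * the search.
 */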
enum match {
	NO_MATCH = 0,		/* No matching labels found */
	TOPGUID_MATCH = 1,	/* Labels match top guid, not vdev guid */
	ZERO_MATCH = 1,		/* Should never be returned */
	ONE_MATCH = 2,		/* 1 label matching the vdev_guid */
	TWO_MATCH = 3,		/* 2 labels matching the vdev_guid */
	THREE_MATCH = 4,	/* 3 labels matching the vdev_guid */
	FULL_MATCH = 5		/* all labels match the vdev_guid */
};

static enum match
vdev_attach_ok(vdev_t *vd, struct g_provider *pp)
{
	nvlist_t *config;
	uint64_t pool_guid, top_guid, vdev_guid;
	struct g_consumer *cp;
	int nlabels;

	cp = vdev_geom_attach(pp, NULL, B_TRUE);
	if (cp == NULL) {
		ZFS_LOG(1, "Unable to attach tasting instance to %s.",
		    pp->name);
		return (NO_MATCH);
	}
	g_topology_unlock();
	nlabels = vdev_geom_read_config(cp, &config);
	g_topology_lock();
	vdev_geom_detach(cp, B_TRUE);
	if (nlabels == 0) {
		ZFS_LOG(1, "Unable to read config from %s.", pp->name);
		return (NO_MATCH);
	}

	pool_guid = 0;
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid);
	top_guid = 0;
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID, &top_guid);
	vdev_guid = 0;
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid);
	nvlist_free(config);

	/*
	 * Check that the label's pool guid matches the desired guid.
	 * Inactive spares and L2ARCs do not have any pool guid in the label.
	 */
	if (pool_guid != 0 && pool_guid != spa_guid(vd->vdev_spa)) {
		ZFS_LOG(1, "pool guid mismatch for provider %s: %ju != %ju.",
		    pp->name,
		    (uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)pool_guid);
		return (NO_MATCH);
	}

	/*
	 * Check that the label's vdev guid matches the desired guid.
	 * The second condition handles a possible race on vdev detach,
	 * when the remaining vdev receives the GUID of the destroyed
	 * top-level mirror vdev.
	 */
	if (vdev_guid == vd->vdev_guid) {
		ZFS_LOG(1, "guids match for provider %s.", pp->name);
		return (ZERO_MATCH + nlabels);
	} else if (top_guid == vd->vdev_guid && vd == vd->vdev_top) {
		ZFS_LOG(1, "top vdev guid match for provider %s.", pp->name);
		return (TOPGUID_MATCH);
	}
	ZFS_LOG(1, "vdev guid mismatch for provider %s: %ju != %ju.",
	    pp->name, (uintmax_t)vd->vdev_guid, (uintmax_t)vdev_guid);
	return (NO_MATCH);
}

static struct g_consumer *
vdev_geom_attach_by_guids(vdev_t *vd)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp, *best_pp;
	struct g_consumer *cp;
	const char *vdpath;
	enum match match, best_match;

	g_topology_assert();

	vdpath = vd->vdev_path + sizeof ("/dev/") - 1;
	cp = NULL;
	best_pp = NULL;
	best_match = NO_MATCH;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				match = vdev_attach_ok(vd, pp);
				if (match > best_match) {
					best_match = match;
					best_pp = pp;
				} else if (match == best_match) {
					if (strcmp(pp->name, vdpath) == 0) {
						best_pp = pp;
					}
				}
				if (match == FULL_MATCH)
					goto out;
			}
		}
	}

out:
	if (best_pp) {
		cp = vdev_geom_attach(best_pp, vd, B_TRUE);
		if (cp == NULL) {
			printf("ZFS WARNING: Unable to attach to %s.\n",
			    best_pp->name);
		}
	}
	return (cp);
}
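
/*
 * Search every GEOM provider in the system for one whose labels match
 * this vdev's pool and vdev GUIDs.  On success, vd->vdev_path is
 * rewritten to "/dev/<provider>" so the pool config tracks the device's
 * current name even if it has moved since the pool was last imported.
 */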
static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
{
	struct g_consumer *cp;
	char *buf;
	size_t len;

	g_topology_assert();

	ZFS_LOG(1, "Searching by guids [%ju:%ju].",
	    (uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)vd->vdev_guid);
	cp = vdev_geom_attach_by_guids(vd);
	if (cp != NULL) {
		len = strlen(cp->provider->name) + strlen("/dev/") + 1;
		buf = kmem_alloc(len, KM_SLEEP);

		snprintf(buf, len, "/dev/%s", cp->provider->name);
		spa_strfree(vd->vdev_path);
		vd->vdev_path = buf;

		ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid, cp->provider->name);
	} else {
		ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid);
	}

	return (cp);
}

static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
	struct g_provider *pp;
	struct g_consumer *cp;

	g_topology_assert();

	cp = NULL;
	pp = g_provider_by_name(vd->vdev_path + sizeof ("/dev/") - 1);
	if (pp != NULL) {
		ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
		if (!check_guid || vdev_attach_ok(vd, pp) == FULL_MATCH)
			cp = vdev_geom_attach(pp, vd, B_FALSE);
	}

	return (cp);
}

static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error, has_trim;
	uint16_t rate;

	/*
	 * Set the TLS to indicate downstack that we
	 * should not access zvols.
	 */
	VERIFY0(tsd_set(zfs_geom_probe_vdev_key, vd));

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || strncmp(vd->vdev_path, "/dev/", 5) != 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	/*
	 * Reopen the device if it's not currently open. Otherwise,
	 * just update the physical size of the device.
	 */
	if ((cp = vd->vdev_tsd) != NULL) {
		ASSERT(vd->vdev_reopening);
		goto skip_open;
	}
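
	/*
	 * On the skip_open path above (taken on a vdev_reopen()), the
	 * consumer from the previous open is reused as-is; only the size,
	 * ashift, and device attribute information past the skip_open
	 * label is refreshed.
	 */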
	DROP_GIANT();
	g_topology_lock();
	error = 0;

	if (vd->vdev_spa->spa_is_splitting ||
	    ((vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
	    (vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
	    vd->vdev_spa->spa_load_state == SPA_LOAD_CREATE)))) {
		/*
		 * We are dealing with a vdev that hasn't been previously
		 * opened (since boot), and we are not loading an
		 * existing pool configuration.  This looks like a
		 * vdev add operation to a new or existing pool.
		 * Assume the user really wants to do this, and find
		 * GEOM provider by its name, ignoring GUID mismatches.
		 *
		 * XXPOLICY: It would be safer to only allow a device
		 *           that is unlabeled or labeled but missing
		 *           GUID information to be opened in this fashion,
		 *           unless we are doing a split, in which case we
		 *           should allow any guid.
		 */
		cp = vdev_geom_open_by_path(vd, 0);
	} else {
		/*
		 * Try using the recorded path for this device, but only
		 * accept it if its label data contains the expected GUIDs.
		 */
		cp = vdev_geom_open_by_path(vd, 1);
		if (cp == NULL) {
			/*
			 * The device at vd->vdev_path doesn't have the
			 * expected GUIDs.  The disks might have merely
			 * moved around so try all other GEOM providers
			 * to find one with the right GUIDs.
			 */
			cp = vdev_geom_open_by_guids(vd);
		}
	}

	/* Clear the TLS now that tasting is done */
	VERIFY0(tsd_set(zfs_geom_probe_vdev_key, NULL));

	if (cp == NULL) {
		ZFS_LOG(1, "Vdev %s not found.", vd->vdev_path);
		error = ENOENT;
	} else {
		struct consumer_priv_t *priv;
		struct consumer_vdev_elem *elem;
		int spamode;

		priv = (struct consumer_priv_t *)&cp->private;
		if (cp->private == NULL)
			SLIST_INIT(priv);
		elem = g_malloc(sizeof (*elem), M_WAITOK|M_ZERO);
		elem->vd = vd;
		SLIST_INSERT_HEAD(priv, elem, elems);

		spamode = spa_mode(vd->vdev_spa);
		if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
		    !ISP2(cp->provider->sectorsize)) {
			ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
			    cp->provider->name);

			vdev_geom_close_locked(vd);
			error = EINVAL;
			cp = NULL;
		} else if (cp->acw == 0 && (spamode & FWRITE) != 0) {
			int i;

			for (i = 0; i < 5; i++) {
				error = g_access(cp, 0, 1, 0);
				if (error == 0)
					break;
				g_topology_unlock();
				tsleep(vd, 0, "vdev", hz / 2);
				g_topology_lock();
			}
			if (error != 0) {
				printf("ZFS WARNING: Unable to open %s for "
				    "writing (error=%d).\n",
				    cp->provider->name, error);
				vdev_geom_close_locked(vd);
				cp = NULL;
			}
		}
	}

	/* Fetch initial physical path information for this device. */
	if (cp != NULL) {
		vdev_geom_attrchanged(cp, "GEOM::physpath");

		/* Set other GEOM characteristics */
		vdev_geom_set_physpath(vd, cp, /* do_null_update */ B_FALSE);
	}

	g_topology_unlock();
	PICKUP_GIANT();
	if (cp == NULL) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		vdev_dbgmsg(vd, "vdev_geom_open: failed to open [error=%d]",
		    error);
		return (error);
	}
skip_open:
	pp = cp->provider;

	/*
	 * Determine the actual size of the device.
	 */
	*max_psize = *psize = pp->mediasize;

	/*
	 * Determine the device's minimum transfer size and preferred
	 * transfer size.
	 */
	*logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
	*physical_ashift = 0;
	if (pp->stripesize && pp->stripesize > (1 << *logical_ashift) &&
	    ISP2(pp->stripesize) && pp->stripeoffset == 0)
		*physical_ashift = highbit(pp->stripesize) - 1;

	/*
	 * Clear the nowritecache settings, so that on a vdev_reopen()
	 * we will try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	/* Inform the ZIO pipeline that we are non-rotational. */
	error = g_getattr("GEOM::rotation_rate", cp, &rate);
	if (error == 0 && rate == DISK_RR_NON_ROTATING)
		vd->vdev_nonrot = B_TRUE;
	else
		vd->vdev_nonrot = B_FALSE;

	/* Set when device reports it supports TRIM. */
	error = g_getattr("GEOM::candelete", cp, &has_trim);
	vd->vdev_has_trim = (error == 0 && has_trim);

	/* Set when device reports it supports secure TRIM. */
	/* unavailable on FreeBSD */
	vd->vdev_has_securetrim = B_FALSE;

	return (0);
}

static void
vdev_geom_close(vdev_t *vd)
{
	struct g_consumer *cp;
	boolean_t locked;

	cp = vd->vdev_tsd;

	DROP_GIANT();
	locked = g_topology_locked();
	if (!locked)
		g_topology_lock();

	if (!vd->vdev_reopening ||
	    (cp != NULL && ((cp->flags & G_CF_ORPHAN) != 0 ||
	    (cp->provider != NULL && cp->provider->error != 0))))
		vdev_geom_close_locked(vd);

	if (!locked)
		g_topology_unlock();
	PICKUP_GIANT();
}

static void
vdev_geom_io_intr(struct bio *bp)
{
	vdev_t *vd;
	zio_t *zio;

	zio = bp->bio_caller1;
	vd = zio->io_vd;
	zio->io_error = bp->bio_error;
	if (zio->io_error == 0 && bp->bio_resid != 0)
		zio->io_error = SET_ERROR(EIO);

	switch (zio->io_error) {
	case ENXIO:
		if (!vd->vdev_remove_wanted) {
			/*
			 * If the provider's error is set, we assume it is
			 * being removed.
			 */
			if (bp->bio_to->error != 0) {
				vd->vdev_remove_wanted = B_TRUE;
				spa_async_request(zio->io_spa,
				    SPA_ASYNC_REMOVE);
			} else if (!vd->vdev_delayed_close) {
				vd->vdev_delayed_close = B_TRUE;
			}
		}
		break;
	}

	/*
	 * We have to split bio freeing into two parts, because the ABD code
	 * cannot be called in this context and vdev_op_io_done is not called
	 * for ZIO_TYPE_FLUSH zios.
	 */
	if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
		g_destroy_bio(bp);
		zio->io_bio = NULL;
	}
	zio_delay_interrupt(zio);
}
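
/*
 * The helpers below implement unmapped I/O: when a GEOM provider sets
 * G_PF_ACCEPT_UNMAPPED, a bio may carry an array of vm_page pointers
 * (bio_ma) plus a starting offset instead of a mapped kernel virtual
 * address, saving ZFS a copy of scattered or gang ABD buffers into a
 * linear bounce buffer.
 */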
struct vdev_geom_check_unmapped_cb_state {
	int	pages;
	uint_t	end;
};

/*
 * Callback to check the ABD segment size/alignment and count the pages.
 * GEOM requires the data buffer to look virtually contiguous.  That means
 * only the first page of the buffer may start at a non-zero page offset,
 * and only the last page may end before a page boundary.  All other
 * physical pages must be full.
 */
static int
vdev_geom_check_unmapped_cb(void *buf, size_t len, void *priv)
{
	struct vdev_geom_check_unmapped_cb_state *s = priv;
	vm_offset_t off = (vm_offset_t)buf & PAGE_MASK;

	if (s->pages != 0 && off != 0)
		return (1);
	if (s->end != 0)
		return (1);
	s->end = (off + len) & PAGE_MASK;
	s->pages += (off + len + PAGE_MASK) >> PAGE_SHIFT;
	return (0);
}

/*
 * Check whether we can use unmapped I/O for this ZIO on this device to
 * avoid copying data between a scattered and/or gang ABD buffer and a
 * linear one.
 */
static int
vdev_geom_check_unmapped(zio_t *zio, struct g_consumer *cp)
{
	struct vdev_geom_check_unmapped_cb_state s;

	/* If unmapped I/O is administratively disabled, respect that. */
	if (!unmapped_buf_allowed)
		return (0);

	/* If the buffer is already linear, then nothing to do here. */
	if (abd_is_linear(zio->io_abd))
		return (0);

	/*
	 * If unmapped I/O is not supported by the GEOM provider,
	 * then we can't do anything and have to copy the data.
	 */
	if ((cp->provider->flags & G_PF_ACCEPT_UNMAPPED) == 0)
		return (0);

	/* Check the buffer chunks sizes/alignments and count pages. */
	s.pages = s.end = 0;
	if (abd_iterate_func(zio->io_abd, 0, zio->io_size,
	    vdev_geom_check_unmapped_cb, &s))
		return (0);
	return (s.pages);
}

/*
 * Callback to translate the ABD segment into an array of physical pages.
 */
static int
vdev_geom_fill_unmap_cb(void *buf, size_t len, void *priv)
{
	struct bio *bp = priv;
	vm_offset_t addr = (vm_offset_t)buf;
	vm_offset_t end = addr + len;

	if (bp->bio_ma_n == 0) {
		bp->bio_ma_offset = addr & PAGE_MASK;
		addr &= ~PAGE_MASK;
	} else {
		ASSERT0(P2PHASE(addr, PAGE_SIZE));
	}
	do {
		bp->bio_ma[bp->bio_ma_n++] =
		    PHYS_TO_VM_PAGE(pmap_kextract(addr));
		addr += PAGE_SIZE;
	} while (addr < end);
	return (0);
}
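
/*
 * Example (assuming 4 KB pages): a page-aligned 16 KB ABD chunk adds
 * four vm_page pointers to bio_ma and leaves bio_ma_offset at 0.
 * pmap_kextract() resolves each kernel virtual page to a physical
 * address and PHYS_TO_VM_PAGE() maps that to its vm_page; this relies
 * on ABD buffers being wired kernel memory for the duration of the I/O.
 */
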
static void
vdev_geom_io_start(zio_t *zio)
{
	vdev_t *vd;
	struct g_consumer *cp;
	struct bio *bp;

	vd = zio->io_vd;

	if (zio->io_type == ZIO_TYPE_FLUSH) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		if (zfs_nocacheflush || vdev_geom_bio_flush_disable) {
			zio_execute(zio);
			return;
		}

		if (vd->vdev_nowritecache) {
			zio->io_error = SET_ERROR(ENOTSUP);
			zio_execute(zio);
			return;
		}
	} else if (zio->io_type == ZIO_TYPE_TRIM) {
		if (vdev_geom_bio_delete_disable) {
			zio_execute(zio);
			return;
		}
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE ||
	    zio->io_type == ZIO_TYPE_TRIM ||
	    zio->io_type == ZIO_TYPE_FLUSH);

	cp = vd->vdev_tsd;
	if (cp == NULL) {
		zio->io_error = SET_ERROR(ENXIO);
		zio_interrupt(zio);
		return;
	}
	bp = g_alloc_bio();
	bp->bio_caller1 = zio;
	switch (zio->io_type) {
	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		zio->io_target_timestamp = zio_handle_io_delay(zio);
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		if (zio->io_type == ZIO_TYPE_READ)
			bp->bio_cmd = BIO_READ;
		else
			bp->bio_cmd = BIO_WRITE;

		/*
		 * If possible, represent a scattered and/or gang ABD buffer
		 * to GEOM as an array of physical pages.  This satisfies
		 * GEOM's requirement for a virtually contiguous buffer
		 * without copying.
		 */
		int pgs = vdev_geom_check_unmapped(zio, cp);
		if (pgs > 0) {
			bp->bio_ma = malloc(sizeof (struct vm_page *) * pgs,
			    M_DEVBUF, M_WAITOK);
			bp->bio_ma_n = 0;
			bp->bio_ma_offset = 0;
			abd_iterate_func(zio->io_abd, 0, zio->io_size,
			    vdev_geom_fill_unmap_cb, bp);
			bp->bio_data = unmapped_buf;
			bp->bio_flags |= BIO_UNMAPPED;
		} else {
			if (zio->io_type == ZIO_TYPE_READ) {
				bp->bio_data = abd_borrow_buf(zio->io_abd,
				    zio->io_size);
			} else {
				bp->bio_data = abd_borrow_buf_copy(zio->io_abd,
				    zio->io_size);
			}
		}
		break;
	case ZIO_TYPE_TRIM:
		bp->bio_cmd = BIO_DELETE;
		bp->bio_data = NULL;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_FLUSH:
		bp->bio_cmd = BIO_FLUSH;
		bp->bio_data = NULL;
		bp->bio_offset = cp->provider->mediasize;
		bp->bio_length = 0;
		break;
	default:
		panic("invalid zio->io_type: %d\n", zio->io_type);
	}
	bp->bio_done = vdev_geom_io_intr;
	zio->io_bio = bp;

	g_io_request(bp, cp);
}

static void
vdev_geom_io_done(zio_t *zio)
{
	struct bio *bp = zio->io_bio;

	if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
		ASSERT3P(bp, ==, NULL);
		return;
	}

	if (bp == NULL) {
		ASSERT3S(zio->io_error, ==, ENXIO);
		return;
	}

	if (bp->bio_ma != NULL) {
		free(bp->bio_ma, M_DEVBUF);
	} else {
		if (zio->io_type == ZIO_TYPE_READ) {
			abd_return_buf_copy(zio->io_abd, bp->bio_data,
			    zio->io_size);
		} else {
			abd_return_buf(zio->io_abd, bp->bio_data,
			    zio->io_size);
		}
	}

	g_destroy_bio(bp);
	zio->io_bio = NULL;
}

static void
vdev_geom_hold(vdev_t *vd)
{
}

static void
vdev_geom_rele(vdev_t *vd)
{
}

vdev_ops_t vdev_disk_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_geom_open,
	.vdev_op_close = vdev_geom_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_geom_io_start,
	.vdev_op_io_done = vdev_geom_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_geom_hold,
	.vdev_op_rele = vdev_geom_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DISK,	/* name of this vdev type */
	.vdev_op_leaf = B_TRUE		/* leaf vdev */
};