Lines Matching defs:vdev

35  * marking the vdev FAULTED (for I/O errors) or DEGRADED (for checksum errors).
99 * Find a vdev within a tree with a matching GUID.
112 "matched vdev %llu", guid);
190 * Given a vdev guid, find and remove all spares associated with it.
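
Line 190 describes a cleanup pass over hot spares. One plausible shape for it, hedged: a zpool_iter() callback that scans the root vdev's ZPOOL_CONFIG_SPARES array for the given GUID and removes any match. The callback name and the choice of zpool_vdev_remove() are assumptions, not the shipped code:

#include <libzfs.h>
#include <stdlib.h>

/*
 * Hypothetical zpool_iter() callback: if this pool lists a hot spare
 * whose GUID matches *(uint64_t *)arg, remove that spare from the pool.
 */
static int
remove_matching_spare(zpool_handle_t *zhp, void *arg)
{
	uint64_t target = *(uint64_t *)arg;
	nvlist_t *config, *nvroot, **spares;
	uint_t i, nspares;
	uint64_t guid;
	char *devname;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0 ||
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0) {
		zpool_close(zhp);
		return (0);		/* pool has no spares; keep iterating */
	}

	for (i = 0; i < nspares; i++) {
		if (nvlist_lookup_uint64(spares[i], ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != target)
			continue;
		devname = zpool_vdev_name(NULL, zhp, spares[i], B_FALSE);
		(void) zpool_vdev_remove(zhp, devname);
		free(devname);
	}

	zpool_close(zhp);
	return (0);
}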
205 * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
216 * Find the corresponding pool and make sure the vdev still exists.
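
Lines 205-216 pair a pool GUID with a vdev GUID. A sketch of that lookup, assuming zpool_iter() plus the find_vdev_by_guid() helper sketched above; the struct and callback names are ours:

#include <libzfs.h>

typedef struct guid_search {
	uint64_t	gs_pool_guid;	/* pool GUID from the FMA event */
	uint64_t	gs_vdev_guid;	/* vdev GUID from the FMA event */
	zpool_handle_t	*gs_zhp;	/* matching pool, if found */
	nvlist_t	*gs_vdev;	/* matching vdev nvlist, if found */
} guid_search_t;

/* zpool_iter() callback: stop once the pool with the requested GUID is found. */
static int
search_pool_cb(zpool_handle_t *zhp, void *arg)
{
	guid_search_t *gsp = arg;
	nvlist_t *config, *nvroot;

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL) !=
	    gsp->gs_pool_guid) {
		zpool_close(zhp);
		return (0);
	}

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0)
		gsp->gs_vdev = find_vdev_by_guid(nvroot, gsp->gs_vdev_guid);

	gsp->gs_zhp = zhp;	/* caller checks gs_vdev and must zpool_close() */
	return (1);		/* non-zero stops the iteration */
}

With these in place, zpool_iter(zhdl, search_pool_cb, &gs) leaves gs.gs_zhp and gs.gs_vdev populated when both GUIDs resolve.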
241 * Given a vdev, attempt to replace it with every known spare until one
246 replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
277 dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
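
The excerpt at lines 241-277 is the hot-spare kick-in path. Below is a minimal sketch of the loop it describes, not the shipped agent: the function name try_spares is ours, the OpenZFS 2.x zpool_vdev_attach() signature (with the trailing rebuild flag) is assumed, and the spare's display name is used where the shipped code passes the spare's ZPOOL_CONFIG_PATH:

#include <libzfs.h>
#include <stdlib.h>

/*
 * Walk the pool's hot-spare list and attach the first spare that
 * zpool_vdev_attach() accepts as a replacement for the faulted vdev.
 */
static boolean_t
try_spares(zpool_handle_t *zhp, nvlist_t *vdev)
{
	nvlist_t *config, *nvroot, *replacement;
	nvlist_t **spares;
	uint_t s, nspares;
	char *dev_name, *spare_name;
	boolean_t replaced = B_FALSE;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0 ||
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return (B_FALSE);	/* pool has no hot spares */

	if (nvlist_alloc(&replacement, NV_UNIQUE_NAME, 0) != 0)
		return (B_FALSE);
	(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT);

	dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);

	for (s = 0; s < nspares && !replaced; s++) {
		/* The replacement nvlist holds exactly one child: this spare. */
		(void) nvlist_add_nvlist_array(replacement,
		    ZPOOL_CONFIG_CHILDREN, &spares[s], 1);

		spare_name = zpool_vdev_name(NULL, zhp, spares[s], B_FALSE);
		if (zpool_vdev_attach(zhp, dev_name, spare_name,
		    replacement, B_TRUE /* replacing */,
		    B_FALSE /* no sequential rebuild */) == 0)
			replaced = B_TRUE;
		free(spare_name);
	}

	free(dev_name);
	nvlist_free(replacement);
	return (replaced);
}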
322 * Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
360 fmd_hdl_debug(hdl, "marking repaired vdev %llu on pool %llu",
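
Lines 322-360 cover the "device is usable again" notification. One way such an agent could avoid repeating the repair for the same (pool, vdev) pair is a small bookkeeping list; this is purely illustrative, with invented names (repaired_entry_t, mark_repaired()), not the module's actual state:

#include <libzfs.h>
#include <stdlib.h>

/* Hypothetical record of a (pool, vdev) pair already marked repaired. */
typedef struct repaired_entry {
	uint64_t		re_pool_guid;
	uint64_t		re_vdev_guid;
	struct repaired_entry	*re_next;
} repaired_entry_t;

/*
 * Return B_TRUE if the pair was newly recorded, B_FALSE if it was already
 * present, so the caller can skip a redundant repair attempt.
 */
static boolean_t
mark_repaired(repaired_entry_t **listp, uint64_t pool_guid, uint64_t vdev_guid)
{
	repaired_entry_t *re;

	for (re = *listp; re != NULL; re = re->re_next) {
		if (re->re_pool_guid == pool_guid &&
		    re->re_vdev_guid == vdev_guid)
			return (B_FALSE);
	}

	re = calloc(1, sizeof (*re));
	if (re == NULL)
		return (B_FALSE);
	re->re_pool_guid = pool_guid;
	re->re_vdev_guid = vdev_guid;
	re->re_next = *listp;
	*listp = re;
	return (B_TRUE);
}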
381 nvlist_t *vdev = NULL;
399 * l2arc vdev, in which case we just offline it.
435 &vdev)) == NULL)
438 devname = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
440 nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
444 * If state removed is requested for an already removed vdev,
452 /* Remove the vdev since device is unplugged */
460 /* Replace the vdev with a spare if it's not an l2arc */
463 replace_with_spare(hdl, zhp, vdev) == B_FALSE)) {
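
Lines 381-463 handle the physical-removal case: the handler reads the vdev's status from ZPOOL_CONFIG_VDEV_STATS and only tries a hot spare when the device is not an l2arc cache device. A sketch of that state check and decision, assuming the usual vdev_stat_t layout; handle_removed() is our name and try_spares() is the spare-attach sketch shown earlier:

#include <libzfs.h>
#include <stdlib.h>

/*
 * Decide what to do when the device backing 'vdev' is unplugged.
 * 'l2arc' is B_TRUE when the vdev sits under the pool's cache list.
 */
static void
handle_removed(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t l2arc)
{
	vdev_stat_t *vs;
	uint_t c;
	char *devname;

	/* The stats array embeds the vdev's current state (vs_state). */
	if (nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) != 0)
		return;

	/* Nothing to do if the vdev is already in the REMOVED state. */
	if (vs->vs_state == VDEV_STATE_REMOVED)
		return;

	devname = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);

	if (l2arc) {
		/* Cache devices are simply taken offline (line 399). */
		(void) zpool_vdev_offline(zhp, devname, B_TRUE);
	} else {
		/*
		 * The shipped agent also asks ZFS to drop the unplugged
		 * vdev (line 452) before falling back to a hot spare;
		 * that step is omitted here.
		 */
		(void) try_spares(zhp, vdev);
	}

	free(devname);
}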
517 * for faults targeting a specific vdev (open failure or SERD
521 if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
524 "fault.fs.zfs.vdev.checksum")) {
527 "fault.fs.zfs.vdev.slow_io")) {
544 * attempt to find the matching vdev.
568 &vdev)) == NULL)
586 * If this is a repair event, then mark the vdev as repaired and
591 fmd_hdl_debug(hdl, "zpool_clear of pool '%s' vdev %llu",
607 fmd_hdl_debug(hdl, "zpool_vdev_%s: vdev %llu on '%s'",
614 (void) replace_with_spare(hdl, zhp, vdev);
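
The tail of the handler (lines 586-614) covers the opposite direction: on a repair event the vdev's error state is cleared rather than retired, and a fault that retired a device still falls through to the hot-spare attempt at line 614. A sketch of the repair branch; whether the shipped agent uses zpool_vdev_clear() or a pool-wide zpool_clear() here is an assumption on our part, and repair_vdev is our name:

#include <libzfs.h>
#include "fmd_api.h"	/* zed's fmd shim */

/*
 * When the diagnosis engine says the vdev is healthy again, clear its
 * fault state instead of retiring it.
 */
static void
repair_vdev(fmd_hdl_t *hdl, zpool_handle_t *zhp, uint64_t vdev_guid)
{
	fmd_hdl_debug(hdl, "zpool_clear of pool '%s' vdev %llu",
	    zpool_get_name(zhp), (unsigned long long)vdev_guid);
	(void) zpool_vdev_clear(zhp, vdev_guid);
}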