/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault.  These are kept in a global list.  Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field.  If this is set,
 * it means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
41 */ 42 43 #include <sys/arc.h> 44 #include <sys/zio_impl.h> 45 #include <sys/zfs_ioctl.h> 46 #include <sys/spa_impl.h> 47 #include <sys/vdev_impl.h> 48 #include <sys/fs/zfs.h> 49 50 uint32_t zio_injection_enabled; 51 52 typedef struct inject_handler { 53 int zi_id; 54 spa_t *zi_spa; 55 zinject_record_t zi_record; 56 list_node_t zi_link; 57 } inject_handler_t; 58 59 static list_t inject_handlers; 60 static krwlock_t inject_lock; 61 static int inject_next_id = 1; 62 63 /* 64 * Returns true if the given record matches the I/O in progress. 65 */ 66 static boolean_t 67 zio_match_handler(zbookmark_t *zb, uint64_t type, 68 zinject_record_t *record, int error) 69 { 70 /* 71 * Check for a match against the MOS, which is based on type 72 */ 73 if (zb->zb_objset == 0 && record->zi_objset == 0 && 74 record->zi_object == 0) { 75 if (record->zi_type == DMU_OT_NONE || 76 type == record->zi_type) 77 return (record->zi_freq == 0 || 78 spa_get_random(100) < record->zi_freq); 79 else 80 return (B_FALSE); 81 } 82 83 /* 84 * Check for an exact match. 85 */ 86 if (zb->zb_objset == record->zi_objset && 87 zb->zb_object == record->zi_object && 88 zb->zb_level == record->zi_level && 89 zb->zb_blkid >= record->zi_start && 90 zb->zb_blkid <= record->zi_end && 91 error == record->zi_error) 92 return (record->zi_freq == 0 || 93 spa_get_random(100) < record->zi_freq); 94 95 return (B_FALSE); 96 } 97 98 /* 99 * Panic the system when a config change happens in the function 100 * specified by tag. 
101 */ 102 void 103 zio_handle_panic_injection(spa_t *spa, char *tag) 104 { 105 inject_handler_t *handler; 106 107 rw_enter(&inject_lock, RW_READER); 108 109 for (handler = list_head(&inject_handlers); handler != NULL; 110 handler = list_next(&inject_handlers, handler)) { 111 112 if (spa != handler->zi_spa) 113 continue; 114 115 if (strcmp(tag, handler->zi_record.zi_func) == 0) 116 panic("Panic requested in function %s\n", tag); 117 } 118 119 rw_exit(&inject_lock); 120 } 121 122 /* 123 * Determine if the I/O in question should return failure. Returns the errno 124 * to be returned to the caller. 125 */ 126 int 127 zio_handle_fault_injection(zio_t *zio, int error) 128 { 129 int ret = 0; 130 inject_handler_t *handler; 131 132 /* 133 * Ignore I/O not associated with any logical data. 134 */ 135 if (zio->io_logical == NULL) 136 return (0); 137 138 /* 139 * Currently, we only support fault injection on reads. 140 */ 141 if (zio->io_type != ZIO_TYPE_READ) 142 return (0); 143 144 rw_enter(&inject_lock, RW_READER); 145 146 for (handler = list_head(&inject_handlers); handler != NULL; 147 handler = list_next(&inject_handlers, handler)) { 148 149 /* Ignore errors not destined for this pool */ 150 if (zio->io_spa != handler->zi_spa) 151 continue; 152 153 /* Ignore device errors and panic injection */ 154 if (handler->zi_record.zi_guid != 0 || 155 handler->zi_record.zi_func[0] != '\0' || 156 handler->zi_record.zi_duration != 0) 157 continue; 158 159 /* If this handler matches, return EIO */ 160 if (zio_match_handler(&zio->io_logical->io_bookmark, 161 zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE, 162 &handler->zi_record, error)) { 163 ret = error; 164 break; 165 } 166 } 167 168 rw_exit(&inject_lock); 169 170 return (ret); 171 } 172 173 /* 174 * Determine if the zio is part of a label update and has an injection 175 * handler associated with that portion of the label. 
Currently, we 176 * allow error injection in either the nvlist or the uberblock region of 177 * of the vdev label. 178 */ 179 int 180 zio_handle_label_injection(zio_t *zio, int error) 181 { 182 inject_handler_t *handler; 183 vdev_t *vd = zio->io_vd; 184 uint64_t offset = zio->io_offset; 185 int label; 186 int ret = 0; 187 188 if (offset >= VDEV_LABEL_START_SIZE && 189 offset < vd->vdev_psize - VDEV_LABEL_END_SIZE) 190 return (0); 191 192 rw_enter(&inject_lock, RW_READER); 193 194 for (handler = list_head(&inject_handlers); handler != NULL; 195 handler = list_next(&inject_handlers, handler)) { 196 uint64_t start = handler->zi_record.zi_start; 197 uint64_t end = handler->zi_record.zi_end; 198 199 /* Ignore device only faults or panic injection */ 200 if (handler->zi_record.zi_start == 0 || 201 handler->zi_record.zi_func[0] != '\0' || 202 handler->zi_record.zi_duration != 0) 203 continue; 204 205 /* 206 * The injection region is the relative offsets within a 207 * vdev label. We must determine the label which is being 208 * updated and adjust our region accordingly. 209 */ 210 label = vdev_label_number(vd->vdev_psize, offset); 211 start = vdev_label_offset(vd->vdev_psize, label, start); 212 end = vdev_label_offset(vd->vdev_psize, label, end); 213 214 if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid && 215 (offset >= start && offset <= end)) { 216 ret = error; 217 break; 218 } 219 } 220 rw_exit(&inject_lock); 221 return (ret); 222 } 223 224 225 int 226 zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error) 227 { 228 inject_handler_t *handler; 229 int ret = 0; 230 231 /* 232 * We skip over faults in the labels unless it's during 233 * device open (i.e. zio == NULL). 
234 */ 235 if (zio != NULL) { 236 uint64_t offset = zio->io_offset; 237 238 if (offset < VDEV_LABEL_START_SIZE || 239 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE) 240 return (0); 241 } 242 243 rw_enter(&inject_lock, RW_READER); 244 245 for (handler = list_head(&inject_handlers); handler != NULL; 246 handler = list_next(&inject_handlers, handler)) { 247 248 /* 249 * Ignore label specific faults, panic injection 250 * or fake writes 251 */ 252 if (handler->zi_record.zi_start != 0 || 253 handler->zi_record.zi_func[0] != '\0' || 254 handler->zi_record.zi_duration != 0) 255 continue; 256 257 if (vd->vdev_guid == handler->zi_record.zi_guid) { 258 if (handler->zi_record.zi_failfast && 259 (zio == NULL || (zio->io_flags & 260 (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) { 261 continue; 262 } 263 264 /* Handle type specific I/O failures */ 265 if (zio != NULL && 266 handler->zi_record.zi_iotype != ZIO_TYPES && 267 handler->zi_record.zi_iotype != zio->io_type) 268 continue; 269 270 if (handler->zi_record.zi_error == error) { 271 /* 272 * For a failed open, pretend like the device 273 * has gone away. 274 */ 275 if (error == ENXIO) 276 vd->vdev_stat.vs_aux = 277 VDEV_AUX_OPEN_FAILED; 278 ret = error; 279 break; 280 } 281 if (handler->zi_record.zi_error == ENXIO) { 282 ret = EIO; 283 break; 284 } 285 } 286 } 287 288 rw_exit(&inject_lock); 289 290 return (ret); 291 } 292 293 /* 294 * Simulate hardware that ignores cache flushes. For requested number 295 * of seconds nix the actual writing to disk. 
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa)
			continue;

		/* Only ignored-writes handlers carry a nonzero duration. */
		if (handler->zi_record.zi_duration == 0)
			continue;

		/*
		 * Positive duration implies # of seconds, negative
		 * a number of txgs
		 */
		if (handler->zi_record.zi_timer == 0) {
			/*
			 * First matching write: arm the expiration timer
			 * with the current tick count (seconds mode) or
			 * this zio's txg (txg mode); checked later by
			 * spa_handle_ignored_writes().
			 *
			 * NOTE(review): zi_timer is written here while
			 * inject_lock is held only as READER; racing
			 * writers would store nearly identical values,
			 * but confirm this is intentional.
			 */
			if (handler->zi_record.zi_duration > 0)
				handler->zi_record.zi_timer = lbolt64;
			else
				handler->zi_record.zi_timer = zio->io_txg;
		}
		/* Strip the vdev I/O stages: the write never reaches disk. */
		zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		break;
	}

	rw_exit(&inject_lock);
}

/*
 * Sanity check, called from the sync path: verify that any armed
 * ignored-writes handler for this pool is either unarmed or still
 * within its configured window.
 */
void
spa_handle_ignored_writes(spa_t *spa)
{
	inject_handler_t *handler;

	/* Fast path: no injection handlers are registered at all. */
	if (zio_injection_enabled == 0)
		return;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (spa != handler->zi_spa)
			continue;

		/* Only ignored-writes handlers carry a nonzero duration. */
		if (handler->zi_record.zi_duration == 0)
			continue;

		if (handler->zi_record.zi_duration > 0) {
			/*
			 * Seconds mode: zi_timer and lbolt64 are in ticks,
			 * zi_duration in seconds, hence the '* hz'.
			 */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer +
			    handler->zi_record.zi_duration * hz > lbolt64);
		} else {
			/* duration is negative so the subtraction here adds */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer -
			    handler->zi_record.zi_duration >=
			    spa->spa_syncing_txg);
		}
	}

	rw_exit(&inject_lock);
}

/*
 * Create a new handler for the given record. We add it to the list, adding
 * a reference to the spa_t in the process. We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
371 */ 372 int 373 zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record) 374 { 375 inject_handler_t *handler; 376 int error; 377 spa_t *spa; 378 379 /* 380 * If this is pool-wide metadata, make sure we unload the corresponding 381 * spa_t, so that the next attempt to load it will trigger the fault. 382 * We call spa_reset() to unload the pool appropriately. 383 */ 384 if (flags & ZINJECT_UNLOAD_SPA) 385 if ((error = spa_reset(name)) != 0) 386 return (error); 387 388 if (!(flags & ZINJECT_NULL)) { 389 /* 390 * spa_inject_ref() will add an injection reference, which will 391 * prevent the pool from being removed from the namespace while 392 * still allowing it to be unloaded. 393 */ 394 if ((spa = spa_inject_addref(name)) == NULL) 395 return (ENOENT); 396 397 handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP); 398 399 rw_enter(&inject_lock, RW_WRITER); 400 401 *id = handler->zi_id = inject_next_id++; 402 handler->zi_spa = spa; 403 handler->zi_record = *record; 404 list_insert_tail(&inject_handlers, handler); 405 atomic_add_32(&zio_injection_enabled, 1); 406 407 rw_exit(&inject_lock); 408 } 409 410 /* 411 * Flush the ARC, so that any attempts to read this data will end up 412 * going to the ZIO layer. Note that this is a little overkill, but 413 * we don't have the necessary ARC interfaces to do anything else, and 414 * fault injection isn't a performance critical path. 415 */ 416 if (flags & ZINJECT_FLUSH_ARC) 417 arc_flush(NULL); 418 419 return (0); 420 } 421 422 /* 423 * Returns the next record with an ID greater than that supplied to the 424 * function. Used to iterate over all handlers in the system. 
425 */ 426 int 427 zio_inject_list_next(int *id, char *name, size_t buflen, 428 zinject_record_t *record) 429 { 430 inject_handler_t *handler; 431 int ret; 432 433 mutex_enter(&spa_namespace_lock); 434 rw_enter(&inject_lock, RW_READER); 435 436 for (handler = list_head(&inject_handlers); handler != NULL; 437 handler = list_next(&inject_handlers, handler)) 438 if (handler->zi_id > *id) 439 break; 440 441 if (handler) { 442 *record = handler->zi_record; 443 *id = handler->zi_id; 444 (void) strncpy(name, spa_name(handler->zi_spa), buflen); 445 ret = 0; 446 } else { 447 ret = ENOENT; 448 } 449 450 rw_exit(&inject_lock); 451 mutex_exit(&spa_namespace_lock); 452 453 return (ret); 454 } 455 456 /* 457 * Clear the fault handler with the given identifier, or return ENOENT if none 458 * exists. 459 */ 460 int 461 zio_clear_fault(int id) 462 { 463 inject_handler_t *handler; 464 int ret; 465 466 rw_enter(&inject_lock, RW_WRITER); 467 468 for (handler = list_head(&inject_handlers); handler != NULL; 469 handler = list_next(&inject_handlers, handler)) 470 if (handler->zi_id == id) 471 break; 472 473 if (handler == NULL) { 474 ret = ENOENT; 475 } else { 476 list_remove(&inject_handlers, handler); 477 spa_inject_delref(handler->zi_spa); 478 kmem_free(handler, sizeof (inject_handler_t)); 479 atomic_add_32(&zio_injection_enabled, -1); 480 ret = 0; 481 } 482 483 rw_exit(&inject_lock); 484 485 return (ret); 486 } 487 488 void 489 zio_inject_init(void) 490 { 491 rw_init(&inject_lock, NULL, RW_DEFAULT, NULL); 492 list_create(&inject_handlers, sizeof (inject_handler_t), 493 offsetof(inject_handler_t, zi_link)); 494 } 495 496 void 497 zio_inject_fini(void) 498 { 499 list_destroy(&inject_handlers); 500 rw_destroy(&inject_lock); 501 } 502