/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */
241544Seschrock
/*
 * Routines to manage the on-disk persistent error log.
 *
 * Each pool stores a log of all logical data errors seen during normal
 * operation.  This is actually the union of two distinct logs: the last log,
 * and the current log.  All errors seen are logged to the current log.  When a
 * scrub completes, the current log becomes the last log, the last log is
 * thrown out, and the current log is reinitialized.  This way, if an error is
 * somehow corrected, a new scrub will show that it no longer exists, and it
 * will be deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the
 * zbookmark tuple (objset, object, level, blkid), and whose contents is an
 * optional 'objset:object' human-readable string describing the data.  When an
 * error is first logged, this string will be empty, indicating that no name is
 * known.  This prevents us from having to issue a potentially large amount of
 * I/O to discover the object name during an error path.  Instead, we do the
 * calculation when the data is requested, storing the result so future queries
 * will be faster.
 *
 * This log is then shipped into an nvlist where the key is the dataset name
 * and the value is the object name.  Userland is then responsible for
 * uniquifying this list and displaying it to the user.
 */
491544Seschrock
501544Seschrock #include <sys/dmu_tx.h>
511544Seschrock #include <sys/spa.h>
521544Seschrock #include <sys/spa_impl.h>
531544Seschrock #include <sys/zap.h>
541544Seschrock #include <sys/zio.h>
551544Seschrock
561544Seschrock
571544Seschrock /*
581544Seschrock * Convert a bookmark to a string.
591544Seschrock */
601544Seschrock static void
bookmark_to_name(zbookmark_t * zb,char * buf,size_t len)611544Seschrock bookmark_to_name(zbookmark_t *zb, char *buf, size_t len)
621544Seschrock {
631544Seschrock (void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
641544Seschrock (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
651544Seschrock (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid);
661544Seschrock }
671544Seschrock
/*
 * Parse a bookmark key string ("objset:object:level:blkid", hex fields)
 * back into a zbookmark_t.  Inverse of bookmark_to_name().
 */
#ifdef _KERNEL
static void
name_to_bookmark(char *buf, zbookmark_t *zb)
{
	char *pos = buf;

	zb->zb_objset = strtonum(pos, &pos);
	ASSERT(*pos == ':');
	zb->zb_object = strtonum(pos + 1, &pos);
	ASSERT(*pos == ':');
	zb->zb_level = (int)strtonum(pos + 1, &pos);
	ASSERT(*pos == ':');
	zb->zb_blkid = strtonum(pos + 1, &pos);
	ASSERT(*pos == '\0');
}
#endif
851544Seschrock
861544Seschrock /*
871544Seschrock * Log an uncorrectable error to the persistent error log. We add it to the
881544Seschrock * spa's list of pending errors. The changes are actually synced out to disk
891544Seschrock * during spa_errlog_sync().
901544Seschrock */
911544Seschrock void
spa_log_error(spa_t * spa,zio_t * zio)921544Seschrock spa_log_error(spa_t *spa, zio_t *zio)
931544Seschrock {
941544Seschrock zbookmark_t *zb = &zio->io_logical->io_bookmark;
951544Seschrock spa_error_entry_t search;
961544Seschrock spa_error_entry_t *new;
971544Seschrock avl_tree_t *tree;
981544Seschrock avl_index_t where;
991544Seschrock
1001544Seschrock /*
1011544Seschrock * If we are trying to import a pool, ignore any errors, as we won't be
1021544Seschrock * writing to the pool any time soon.
1031544Seschrock */
10411147SGeorge.Wilson@Sun.COM if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
1051544Seschrock return;
1061544Seschrock
1071544Seschrock mutex_enter(&spa->spa_errlist_lock);
1081544Seschrock
1091544Seschrock /*
1101544Seschrock * If we have had a request to rotate the log, log it to the next list
1111544Seschrock * instead of the current one.
1121544Seschrock */
1131544Seschrock if (spa->spa_scrub_active || spa->spa_scrub_finished)
1141544Seschrock tree = &spa->spa_errlist_scrub;
1151544Seschrock else
1161544Seschrock tree = &spa->spa_errlist_last;
1171544Seschrock
1181544Seschrock search.se_bookmark = *zb;
1191544Seschrock if (avl_find(tree, &search, &where) != NULL) {
1201544Seschrock mutex_exit(&spa->spa_errlist_lock);
1211544Seschrock return;
1221544Seschrock }
1231544Seschrock
1241544Seschrock new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
1251544Seschrock new->se_bookmark = *zb;
1261544Seschrock avl_insert(tree, new, where);
1271544Seschrock
1281544Seschrock mutex_exit(&spa->spa_errlist_lock);
1291544Seschrock }
1301544Seschrock
1311544Seschrock /*
1321544Seschrock * Return the number of errors currently in the error log. This is actually the
1331544Seschrock * sum of both the last log and the current log, since we don't know the union
1341544Seschrock * of these logs until we reach userland.
1351544Seschrock */
1361544Seschrock uint64_t
spa_get_errlog_size(spa_t * spa)1371544Seschrock spa_get_errlog_size(spa_t *spa)
1381544Seschrock {
1391544Seschrock uint64_t total = 0, count;
1401544Seschrock
1411544Seschrock mutex_enter(&spa->spa_errlog_lock);
1421544Seschrock if (spa->spa_errlog_scrub != 0 &&
1431544Seschrock zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub,
1441544Seschrock &count) == 0)
1451544Seschrock total += count;
1461544Seschrock
1471544Seschrock if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished &&
1481544Seschrock zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
1491544Seschrock &count) == 0)
1501544Seschrock total += count;
1511544Seschrock mutex_exit(&spa->spa_errlog_lock);
1521544Seschrock
1531544Seschrock mutex_enter(&spa->spa_errlist_lock);
1541544Seschrock total += avl_numnodes(&spa->spa_errlist_last);
1551544Seschrock total += avl_numnodes(&spa->spa_errlist_scrub);
1561544Seschrock mutex_exit(&spa->spa_errlist_lock);
1571544Seschrock
1581544Seschrock return (total);
1591544Seschrock }
1601544Seschrock
1611544Seschrock #ifdef _KERNEL
1621544Seschrock static int
process_error_log(spa_t * spa,uint64_t obj,void * addr,size_t * count)1631544Seschrock process_error_log(spa_t *spa, uint64_t obj, void *addr, size_t *count)
1641544Seschrock {
1651544Seschrock zap_cursor_t zc;
1661544Seschrock zap_attribute_t za;
1671544Seschrock zbookmark_t zb;
1681544Seschrock
1691544Seschrock if (obj == 0)
1701544Seschrock return (0);
1711544Seschrock
1721544Seschrock for (zap_cursor_init(&zc, spa->spa_meta_objset, obj);
1731544Seschrock zap_cursor_retrieve(&zc, &za) == 0;
1741544Seschrock zap_cursor_advance(&zc)) {
1751544Seschrock
1761544Seschrock if (*count == 0) {
1771544Seschrock zap_cursor_fini(&zc);
1781544Seschrock return (ENOMEM);
1791544Seschrock }
1801544Seschrock
1811544Seschrock name_to_bookmark(za.za_name, &zb);
1821544Seschrock
1831544Seschrock if (copyout(&zb, (char *)addr +
1841544Seschrock (*count - 1) * sizeof (zbookmark_t),
1851544Seschrock sizeof (zbookmark_t)) != 0)
1861544Seschrock return (EFAULT);
1871544Seschrock
1881544Seschrock *count -= 1;
1891544Seschrock }
1901544Seschrock
1911544Seschrock zap_cursor_fini(&zc);
1921544Seschrock
1931544Seschrock return (0);
1941544Seschrock }
1951544Seschrock
1961544Seschrock static int
process_error_list(avl_tree_t * list,void * addr,size_t * count)1971544Seschrock process_error_list(avl_tree_t *list, void *addr, size_t *count)
1981544Seschrock {
1991544Seschrock spa_error_entry_t *se;
2001544Seschrock
2011544Seschrock for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {
2021544Seschrock
2031544Seschrock if (*count == 0)
2041544Seschrock return (ENOMEM);
2051544Seschrock
2061544Seschrock if (copyout(&se->se_bookmark, (char *)addr +
2071544Seschrock (*count - 1) * sizeof (zbookmark_t),
2081544Seschrock sizeof (zbookmark_t)) != 0)
2091544Seschrock return (EFAULT);
2101544Seschrock
2111544Seschrock *count -= 1;
2121544Seschrock }
2131544Seschrock
2141544Seschrock return (0);
2151544Seschrock }
2161544Seschrock #endif
2171544Seschrock
2181544Seschrock /*
2191544Seschrock * Copy all known errors to userland as an array of bookmarks. This is
2201544Seschrock * actually a union of the on-disk last log and current log, as well as any
2211544Seschrock * pending error requests.
2221544Seschrock *
2231544Seschrock * Because the act of reading the on-disk log could cause errors to be
2241544Seschrock * generated, we have two separate locks: one for the error log and one for the
2251544Seschrock * in-core error lists. We only need the error list lock to log and error, so
2261544Seschrock * we grab the error log lock while we read the on-disk logs, and only pick up
2271544Seschrock * the error list lock when we are finished.
2281544Seschrock */
2291544Seschrock int
spa_get_errlog(spa_t * spa,void * uaddr,size_t * count)2301544Seschrock spa_get_errlog(spa_t *spa, void *uaddr, size_t *count)
2311544Seschrock {
2321544Seschrock int ret = 0;
2331544Seschrock
2341544Seschrock #ifdef _KERNEL
2351544Seschrock mutex_enter(&spa->spa_errlog_lock);
2361544Seschrock
2371544Seschrock ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count);
2381544Seschrock
2391544Seschrock if (!ret && !spa->spa_scrub_finished)
2401544Seschrock ret = process_error_log(spa, spa->spa_errlog_last, uaddr,
2411544Seschrock count);
2421544Seschrock
2431544Seschrock mutex_enter(&spa->spa_errlist_lock);
2441544Seschrock if (!ret)
2451544Seschrock ret = process_error_list(&spa->spa_errlist_scrub, uaddr,
2461544Seschrock count);
2471544Seschrock if (!ret)
2481544Seschrock ret = process_error_list(&spa->spa_errlist_last, uaddr,
2491544Seschrock count);
2501544Seschrock mutex_exit(&spa->spa_errlist_lock);
2511544Seschrock
2521544Seschrock mutex_exit(&spa->spa_errlog_lock);
2531544Seschrock #endif
2541544Seschrock
2551544Seschrock return (ret);
2561544Seschrock }
2571544Seschrock
2581544Seschrock /*
2591544Seschrock * Called when a scrub completes. This simply set a bit which tells which AVL
2601544Seschrock * tree to add new errors. spa_errlog_sync() is responsible for actually
2611544Seschrock * syncing the changes to the underlying objects.
2621544Seschrock */
2631544Seschrock void
spa_errlog_rotate(spa_t * spa)2641544Seschrock spa_errlog_rotate(spa_t *spa)
2651544Seschrock {
2661544Seschrock mutex_enter(&spa->spa_errlist_lock);
2671544Seschrock spa->spa_scrub_finished = B_TRUE;
2681544Seschrock mutex_exit(&spa->spa_errlist_lock);
2691544Seschrock }
2701544Seschrock
2711544Seschrock /*
2721544Seschrock * Discard any pending errors from the spa_t. Called when unloading a faulted
2731544Seschrock * pool, as the errors encountered during the open cannot be synced to disk.
2741544Seschrock */
2751544Seschrock void
spa_errlog_drain(spa_t * spa)2761544Seschrock spa_errlog_drain(spa_t *spa)
2771544Seschrock {
2781544Seschrock spa_error_entry_t *se;
2791544Seschrock void *cookie;
2801544Seschrock
2811544Seschrock mutex_enter(&spa->spa_errlist_lock);
2821544Seschrock
2831544Seschrock cookie = NULL;
2841544Seschrock while ((se = avl_destroy_nodes(&spa->spa_errlist_last,
2851544Seschrock &cookie)) != NULL)
2861544Seschrock kmem_free(se, sizeof (spa_error_entry_t));
2871544Seschrock cookie = NULL;
2881544Seschrock while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub,
2891544Seschrock &cookie)) != NULL)
2901544Seschrock kmem_free(se, sizeof (spa_error_entry_t));
2911544Seschrock
2921544Seschrock mutex_exit(&spa->spa_errlist_lock);
2931544Seschrock }
2941544Seschrock
2951544Seschrock /*
2961544Seschrock * Process a list of errors into the current on-disk log.
2971544Seschrock */
2981544Seschrock static void
sync_error_list(spa_t * spa,avl_tree_t * t,uint64_t * obj,dmu_tx_t * tx)2991544Seschrock sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
3001544Seschrock {
3011544Seschrock spa_error_entry_t *se;
3021544Seschrock char buf[64];
3031544Seschrock void *cookie;
3041544Seschrock
3051544Seschrock if (avl_numnodes(t) != 0) {
3061544Seschrock /* create log if necessary */
3071544Seschrock if (*obj == 0)
3081544Seschrock *obj = zap_create(spa->spa_meta_objset,
3091544Seschrock DMU_OT_ERROR_LOG, DMU_OT_NONE,
3101544Seschrock 0, tx);
3111544Seschrock
3121544Seschrock /* add errors to the current log */
3131544Seschrock for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
3141544Seschrock char *name = se->se_name ? se->se_name : "";
3151544Seschrock
3161544Seschrock bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));
3171544Seschrock
3181544Seschrock (void) zap_update(spa->spa_meta_objset,
3191544Seschrock *obj, buf, 1, strlen(name) + 1, name, tx);
3201544Seschrock }
3211544Seschrock
3221544Seschrock /* purge the error list */
3231544Seschrock cookie = NULL;
3241544Seschrock while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
3251544Seschrock kmem_free(se, sizeof (spa_error_entry_t));
3261544Seschrock }
3271544Seschrock }
3281544Seschrock
/*
 * Sync the error log out to disk.  This is a little tricky because the act of
 * writing the error log requires the spa_errlist_lock.  So, we need to lock
 * the error lists, take a copy of the lists, and then reinitialize them.
 * Then, we drop the error list lock and take the error log lock, at which
 * point we do the errlog processing.  Then, if we encounter an I/O error
 * during this process, we can successfully add the error to the list.  Note
 * that this will result in the perpetual recycling of errors, but it is an
 * unlikely situation and not a performance critical operation.
 *
 * NOTE(review): the ordering below is load-bearing — 'last' must be synced
 * before any rotation, and the rotation must happen before the scrub errors
 * are synced, so that rotation moves the scrub log (and any scrub-window
 * errors) into place as the new last log.
 */
void
spa_errlog_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	avl_tree_t scrub, last;
	int scrub_finished;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Bail out early under normal circumstances: nothing pending and no
	 * rotation requested.
	 */
	if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
	    avl_numnodes(&spa->spa_errlist_last) == 0 &&
	    !spa->spa_scrub_finished) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	/* Snapshot the in-core lists and the rotation flag, then reset them. */
	spa_get_errlists(spa, &last, &scrub);
	scrub_finished = spa->spa_scrub_finished;
	spa->spa_scrub_finished = B_FALSE;

	/* Swap locks: errors logged from here on land in the fresh lists. */
	mutex_exit(&spa->spa_errlist_lock);
	mutex_enter(&spa->spa_errlog_lock);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	/*
	 * Sync out the current list of errors.
	 */
	sync_error_list(spa, &last, &spa->spa_errlog_last, tx);

	/*
	 * Rotate the log if necessary: free the old last log, and promote the
	 * scrub log to be the new last log.
	 */
	if (scrub_finished) {
		if (spa->spa_errlog_last != 0)
			VERIFY(dmu_object_free(spa->spa_meta_objset,
			    spa->spa_errlog_last, tx) == 0);
		spa->spa_errlog_last = spa->spa_errlog_scrub;
		spa->spa_errlog_scrub = 0;

		/* Errors seen during the scrub belong in the new last log. */
		sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
	}

	/*
	 * Sync out any pending scrub errors (no-op if the rotation above
	 * already drained 'scrub' into the last log).
	 */
	sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);

	/*
	 * Update the MOS directory to reflect the new object numbers.
	 */
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
	    &spa->spa_errlog_last, tx);
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
	    &spa->spa_errlog_scrub, tx);

	dmu_tx_commit(tx);

	mutex_exit(&spa->spa_errlog_lock);
}
404