10Sstevel@tonic-gate /*
20Sstevel@tonic-gate * CDDL HEADER START
30Sstevel@tonic-gate *
40Sstevel@tonic-gate * The contents of this file are subject to the terms of the
52951Selowe * Common Development and Distribution License (the "License").
62951Selowe * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate *
80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate * See the License for the specific language governing permissions
110Sstevel@tonic-gate * and limitations under the License.
120Sstevel@tonic-gate *
130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate *
190Sstevel@tonic-gate * CDDL HEADER END
200Sstevel@tonic-gate */
210Sstevel@tonic-gate /*
22*9105SStephen.Hanson@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
230Sstevel@tonic-gate * Use is subject to license terms.
240Sstevel@tonic-gate */
250Sstevel@tonic-gate
260Sstevel@tonic-gate /*
270Sstevel@tonic-gate * Kernel Error Queues
280Sstevel@tonic-gate *
290Sstevel@tonic-gate * A common problem when handling hardware error traps and interrupts is that
300Sstevel@tonic-gate * these errors frequently must be handled at high interrupt level, where
310Sstevel@tonic-gate * reliably producing error messages and safely examining and manipulating
320Sstevel@tonic-gate * other kernel state may not be possible. The kernel error queue primitive is
330Sstevel@tonic-gate * a common set of routines that allow a subsystem to maintain a queue of
340Sstevel@tonic-gate * errors that can be processed by an explicit call from a safe context or by a
350Sstevel@tonic-gate * soft interrupt that fires at a specific lower interrupt level. The queue
360Sstevel@tonic-gate * management code also ensures that if the system panics, all in-transit
370Sstevel@tonic-gate * errors are logged prior to reset. Each queue has an associated kstat for
380Sstevel@tonic-gate * observing the number of errors dispatched and logged, and mdb(1) debugging
390Sstevel@tonic-gate * support is provided for live and post-mortem observability.
400Sstevel@tonic-gate *
410Sstevel@tonic-gate * Memory Allocation
420Sstevel@tonic-gate *
430Sstevel@tonic-gate * All of the queue data structures are allocated in advance as part of
440Sstevel@tonic-gate * the errorq_create() call. No additional memory allocations are
450Sstevel@tonic-gate * performed as part of errorq_dispatch(), errorq_reserve(),
460Sstevel@tonic-gate * errorq_commit() or errorq_drain(). This design
470Sstevel@tonic-gate * facilitates reliable error queue processing even when the system is low
480Sstevel@tonic-gate * on memory, and ensures that errorq_dispatch() can be called from any
490Sstevel@tonic-gate * context. When the queue is created, the maximum queue length is
50*9105SStephen.Hanson@Sun.COM * specified as a parameter to errorq_create() and errorq_nvcreate(). This
510Sstevel@tonic-gate * length should represent a reasonable upper bound on the number of
520Sstevel@tonic-gate * simultaneous errors. If errorq_dispatch() or errorq_reserve() is
530Sstevel@tonic-gate * invoked and no free queue elements are available, the error is
540Sstevel@tonic-gate * dropped and will not be logged. Typically, the queue will only be
550Sstevel@tonic-gate * exhausted by an error storm, and in this case
560Sstevel@tonic-gate * the earlier errors provide the most important data for analysis.
570Sstevel@tonic-gate * When a new error is dispatched, the error data is copied into the
580Sstevel@tonic-gate * preallocated queue element so that the caller's buffer can be reused.
590Sstevel@tonic-gate *
60*9105SStephen.Hanson@Sun.COM * When a new error is reserved, an element is moved from the free pool
610Sstevel@tonic-gate * and returned to the caller. The element buffer data, eqe_data, may be
620Sstevel@tonic-gate * managed by the caller and dispatched to the errorq by calling
630Sstevel@tonic-gate * errorq_commit(). This is useful for additions to errorq's
640Sstevel@tonic-gate * created with errorq_nvcreate() to handle name-value pair (nvpair) data.
650Sstevel@tonic-gate * See below for a discussion on nvlist errorq's.
660Sstevel@tonic-gate *
670Sstevel@tonic-gate * Queue Drain Callback
680Sstevel@tonic-gate *
690Sstevel@tonic-gate * When the error queue is drained, the caller's queue drain callback is
700Sstevel@tonic-gate * invoked with a pointer to the saved error data. This function may be
710Sstevel@tonic-gate * called from passive kernel context or soft interrupt context at or
720Sstevel@tonic-gate * below LOCK_LEVEL, or as part of panic(). As such, the callback should
730Sstevel@tonic-gate * basically only be calling cmn_err (but NOT with the CE_PANIC flag).
740Sstevel@tonic-gate * The callback must not call panic(), attempt to allocate memory, or wait
750Sstevel@tonic-gate * on a condition variable. The callback may not call errorq_destroy()
760Sstevel@tonic-gate * or errorq_drain() on the same error queue that called it.
770Sstevel@tonic-gate *
780Sstevel@tonic-gate * The queue drain callback will always be called for each pending error
790Sstevel@tonic-gate * in the order in which errors were enqueued (oldest to newest). The
800Sstevel@tonic-gate * queue drain callback is guaranteed to provide at *least* once semantics
810Sstevel@tonic-gate * for all errors that are successfully dispatched (i.e. for which
820Sstevel@tonic-gate * errorq_dispatch() has successfully completed). If an unrelated panic
830Sstevel@tonic-gate * occurs while the queue drain callback is running on a vital queue, the
840Sstevel@tonic-gate * panic subsystem will continue the queue drain and the callback may be
850Sstevel@tonic-gate * invoked again for the same error. Therefore, the callback should
860Sstevel@tonic-gate * restrict itself to logging messages and taking other actions that are
870Sstevel@tonic-gate * not destructive if repeated.
880Sstevel@tonic-gate *
890Sstevel@tonic-gate * Name-Value Pair Error Queues
900Sstevel@tonic-gate *
910Sstevel@tonic-gate * During error handling, it may be more convenient to store error
920Sstevel@tonic-gate * queue element data as a fixed buffer of name-value pairs. The
93*9105SStephen.Hanson@Sun.COM * nvpair library allows construction and destruction of nvlists
940Sstevel@tonic-gate * in pre-allocated memory buffers.
950Sstevel@tonic-gate *
960Sstevel@tonic-gate * Error queues created via errorq_nvcreate() store queue element
970Sstevel@tonic-gate * data as fixed buffer nvlists (ereports). errorq_reserve()
98*9105SStephen.Hanson@Sun.COM * allocates an errorq element from eqp->eq_bitmap and returns a valid
990Sstevel@tonic-gate * pointer to a errorq_elem_t (queue element) and a pre-allocated
1000Sstevel@tonic-gate * fixed buffer nvlist. errorq_elem_nvl() is used to gain access
1010Sstevel@tonic-gate * to the nvlist to add name-value ereport members prior to
1020Sstevel@tonic-gate * dispatching the error queue element in errorq_commit().
1030Sstevel@tonic-gate *
1040Sstevel@tonic-gate * Once dispatched, the drain function will return the element to
105*9105SStephen.Hanson@Sun.COM * eqp->eq_bitmap and reset the associated nv_alloc structure.
1060Sstevel@tonic-gate  *	errorq_cancel() may be called to cancel an element reservation
1070Sstevel@tonic-gate  *	that was never dispatched (committed).  This is useful in
1080Sstevel@tonic-gate * cases where a programming error prevents a queue element from being
1090Sstevel@tonic-gate * dispatched.
1100Sstevel@tonic-gate *
1110Sstevel@tonic-gate * Queue Management
1120Sstevel@tonic-gate *
1130Sstevel@tonic-gate * The queue element structures and error data buffers are allocated in
1140Sstevel@tonic-gate * two contiguous chunks as part of errorq_create() or errorq_nvcreate().
1150Sstevel@tonic-gate * Each queue element structure contains a next pointer,
1160Sstevel@tonic-gate * a previous pointer, and a pointer to the corresponding error data
1170Sstevel@tonic-gate * buffer. The data buffer for a nvlist errorq is a shared buffer
1180Sstevel@tonic-gate * for the allocation of name-value pair lists. The elements are kept on
119*9105SStephen.Hanson@Sun.COM * one of four lists:
1200Sstevel@tonic-gate *
121*9105SStephen.Hanson@Sun.COM * Unused elements are kept in the free pool, managed by eqp->eq_bitmap.
122*9105SStephen.Hanson@Sun.COM * The eqe_prev and eqe_next pointers are not used while in the free pool
123*9105SStephen.Hanson@Sun.COM * and will be set to NULL.
1240Sstevel@tonic-gate *
1250Sstevel@tonic-gate * Pending errors are kept on the pending list, a singly-linked list
1260Sstevel@tonic-gate * pointed to by eqp->eq_pend, and linked together using eqe_prev. This
1270Sstevel@tonic-gate * list is maintained in order from newest error to oldest. The eqe_next
1280Sstevel@tonic-gate * pointer is not used by the pending list and will be set to NULL.
1290Sstevel@tonic-gate *
1300Sstevel@tonic-gate * The processing list is a doubly-linked list pointed to by eqp->eq_phead
1310Sstevel@tonic-gate * (the oldest element) and eqp->eq_ptail (the newest element). The
1320Sstevel@tonic-gate * eqe_next pointer is used to traverse from eq_phead to eq_ptail, and the
1330Sstevel@tonic-gate * eqe_prev pointer is used to traverse from eq_ptail to eq_phead. Once a
1340Sstevel@tonic-gate * queue drain operation begins, the current pending list is moved to the
135*9105SStephen.Hanson@Sun.COM * processing list in a two-phase commit fashion (eq_ptail being cleared
136*9105SStephen.Hanson@Sun.COM * at the beginning but eq_phead only at the end), allowing the panic code
1370Sstevel@tonic-gate * to always locate and process all pending errors in the event that a
1380Sstevel@tonic-gate * panic occurs in the middle of queue processing.
1390Sstevel@tonic-gate *
1400Sstevel@tonic-gate * A fourth list is maintained for nvlist errorqs. The dump list,
1410Sstevel@tonic-gate * eq_dump is used to link all errorq elements that should be stored
1420Sstevel@tonic-gate * in a crash dump file in the event of a system panic. During
1430Sstevel@tonic-gate * errorq_panic(), the list is created and subsequently traversed
1440Sstevel@tonic-gate * in errorq_dump() during the final phases of a crash dump.
1450Sstevel@tonic-gate *
1460Sstevel@tonic-gate * Platform Considerations
1470Sstevel@tonic-gate *
1480Sstevel@tonic-gate * In order to simplify their implementation, error queues make use of the
1490Sstevel@tonic-gate * C wrappers for compare-and-swap. If the platform itself does not
1500Sstevel@tonic-gate * support compare-and-swap in hardware and the kernel emulation routines
1510Sstevel@tonic-gate * are used instead, then the context in which errorq_dispatch() can be
1520Sstevel@tonic-gate * safely invoked is further constrained by the implementation of the
1530Sstevel@tonic-gate * compare-and-swap emulation. Specifically, if errorq_dispatch() is
1540Sstevel@tonic-gate * called from a code path that can be executed above ATOMIC_LEVEL on such
1550Sstevel@tonic-gate * a platform, the dispatch code could potentially deadlock unless the
1560Sstevel@tonic-gate * corresponding error interrupt is blocked or disabled prior to calling
1570Sstevel@tonic-gate * errorq_dispatch(). Error queues should therefore be deployed with
1580Sstevel@tonic-gate * caution on these platforms.
1590Sstevel@tonic-gate *
1600Sstevel@tonic-gate * Interfaces
1610Sstevel@tonic-gate *
1620Sstevel@tonic-gate * errorq_t *errorq_create(name, func, private, qlen, eltsize, ipl, flags);
1630Sstevel@tonic-gate * errorq_t *errorq_nvcreate(name, func, private, qlen, eltsize, ipl, flags);
1640Sstevel@tonic-gate *
1650Sstevel@tonic-gate * Create a new error queue with the specified name, callback, and
1660Sstevel@tonic-gate * properties. A pointer to the new error queue is returned upon success,
1670Sstevel@tonic-gate * or NULL is returned to indicate that the queue could not be created.
1680Sstevel@tonic-gate * This function must be called from passive kernel context with no locks
1690Sstevel@tonic-gate * held that can prevent a sleeping memory allocation from occurring.
1700Sstevel@tonic-gate * errorq_create() will return failure if the queue kstats cannot be
1710Sstevel@tonic-gate * created, or if a soft interrupt handler cannot be registered.
1720Sstevel@tonic-gate *
1730Sstevel@tonic-gate * The queue 'name' is a string that is recorded for live and post-mortem
1740Sstevel@tonic-gate * examination by a debugger. The queue callback 'func' will be invoked
1750Sstevel@tonic-gate * for each error drained from the queue, and will receive the 'private'
1760Sstevel@tonic-gate * pointer as its first argument. The callback must obey the rules for
1770Sstevel@tonic-gate * callbacks described above. The queue will have maximum length 'qlen'
1780Sstevel@tonic-gate * and each element will be able to record up to 'eltsize' bytes of data.
1790Sstevel@tonic-gate * The queue's soft interrupt (see errorq_dispatch(), below) will fire
1800Sstevel@tonic-gate * at 'ipl', which should not exceed LOCK_LEVEL. The queue 'flags' may
1810Sstevel@tonic-gate * include the following flag:
1820Sstevel@tonic-gate *
1830Sstevel@tonic-gate * ERRORQ_VITAL - This queue contains information that is considered
1840Sstevel@tonic-gate * vital to problem diagnosis. Error queues that are marked vital will
1850Sstevel@tonic-gate * be automatically drained by the panic subsystem prior to printing
1860Sstevel@tonic-gate * the panic messages to the console.
1870Sstevel@tonic-gate *
1880Sstevel@tonic-gate * void errorq_destroy(errorq);
1890Sstevel@tonic-gate *
1900Sstevel@tonic-gate * Destroy the specified error queue. The queue is drained of any
1910Sstevel@tonic-gate * pending elements and these are logged before errorq_destroy returns.
1920Sstevel@tonic-gate * Once errorq_destroy() begins draining the queue, any simultaneous
1930Sstevel@tonic-gate * calls to dispatch errors will result in the errors being dropped.
1940Sstevel@tonic-gate * The caller must invoke a higher-level abstraction (e.g. disabling
1950Sstevel@tonic-gate * an error interrupt) to ensure that error handling code does not
1960Sstevel@tonic-gate * attempt to dispatch errors to the queue while it is being freed.
1970Sstevel@tonic-gate *
1980Sstevel@tonic-gate * void errorq_dispatch(errorq, data, len, flag);
1990Sstevel@tonic-gate *
2000Sstevel@tonic-gate * Attempt to enqueue the specified error data. If a free queue element
2010Sstevel@tonic-gate * is available, the data is copied into a free element and placed on a
2020Sstevel@tonic-gate * pending list. If no free queue element is available, the error is
2030Sstevel@tonic-gate * dropped. The data length (len) is specified in bytes and should not
2040Sstevel@tonic-gate * exceed the queue's maximum element size. If the data length is less
2050Sstevel@tonic-gate * than the maximum element size, the remainder of the queue element is
2060Sstevel@tonic-gate * filled with zeroes. The flag parameter should be one of:
2070Sstevel@tonic-gate *
2080Sstevel@tonic-gate * ERRORQ_ASYNC - Schedule a soft interrupt at the previously specified
2090Sstevel@tonic-gate * IPL to asynchronously drain the queue on behalf of the caller.
2100Sstevel@tonic-gate *
2110Sstevel@tonic-gate * ERRORQ_SYNC - Do not schedule a soft interrupt to drain the queue.
2120Sstevel@tonic-gate * The caller is presumed to be calling errorq_drain() or panic() in
2130Sstevel@tonic-gate * the near future in order to drain the queue and log the error.
2140Sstevel@tonic-gate *
2150Sstevel@tonic-gate * The errorq_dispatch() function may be called from any context, subject
2160Sstevel@tonic-gate * to the Platform Considerations described above.
2170Sstevel@tonic-gate *
2180Sstevel@tonic-gate * void errorq_drain(errorq);
2190Sstevel@tonic-gate *
2200Sstevel@tonic-gate * Drain the error queue of all pending errors. The queue's callback
2210Sstevel@tonic-gate * function is invoked for each error in order from oldest to newest.
2220Sstevel@tonic-gate * This function may be used at or below LOCK_LEVEL or from panic context.
2230Sstevel@tonic-gate *
2240Sstevel@tonic-gate * errorq_elem_t *errorq_reserve(errorq);
2250Sstevel@tonic-gate *
2260Sstevel@tonic-gate * Reserve an error queue element for later processing and dispatching.
2270Sstevel@tonic-gate * The element is returned to the caller who may add error-specific data
228*9105SStephen.Hanson@Sun.COM  *	to the element.  The element is returned to the free pool when either
2290Sstevel@tonic-gate * errorq_commit() is called and the element asynchronously processed
2300Sstevel@tonic-gate * or immediately when errorq_cancel() is called.
2310Sstevel@tonic-gate *
2320Sstevel@tonic-gate * void errorq_commit(errorq, errorq_elem, flag);
2330Sstevel@tonic-gate *
2340Sstevel@tonic-gate * Commit an errorq element (eqep) for dispatching, see
2350Sstevel@tonic-gate * errorq_dispatch().
2360Sstevel@tonic-gate *
2370Sstevel@tonic-gate * void errorq_cancel(errorq, errorq_elem);
2380Sstevel@tonic-gate *
2390Sstevel@tonic-gate * Cancel a pending errorq element reservation. The errorq element is
240*9105SStephen.Hanson@Sun.COM  *	returned to the free pool upon cancellation.
2410Sstevel@tonic-gate */
2420Sstevel@tonic-gate
2430Sstevel@tonic-gate #include <sys/errorq_impl.h>
2440Sstevel@tonic-gate #include <sys/sysmacros.h>
2450Sstevel@tonic-gate #include <sys/machlock.h>
2460Sstevel@tonic-gate #include <sys/cmn_err.h>
2470Sstevel@tonic-gate #include <sys/atomic.h>
2480Sstevel@tonic-gate #include <sys/systm.h>
2490Sstevel@tonic-gate #include <sys/kmem.h>
2500Sstevel@tonic-gate #include <sys/conf.h>
2510Sstevel@tonic-gate #include <sys/ddi.h>
2520Sstevel@tonic-gate #include <sys/sunddi.h>
2530Sstevel@tonic-gate #include <sys/bootconf.h>
2540Sstevel@tonic-gate #include <sys/spl.h>
2550Sstevel@tonic-gate #include <sys/dumphdr.h>
2560Sstevel@tonic-gate #include <sys/compress.h>
2570Sstevel@tonic-gate #include <sys/time.h>
2580Sstevel@tonic-gate #include <sys/panic.h>
259*9105SStephen.Hanson@Sun.COM #include <sys/bitmap.h>
2600Sstevel@tonic-gate #include <sys/fm/protocol.h>
2610Sstevel@tonic-gate #include <sys/fm/util.h>
2620Sstevel@tonic-gate
/*
 * Kstat template copied into each queue at errorq_create() time; the counter
 * order here must match struct errorq_kstat in <sys/errorq_impl.h>.
 */
static struct errorq_kstat errorq_kstat_template = {
	{ "dispatched", KSTAT_DATA_UINT64 },
	{ "dropped", KSTAT_DATA_UINT64 },
	{ "logged", KSTAT_DATA_UINT64 },
	{ "reserved", KSTAT_DATA_UINT64 },
	{ "reserve_fail", KSTAT_DATA_UINT64 },
	{ "committed", KSTAT_DATA_UINT64 },
	{ "commit_fail", KSTAT_DATA_UINT64 },
	{ "cancelled", KSTAT_DATA_UINT64 }
};

/* Count of errors dropped because the target queue was NULL or inactive. */
static uint64_t errorq_lost = 0;
static errorq_t *errorq_list = NULL;	/* global list of all error queues */
static kmutex_t errorq_lock;		/* protects errorq_list */
/* NOTE(review): tunable; its consumer is not visible in this chunk — confirm. */
static uint64_t errorq_vitalmin = 5;
2780Sstevel@tonic-gate
2790Sstevel@tonic-gate static uint_t
errorq_intr(caddr_t eqp)2800Sstevel@tonic-gate errorq_intr(caddr_t eqp)
2810Sstevel@tonic-gate {
2820Sstevel@tonic-gate errorq_drain((errorq_t *)eqp);
2830Sstevel@tonic-gate return (DDI_INTR_CLAIMED);
2840Sstevel@tonic-gate }
2850Sstevel@tonic-gate
2860Sstevel@tonic-gate /*
2870Sstevel@tonic-gate * Create a new error queue with the specified properties and add a software
2880Sstevel@tonic-gate * interrupt handler and kstat for it. This function must be called from
2890Sstevel@tonic-gate * passive kernel context with no locks held that can prevent a sleeping
2900Sstevel@tonic-gate * memory allocation from occurring. This function will return NULL if the
2910Sstevel@tonic-gate * softint or kstat for this queue cannot be created.
2920Sstevel@tonic-gate */
2930Sstevel@tonic-gate errorq_t *
errorq_create(const char * name,errorq_func_t func,void * private,ulong_t qlen,size_t size,uint_t ipl,uint_t flags)2940Sstevel@tonic-gate errorq_create(const char *name, errorq_func_t func, void *private,
2950Sstevel@tonic-gate ulong_t qlen, size_t size, uint_t ipl, uint_t flags)
2960Sstevel@tonic-gate {
2970Sstevel@tonic-gate errorq_t *eqp = kmem_alloc(sizeof (errorq_t), KM_SLEEP);
2980Sstevel@tonic-gate ddi_iblock_cookie_t ibc = (ddi_iblock_cookie_t)(uintptr_t)ipltospl(ipl);
2990Sstevel@tonic-gate dev_info_t *dip = ddi_root_node();
3000Sstevel@tonic-gate
3010Sstevel@tonic-gate errorq_elem_t *eep;
3020Sstevel@tonic-gate ddi_softintr_t id = NULL;
3030Sstevel@tonic-gate caddr_t data;
3040Sstevel@tonic-gate
3050Sstevel@tonic-gate ASSERT(qlen != 0 && size != 0);
3060Sstevel@tonic-gate ASSERT(ipl > 0 && ipl <= LOCK_LEVEL);
3070Sstevel@tonic-gate
3080Sstevel@tonic-gate /*
3090Sstevel@tonic-gate * If a queue is created very early in boot before device tree services
3100Sstevel@tonic-gate * are available, the queue softint handler cannot be created. We
3110Sstevel@tonic-gate * manually drain these queues and create their softint handlers when
3120Sstevel@tonic-gate * it is safe to do so as part of errorq_init(), below.
3130Sstevel@tonic-gate */
3140Sstevel@tonic-gate if (modrootloaded && ddi_add_softintr(dip, DDI_SOFTINT_FIXED, &id,
3150Sstevel@tonic-gate &ibc, NULL, errorq_intr, (caddr_t)eqp) != DDI_SUCCESS) {
3160Sstevel@tonic-gate cmn_err(CE_WARN, "errorq_create: failed to register "
3170Sstevel@tonic-gate "IPL %u softint for queue %s", ipl, name);
3180Sstevel@tonic-gate kmem_free(eqp, sizeof (errorq_t));
3190Sstevel@tonic-gate return (NULL);
3200Sstevel@tonic-gate }
3210Sstevel@tonic-gate
3222951Selowe if ((eqp->eq_ksp = kstat_create("unix", 0, name, "errorq",
3230Sstevel@tonic-gate KSTAT_TYPE_NAMED, sizeof (struct errorq_kstat) /
3240Sstevel@tonic-gate sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) == NULL) {
3250Sstevel@tonic-gate cmn_err(CE_WARN, "errorq_create: failed to create kstat "
3260Sstevel@tonic-gate "for queue %s", name);
3270Sstevel@tonic-gate if (id != NULL)
3280Sstevel@tonic-gate ddi_remove_softintr(id);
3290Sstevel@tonic-gate kmem_free(eqp, sizeof (errorq_t));
3300Sstevel@tonic-gate return (NULL);
3310Sstevel@tonic-gate }
3320Sstevel@tonic-gate
3330Sstevel@tonic-gate bcopy(&errorq_kstat_template, &eqp->eq_kstat,
3340Sstevel@tonic-gate sizeof (struct errorq_kstat));
3350Sstevel@tonic-gate eqp->eq_ksp->ks_data = &eqp->eq_kstat;
3360Sstevel@tonic-gate eqp->eq_ksp->ks_private = eqp;
3370Sstevel@tonic-gate kstat_install(eqp->eq_ksp);
3380Sstevel@tonic-gate
3390Sstevel@tonic-gate (void) strncpy(eqp->eq_name, name, ERRORQ_NAMELEN);
3400Sstevel@tonic-gate eqp->eq_name[ERRORQ_NAMELEN] = '\0';
3410Sstevel@tonic-gate eqp->eq_func = func;
3420Sstevel@tonic-gate eqp->eq_private = private;
3430Sstevel@tonic-gate eqp->eq_data = kmem_alloc(qlen * size, KM_SLEEP);
3440Sstevel@tonic-gate eqp->eq_qlen = qlen;
3450Sstevel@tonic-gate eqp->eq_size = size;
3460Sstevel@tonic-gate eqp->eq_ipl = ipl;
3470Sstevel@tonic-gate eqp->eq_flags = flags | ERRORQ_ACTIVE;
3480Sstevel@tonic-gate eqp->eq_id = id;
3490Sstevel@tonic-gate mutex_init(&eqp->eq_lock, NULL, MUTEX_DEFAULT, NULL);
3500Sstevel@tonic-gate eqp->eq_elems = kmem_alloc(qlen * sizeof (errorq_elem_t), KM_SLEEP);
3510Sstevel@tonic-gate eqp->eq_phead = NULL;
3520Sstevel@tonic-gate eqp->eq_ptail = NULL;
3530Sstevel@tonic-gate eqp->eq_pend = NULL;
3540Sstevel@tonic-gate eqp->eq_dump = NULL;
355*9105SStephen.Hanson@Sun.COM eqp->eq_bitmap = kmem_zalloc(BT_SIZEOFMAP(qlen), KM_SLEEP);
356*9105SStephen.Hanson@Sun.COM eqp->eq_rotor = 0;
3570Sstevel@tonic-gate
3580Sstevel@tonic-gate /*
359*9105SStephen.Hanson@Sun.COM * Iterate over the array of errorq_elem_t structures and set its
360*9105SStephen.Hanson@Sun.COM * data pointer.
3610Sstevel@tonic-gate */
362*9105SStephen.Hanson@Sun.COM for (eep = eqp->eq_elems, data = eqp->eq_data; qlen > 1; qlen--) {
3630Sstevel@tonic-gate eep->eqe_next = NULL;
3640Sstevel@tonic-gate eep->eqe_dump = NULL;
365*9105SStephen.Hanson@Sun.COM eep->eqe_prev = NULL;
3660Sstevel@tonic-gate eep->eqe_data = data;
3670Sstevel@tonic-gate data += size;
3680Sstevel@tonic-gate eep++;
3690Sstevel@tonic-gate }
3700Sstevel@tonic-gate eep->eqe_next = NULL;
3710Sstevel@tonic-gate eep->eqe_prev = NULL;
3720Sstevel@tonic-gate eep->eqe_data = data;
3730Sstevel@tonic-gate eep->eqe_dump = NULL;
3740Sstevel@tonic-gate
3750Sstevel@tonic-gate /*
3760Sstevel@tonic-gate * Once the errorq is initialized, add it to the global list of queues,
3770Sstevel@tonic-gate * and then return a pointer to the new queue to the caller.
3780Sstevel@tonic-gate */
3790Sstevel@tonic-gate mutex_enter(&errorq_lock);
3800Sstevel@tonic-gate eqp->eq_next = errorq_list;
3810Sstevel@tonic-gate errorq_list = eqp;
3820Sstevel@tonic-gate mutex_exit(&errorq_lock);
3830Sstevel@tonic-gate
3840Sstevel@tonic-gate return (eqp);
3850Sstevel@tonic-gate }
3860Sstevel@tonic-gate
3870Sstevel@tonic-gate /*
3880Sstevel@tonic-gate * Create a new errorq as if by errorq_create(), but set the ERRORQ_NVLIST
3890Sstevel@tonic-gate * flag and initialize each element to have the start of its data region used
3900Sstevel@tonic-gate * as an errorq_nvelem_t with a nvlist allocator that consumes the data region.
3910Sstevel@tonic-gate */
3920Sstevel@tonic-gate errorq_t *
errorq_nvcreate(const char * name,errorq_func_t func,void * private,ulong_t qlen,size_t size,uint_t ipl,uint_t flags)3930Sstevel@tonic-gate errorq_nvcreate(const char *name, errorq_func_t func, void *private,
3940Sstevel@tonic-gate ulong_t qlen, size_t size, uint_t ipl, uint_t flags)
3950Sstevel@tonic-gate {
3960Sstevel@tonic-gate errorq_t *eqp;
3970Sstevel@tonic-gate errorq_elem_t *eep;
3980Sstevel@tonic-gate
3990Sstevel@tonic-gate eqp = errorq_create(name, func, private, qlen,
4000Sstevel@tonic-gate size + sizeof (errorq_nvelem_t), ipl, flags | ERRORQ_NVLIST);
4010Sstevel@tonic-gate
4020Sstevel@tonic-gate if (eqp == NULL)
4030Sstevel@tonic-gate return (NULL);
4040Sstevel@tonic-gate
4050Sstevel@tonic-gate mutex_enter(&eqp->eq_lock);
4060Sstevel@tonic-gate
4070Sstevel@tonic-gate for (eep = eqp->eq_elems; qlen != 0; eep++, qlen--) {
4080Sstevel@tonic-gate errorq_nvelem_t *eqnp = eep->eqe_data;
4090Sstevel@tonic-gate eqnp->eqn_buf = (char *)eqnp + sizeof (errorq_nvelem_t);
4100Sstevel@tonic-gate eqnp->eqn_nva = fm_nva_xcreate(eqnp->eqn_buf, size);
4110Sstevel@tonic-gate }
4120Sstevel@tonic-gate
4130Sstevel@tonic-gate mutex_exit(&eqp->eq_lock);
4140Sstevel@tonic-gate return (eqp);
4150Sstevel@tonic-gate }
4160Sstevel@tonic-gate
4170Sstevel@tonic-gate /*
4180Sstevel@tonic-gate * To destroy an error queue, we mark it as disabled and then explicitly drain
4190Sstevel@tonic-gate * all pending errors. Once the drain is complete, we can remove the queue
4200Sstevel@tonic-gate * from the global list of queues examined by errorq_panic(), and then free
4210Sstevel@tonic-gate * the various queue data structures. The caller must use some higher-level
4220Sstevel@tonic-gate * abstraction (e.g. disabling an error interrupt) to ensure that no one will
4230Sstevel@tonic-gate * attempt to enqueue new errors while we are freeing this queue.
4240Sstevel@tonic-gate */
4250Sstevel@tonic-gate void
errorq_destroy(errorq_t * eqp)4260Sstevel@tonic-gate errorq_destroy(errorq_t *eqp)
4270Sstevel@tonic-gate {
4280Sstevel@tonic-gate errorq_t *p, **pp;
4290Sstevel@tonic-gate errorq_elem_t *eep;
4300Sstevel@tonic-gate ulong_t i;
4310Sstevel@tonic-gate
4320Sstevel@tonic-gate ASSERT(eqp != NULL);
4330Sstevel@tonic-gate eqp->eq_flags &= ~ERRORQ_ACTIVE;
4340Sstevel@tonic-gate errorq_drain(eqp);
4350Sstevel@tonic-gate
4360Sstevel@tonic-gate mutex_enter(&errorq_lock);
4370Sstevel@tonic-gate pp = &errorq_list;
4380Sstevel@tonic-gate
4390Sstevel@tonic-gate for (p = errorq_list; p != NULL; p = p->eq_next) {
4400Sstevel@tonic-gate if (p == eqp) {
4410Sstevel@tonic-gate *pp = p->eq_next;
4420Sstevel@tonic-gate break;
4430Sstevel@tonic-gate }
4440Sstevel@tonic-gate pp = &p->eq_next;
4450Sstevel@tonic-gate }
4460Sstevel@tonic-gate
4470Sstevel@tonic-gate mutex_exit(&errorq_lock);
4480Sstevel@tonic-gate ASSERT(p != NULL);
4490Sstevel@tonic-gate
4500Sstevel@tonic-gate if (eqp->eq_flags & ERRORQ_NVLIST) {
4510Sstevel@tonic-gate for (eep = eqp->eq_elems, i = 0; i < eqp->eq_qlen; i++, eep++) {
4520Sstevel@tonic-gate errorq_nvelem_t *eqnp = eep->eqe_data;
4530Sstevel@tonic-gate fm_nva_xdestroy(eqnp->eqn_nva);
4540Sstevel@tonic-gate }
4550Sstevel@tonic-gate }
4560Sstevel@tonic-gate
4570Sstevel@tonic-gate mutex_destroy(&eqp->eq_lock);
4580Sstevel@tonic-gate kstat_delete(eqp->eq_ksp);
4590Sstevel@tonic-gate
4600Sstevel@tonic-gate if (eqp->eq_id != NULL)
4610Sstevel@tonic-gate ddi_remove_softintr(eqp->eq_id);
4620Sstevel@tonic-gate
4630Sstevel@tonic-gate kmem_free(eqp->eq_elems, eqp->eq_qlen * sizeof (errorq_elem_t));
464*9105SStephen.Hanson@Sun.COM kmem_free(eqp->eq_bitmap, BT_SIZEOFMAP(eqp->eq_qlen));
4650Sstevel@tonic-gate kmem_free(eqp->eq_data, eqp->eq_qlen * eqp->eq_size);
4660Sstevel@tonic-gate
4670Sstevel@tonic-gate kmem_free(eqp, sizeof (errorq_t));
4680Sstevel@tonic-gate }
4690Sstevel@tonic-gate
4700Sstevel@tonic-gate /*
471*9105SStephen.Hanson@Sun.COM * private version of bt_availbit which makes a best-efforts attempt
472*9105SStephen.Hanson@Sun.COM * at allocating in a round-robin fashion in order to facilitate post-mortem
473*9105SStephen.Hanson@Sun.COM * diagnosis.
474*9105SStephen.Hanson@Sun.COM */
475*9105SStephen.Hanson@Sun.COM static index_t
errorq_availbit(ulong_t * bitmap,size_t nbits,index_t curindex)476*9105SStephen.Hanson@Sun.COM errorq_availbit(ulong_t *bitmap, size_t nbits, index_t curindex)
477*9105SStephen.Hanson@Sun.COM {
478*9105SStephen.Hanson@Sun.COM ulong_t bit, maxbit, bx;
479*9105SStephen.Hanson@Sun.COM index_t rval, nextindex = curindex + 1;
480*9105SStephen.Hanson@Sun.COM index_t nextword = nextindex >> BT_ULSHIFT;
481*9105SStephen.Hanson@Sun.COM ulong_t nextbitindex = nextindex & BT_ULMASK;
482*9105SStephen.Hanson@Sun.COM index_t maxindex = nbits - 1;
483*9105SStephen.Hanson@Sun.COM index_t maxword = maxindex >> BT_ULSHIFT;
484*9105SStephen.Hanson@Sun.COM ulong_t maxbitindex = maxindex & BT_ULMASK;
485*9105SStephen.Hanson@Sun.COM
486*9105SStephen.Hanson@Sun.COM /*
487*9105SStephen.Hanson@Sun.COM * First check if there are still some bits remaining in the current
488*9105SStephen.Hanson@Sun.COM * word, and see if any of those are available. We need to do this by
489*9105SStephen.Hanson@Sun.COM * hand as the bt_availbit() function always starts at the beginning
490*9105SStephen.Hanson@Sun.COM * of a word.
491*9105SStephen.Hanson@Sun.COM */
492*9105SStephen.Hanson@Sun.COM if (nextindex <= maxindex && nextbitindex != 0) {
493*9105SStephen.Hanson@Sun.COM maxbit = (nextword == maxword) ? maxbitindex : BT_ULMASK;
494*9105SStephen.Hanson@Sun.COM for (bx = 0, bit = 1; bx <= maxbit; bx++, bit <<= 1)
495*9105SStephen.Hanson@Sun.COM if (bx >= nextbitindex && !(bitmap[nextword] & bit))
496*9105SStephen.Hanson@Sun.COM return ((nextword << BT_ULSHIFT) + bx);
497*9105SStephen.Hanson@Sun.COM nextword++;
498*9105SStephen.Hanson@Sun.COM }
499*9105SStephen.Hanson@Sun.COM /*
500*9105SStephen.Hanson@Sun.COM * Now check if there are any words remaining before the end of the
501*9105SStephen.Hanson@Sun.COM * bitmap. Use bt_availbit() to find any free bits.
502*9105SStephen.Hanson@Sun.COM */
503*9105SStephen.Hanson@Sun.COM if (nextword <= maxword)
504*9105SStephen.Hanson@Sun.COM if ((rval = bt_availbit(&bitmap[nextword],
505*9105SStephen.Hanson@Sun.COM nbits - (nextword << BT_ULSHIFT))) != -1)
506*9105SStephen.Hanson@Sun.COM return ((nextword << BT_ULSHIFT) + rval);
507*9105SStephen.Hanson@Sun.COM /*
508*9105SStephen.Hanson@Sun.COM * Finally loop back to the start and look for any free bits starting
509*9105SStephen.Hanson@Sun.COM * from the beginning of the bitmap to the current rotor position.
510*9105SStephen.Hanson@Sun.COM */
511*9105SStephen.Hanson@Sun.COM return (bt_availbit(bitmap, nextindex));
512*9105SStephen.Hanson@Sun.COM }
513*9105SStephen.Hanson@Sun.COM
514*9105SStephen.Hanson@Sun.COM /*
5150Sstevel@tonic-gate * Dispatch a new error into the queue for later processing. The specified
5160Sstevel@tonic-gate * data buffer is copied into a preallocated queue element. If 'len' is
5170Sstevel@tonic-gate * smaller than the queue element size, the remainder of the queue element is
5180Sstevel@tonic-gate * filled with zeroes. This function may be called from any context subject
5190Sstevel@tonic-gate * to the Platform Considerations described above.
5200Sstevel@tonic-gate */
5210Sstevel@tonic-gate void
errorq_dispatch(errorq_t * eqp,const void * data,size_t len,uint_t flag)5220Sstevel@tonic-gate errorq_dispatch(errorq_t *eqp, const void *data, size_t len, uint_t flag)
5230Sstevel@tonic-gate {
5240Sstevel@tonic-gate errorq_elem_t *eep, *old;
5250Sstevel@tonic-gate
5260Sstevel@tonic-gate if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
5270Sstevel@tonic-gate atomic_add_64(&errorq_lost, 1);
5280Sstevel@tonic-gate return; /* drop error if queue is uninitialized or disabled */
5290Sstevel@tonic-gate }
5300Sstevel@tonic-gate
531*9105SStephen.Hanson@Sun.COM for (;;) {
532*9105SStephen.Hanson@Sun.COM int i, rval;
533*9105SStephen.Hanson@Sun.COM
534*9105SStephen.Hanson@Sun.COM if ((i = errorq_availbit(eqp->eq_bitmap, eqp->eq_qlen,
535*9105SStephen.Hanson@Sun.COM eqp->eq_rotor)) == -1) {
536*9105SStephen.Hanson@Sun.COM atomic_add_64(&eqp->eq_kstat.eqk_dropped.value.ui64, 1);
537*9105SStephen.Hanson@Sun.COM return;
538*9105SStephen.Hanson@Sun.COM }
539*9105SStephen.Hanson@Sun.COM BT_ATOMIC_SET_EXCL(eqp->eq_bitmap, i, rval);
540*9105SStephen.Hanson@Sun.COM if (rval == 0) {
541*9105SStephen.Hanson@Sun.COM eqp->eq_rotor = i;
542*9105SStephen.Hanson@Sun.COM eep = &eqp->eq_elems[i];
5430Sstevel@tonic-gate break;
544*9105SStephen.Hanson@Sun.COM }
5450Sstevel@tonic-gate }
5460Sstevel@tonic-gate
5470Sstevel@tonic-gate ASSERT(len <= eqp->eq_size);
5480Sstevel@tonic-gate bcopy(data, eep->eqe_data, MIN(eqp->eq_size, len));
5490Sstevel@tonic-gate
5500Sstevel@tonic-gate if (len < eqp->eq_size)
5510Sstevel@tonic-gate bzero((caddr_t)eep->eqe_data + len, eqp->eq_size - len);
5520Sstevel@tonic-gate
5530Sstevel@tonic-gate for (;;) {
5540Sstevel@tonic-gate old = eqp->eq_pend;
5550Sstevel@tonic-gate eep->eqe_prev = old;
5560Sstevel@tonic-gate membar_producer();
5570Sstevel@tonic-gate
5580Sstevel@tonic-gate if (casptr(&eqp->eq_pend, old, eep) == old)
5590Sstevel@tonic-gate break;
5600Sstevel@tonic-gate }
5610Sstevel@tonic-gate
5620Sstevel@tonic-gate atomic_add_64(&eqp->eq_kstat.eqk_dispatched.value.ui64, 1);
5630Sstevel@tonic-gate
5640Sstevel@tonic-gate if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
5650Sstevel@tonic-gate ddi_trigger_softintr(eqp->eq_id);
5660Sstevel@tonic-gate }
5670Sstevel@tonic-gate
5680Sstevel@tonic-gate /*
5690Sstevel@tonic-gate * Drain the specified error queue by calling eq_func() for each pending error.
5700Sstevel@tonic-gate * This function must be called at or below LOCK_LEVEL or from panic context.
5710Sstevel@tonic-gate * In order to synchronize with other attempts to drain the queue, we acquire
5720Sstevel@tonic-gate * the adaptive eq_lock, blocking other consumers. Once this lock is held,
5730Sstevel@tonic-gate * we must use compare-and-swap to move the pending list to the processing
574*9105SStephen.Hanson@Sun.COM * list and to return elements to the free pool in order to synchronize
575*9105SStephen.Hanson@Sun.COM * with producers, who do not acquire any locks and only use atomic set/clear.
5760Sstevel@tonic-gate *
5770Sstevel@tonic-gate * An additional constraint on this function is that if the system panics
5780Sstevel@tonic-gate * while this function is running, the panic code must be able to detect and
5790Sstevel@tonic-gate * handle all intermediate states and correctly dequeue all errors. The
5800Sstevel@tonic-gate * errorq_panic() function below will be used for detecting and handling
5810Sstevel@tonic-gate * these intermediate states. The comments in errorq_drain() below explain
5820Sstevel@tonic-gate * how we make sure each intermediate state is distinct and consistent.
5830Sstevel@tonic-gate */
5840Sstevel@tonic-gate void
errorq_drain(errorq_t * eqp)5850Sstevel@tonic-gate errorq_drain(errorq_t *eqp)
5860Sstevel@tonic-gate {
587*9105SStephen.Hanson@Sun.COM errorq_elem_t *eep, *dep;
5880Sstevel@tonic-gate
5890Sstevel@tonic-gate ASSERT(eqp != NULL);
5900Sstevel@tonic-gate mutex_enter(&eqp->eq_lock);
5910Sstevel@tonic-gate
5920Sstevel@tonic-gate /*
5930Sstevel@tonic-gate * If there are one or more pending errors, set eq_ptail to point to
5940Sstevel@tonic-gate * the first element on the pending list and then attempt to compare-
5950Sstevel@tonic-gate * and-swap NULL to the pending list. We use membar_producer() to
5960Sstevel@tonic-gate * make sure that eq_ptail will be visible to errorq_panic() below
5970Sstevel@tonic-gate * before the pending list is NULLed out. This section is labeled
5980Sstevel@tonic-gate * case (1) for errorq_panic, below. If eq_ptail is not yet set (1A)
5990Sstevel@tonic-gate * eq_pend has all the pending errors. If casptr fails or has not
6000Sstevel@tonic-gate * been called yet (1B), eq_pend still has all the pending errors.
6010Sstevel@tonic-gate * If casptr succeeds (1C), eq_ptail has all the pending errors.
6020Sstevel@tonic-gate */
6030Sstevel@tonic-gate while ((eep = eqp->eq_pend) != NULL) {
6040Sstevel@tonic-gate eqp->eq_ptail = eep;
6050Sstevel@tonic-gate membar_producer();
6060Sstevel@tonic-gate
6070Sstevel@tonic-gate if (casptr(&eqp->eq_pend, eep, NULL) == eep)
6080Sstevel@tonic-gate break;
6090Sstevel@tonic-gate }
6100Sstevel@tonic-gate
6110Sstevel@tonic-gate /*
6120Sstevel@tonic-gate * If no errors were pending, assert that eq_ptail is set to NULL,
6130Sstevel@tonic-gate * drop the consumer lock, and return without doing anything.
6140Sstevel@tonic-gate */
6150Sstevel@tonic-gate if (eep == NULL) {
6160Sstevel@tonic-gate ASSERT(eqp->eq_ptail == NULL);
6170Sstevel@tonic-gate mutex_exit(&eqp->eq_lock);
6180Sstevel@tonic-gate return;
6190Sstevel@tonic-gate }
6200Sstevel@tonic-gate
6210Sstevel@tonic-gate /*
6220Sstevel@tonic-gate * Now iterate from eq_ptail (a.k.a. eep, the newest error) to the
6230Sstevel@tonic-gate * oldest error, setting the eqe_next pointer so that we can iterate
6240Sstevel@tonic-gate * over the errors from oldest to newest. We use membar_producer()
6250Sstevel@tonic-gate * to make sure that these stores are visible before we set eq_phead.
6260Sstevel@tonic-gate * If we panic before, during, or just after this loop (case 2),
6270Sstevel@tonic-gate * errorq_panic() will simply redo this work, as described below.
6280Sstevel@tonic-gate */
6290Sstevel@tonic-gate for (eep->eqe_next = NULL; eep->eqe_prev != NULL; eep = eep->eqe_prev)
6300Sstevel@tonic-gate eep->eqe_prev->eqe_next = eep;
6310Sstevel@tonic-gate membar_producer();
6320Sstevel@tonic-gate
6330Sstevel@tonic-gate /*
6340Sstevel@tonic-gate * Now set eq_phead to the head of the processing list (the oldest
6350Sstevel@tonic-gate * error) and issue another membar_producer() to make sure that
6360Sstevel@tonic-gate * eq_phead is seen as non-NULL before we clear eq_ptail. If we panic
6370Sstevel@tonic-gate * after eq_phead is set (case 3), we will detect and log these errors
6380Sstevel@tonic-gate * in errorq_panic(), as described below.
6390Sstevel@tonic-gate */
6400Sstevel@tonic-gate eqp->eq_phead = eep;
6410Sstevel@tonic-gate membar_producer();
6420Sstevel@tonic-gate
6430Sstevel@tonic-gate eqp->eq_ptail = NULL;
6440Sstevel@tonic-gate membar_producer();
6450Sstevel@tonic-gate
6460Sstevel@tonic-gate /*
6470Sstevel@tonic-gate * If we enter from errorq_panic_drain(), we may already have
6480Sstevel@tonic-gate * errorq elements on the dump list. Find the tail of
6490Sstevel@tonic-gate * the list ready for append.
6500Sstevel@tonic-gate */
6510Sstevel@tonic-gate if (panicstr && (dep = eqp->eq_dump) != NULL) {
6520Sstevel@tonic-gate while (dep->eqe_dump != NULL)
6530Sstevel@tonic-gate dep = dep->eqe_dump;
6540Sstevel@tonic-gate }
6550Sstevel@tonic-gate
6560Sstevel@tonic-gate /*
6570Sstevel@tonic-gate * Now iterate over the processing list from oldest (eq_phead) to
6580Sstevel@tonic-gate * newest and log each error. Once an error is logged, we use
659*9105SStephen.Hanson@Sun.COM * atomic clear to return it to the free pool. If we panic before,
6600Sstevel@tonic-gate * during, or after calling eq_func() (case 4), the error will still be
6610Sstevel@tonic-gate * found on eq_phead and will be logged in errorq_panic below.
6620Sstevel@tonic-gate */
6630Sstevel@tonic-gate
6640Sstevel@tonic-gate while ((eep = eqp->eq_phead) != NULL) {
6650Sstevel@tonic-gate eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
6660Sstevel@tonic-gate eqp->eq_kstat.eqk_logged.value.ui64++;
6670Sstevel@tonic-gate
6680Sstevel@tonic-gate eqp->eq_phead = eep->eqe_next;
6690Sstevel@tonic-gate membar_producer();
6700Sstevel@tonic-gate
6710Sstevel@tonic-gate eep->eqe_next = NULL;
6720Sstevel@tonic-gate
6735197Sstephh /*
6745197Sstephh * On panic, we add the element to the dump list for each
6755197Sstephh * nvlist errorq. Elements are stored oldest to newest.
6765197Sstephh * Then continue, so we don't free and subsequently overwrite
6775197Sstephh * any elements which we've put on the dump queue.
6785197Sstephh */
6795197Sstephh if (panicstr && (eqp->eq_flags & ERRORQ_NVLIST)) {
6805197Sstephh if (eqp->eq_dump == NULL)
6815197Sstephh dep = eqp->eq_dump = eep;
6825197Sstephh else
6835197Sstephh dep = dep->eqe_dump = eep;
6845197Sstephh membar_producer();
6855197Sstephh continue;
6865197Sstephh }
6875197Sstephh
688*9105SStephen.Hanson@Sun.COM eep->eqe_prev = NULL;
689*9105SStephen.Hanson@Sun.COM BT_ATOMIC_CLEAR(eqp->eq_bitmap, eep - eqp->eq_elems);
6900Sstevel@tonic-gate }
6910Sstevel@tonic-gate
6920Sstevel@tonic-gate mutex_exit(&eqp->eq_lock);
6930Sstevel@tonic-gate }
6940Sstevel@tonic-gate
6950Sstevel@tonic-gate /*
6960Sstevel@tonic-gate * Now that device tree services are available, set up the soft interrupt
6970Sstevel@tonic-gate * handlers for any queues that were created early in boot. We then
6980Sstevel@tonic-gate * manually drain these queues to report any pending early errors.
6990Sstevel@tonic-gate */
7000Sstevel@tonic-gate void
errorq_init(void)7010Sstevel@tonic-gate errorq_init(void)
7020Sstevel@tonic-gate {
7030Sstevel@tonic-gate dev_info_t *dip = ddi_root_node();
7040Sstevel@tonic-gate ddi_softintr_t id;
7050Sstevel@tonic-gate errorq_t *eqp;
7060Sstevel@tonic-gate
7070Sstevel@tonic-gate ASSERT(modrootloaded != 0);
7080Sstevel@tonic-gate ASSERT(dip != NULL);
7090Sstevel@tonic-gate
7100Sstevel@tonic-gate mutex_enter(&errorq_lock);
7110Sstevel@tonic-gate
7120Sstevel@tonic-gate for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
7130Sstevel@tonic-gate ddi_iblock_cookie_t ibc =
7140Sstevel@tonic-gate (ddi_iblock_cookie_t)(uintptr_t)ipltospl(eqp->eq_ipl);
7150Sstevel@tonic-gate
7160Sstevel@tonic-gate if (eqp->eq_id != NULL)
7170Sstevel@tonic-gate continue; /* softint already initialized */
7180Sstevel@tonic-gate
7190Sstevel@tonic-gate if (ddi_add_softintr(dip, DDI_SOFTINT_FIXED, &id, &ibc, NULL,
7200Sstevel@tonic-gate errorq_intr, (caddr_t)eqp) != DDI_SUCCESS) {
7210Sstevel@tonic-gate panic("errorq_init: failed to register IPL %u softint "
7220Sstevel@tonic-gate "for queue %s", eqp->eq_ipl, eqp->eq_name);
7230Sstevel@tonic-gate }
7240Sstevel@tonic-gate
7250Sstevel@tonic-gate eqp->eq_id = id;
7260Sstevel@tonic-gate errorq_drain(eqp);
7270Sstevel@tonic-gate }
7280Sstevel@tonic-gate
7290Sstevel@tonic-gate mutex_exit(&errorq_lock);
7300Sstevel@tonic-gate }
7310Sstevel@tonic-gate
7320Sstevel@tonic-gate /*
7330Sstevel@tonic-gate * This function is designed to be called from panic context only, and
7340Sstevel@tonic-gate * therefore does not need to acquire errorq_lock when iterating over
7350Sstevel@tonic-gate * errorq_list. This function must be called no more than once for each
7360Sstevel@tonic-gate * 'what' value (if you change this then review the manipulation of 'dep'.
7370Sstevel@tonic-gate */
7380Sstevel@tonic-gate static uint64_t
errorq_panic_drain(uint_t what)7390Sstevel@tonic-gate errorq_panic_drain(uint_t what)
7400Sstevel@tonic-gate {
741*9105SStephen.Hanson@Sun.COM errorq_elem_t *eep, *nep, *dep;
7420Sstevel@tonic-gate errorq_t *eqp;
7430Sstevel@tonic-gate uint64_t loggedtmp;
7440Sstevel@tonic-gate uint64_t logged = 0;
7450Sstevel@tonic-gate
7460Sstevel@tonic-gate for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
7470Sstevel@tonic-gate if ((eqp->eq_flags & (ERRORQ_VITAL | ERRORQ_NVLIST)) != what)
7480Sstevel@tonic-gate continue; /* do not drain this queue on this pass */
7490Sstevel@tonic-gate
7500Sstevel@tonic-gate loggedtmp = eqp->eq_kstat.eqk_logged.value.ui64;
7510Sstevel@tonic-gate
7520Sstevel@tonic-gate /*
7530Sstevel@tonic-gate * In case (1B) above, eq_ptail may be set but the casptr may
7540Sstevel@tonic-gate * not have been executed yet or may have failed. Either way,
7550Sstevel@tonic-gate * we must log errors in chronological order. So we search
7560Sstevel@tonic-gate * the pending list for the error pointed to by eq_ptail. If
7570Sstevel@tonic-gate * it is found, we know that all subsequent errors are also
7580Sstevel@tonic-gate * still on the pending list, so just NULL out eq_ptail and let
7590Sstevel@tonic-gate * errorq_drain(), below, take care of the logging.
7600Sstevel@tonic-gate */
7610Sstevel@tonic-gate for (eep = eqp->eq_pend; eep != NULL; eep = eep->eqe_prev) {
7620Sstevel@tonic-gate if (eep == eqp->eq_ptail) {
7630Sstevel@tonic-gate ASSERT(eqp->eq_phead == NULL);
7640Sstevel@tonic-gate eqp->eq_ptail = NULL;
7650Sstevel@tonic-gate break;
7660Sstevel@tonic-gate }
7670Sstevel@tonic-gate }
7680Sstevel@tonic-gate
7690Sstevel@tonic-gate /*
7700Sstevel@tonic-gate * In cases (1C) and (2) above, eq_ptail will be set to the
7710Sstevel@tonic-gate * newest error on the processing list but eq_phead will still
7720Sstevel@tonic-gate * be NULL. We set the eqe_next pointers so we can iterate
7730Sstevel@tonic-gate * over the processing list in order from oldest error to the
7740Sstevel@tonic-gate * newest error. We then set eq_phead to point to the oldest
7750Sstevel@tonic-gate * error and fall into the for-loop below.
7760Sstevel@tonic-gate */
7770Sstevel@tonic-gate if (eqp->eq_phead == NULL && (eep = eqp->eq_ptail) != NULL) {
7780Sstevel@tonic-gate for (eep->eqe_next = NULL; eep->eqe_prev != NULL;
7790Sstevel@tonic-gate eep = eep->eqe_prev)
7800Sstevel@tonic-gate eep->eqe_prev->eqe_next = eep;
7810Sstevel@tonic-gate
7820Sstevel@tonic-gate eqp->eq_phead = eep;
7830Sstevel@tonic-gate eqp->eq_ptail = NULL;
7840Sstevel@tonic-gate }
7850Sstevel@tonic-gate
7860Sstevel@tonic-gate /*
7870Sstevel@tonic-gate * In cases (3) and (4) above (or after case (1C/2) handling),
7880Sstevel@tonic-gate * eq_phead will be set to the oldest error on the processing
789*9105SStephen.Hanson@Sun.COM * list. We log each error and return it to the free pool.
7900Sstevel@tonic-gate *
7910Sstevel@tonic-gate * Unlike errorq_drain(), we don't need to worry about updating
7920Sstevel@tonic-gate * eq_phead because errorq_panic() will be called at most once.
7930Sstevel@tonic-gate * However, we must use casptr to update the freelist in case
7940Sstevel@tonic-gate * errors are still being enqueued during panic.
7950Sstevel@tonic-gate */
7960Sstevel@tonic-gate for (eep = eqp->eq_phead; eep != NULL; eep = nep) {
7970Sstevel@tonic-gate eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
7980Sstevel@tonic-gate eqp->eq_kstat.eqk_logged.value.ui64++;
7990Sstevel@tonic-gate
8000Sstevel@tonic-gate nep = eep->eqe_next;
8010Sstevel@tonic-gate eep->eqe_next = NULL;
8020Sstevel@tonic-gate
8035197Sstephh /*
8045197Sstephh * On panic, we add the element to the dump list for
8055197Sstephh * each nvlist errorq, stored oldest to newest. Then
8065197Sstephh * continue, so we don't free and subsequently overwrite
8075197Sstephh * any elements which we've put on the dump queue.
8085197Sstephh */
8095197Sstephh if (eqp->eq_flags & ERRORQ_NVLIST) {
8105197Sstephh if (eqp->eq_dump == NULL)
8115197Sstephh dep = eqp->eq_dump = eep;
8125197Sstephh else
8135197Sstephh dep = dep->eqe_dump = eep;
8145197Sstephh membar_producer();
8155197Sstephh continue;
8165197Sstephh }
8175197Sstephh
818*9105SStephen.Hanson@Sun.COM eep->eqe_prev = NULL;
819*9105SStephen.Hanson@Sun.COM BT_ATOMIC_CLEAR(eqp->eq_bitmap, eep - eqp->eq_elems);
8200Sstevel@tonic-gate }
8210Sstevel@tonic-gate
8220Sstevel@tonic-gate /*
8230Sstevel@tonic-gate * Now go ahead and drain any other errors on the pending list.
8240Sstevel@tonic-gate * This call transparently handles case (1A) above, as well as
8250Sstevel@tonic-gate * any other errors that were dispatched after errorq_drain()
8260Sstevel@tonic-gate * completed its first compare-and-swap.
8270Sstevel@tonic-gate */
8280Sstevel@tonic-gate errorq_drain(eqp);
8290Sstevel@tonic-gate
8300Sstevel@tonic-gate logged += eqp->eq_kstat.eqk_logged.value.ui64 - loggedtmp;
8310Sstevel@tonic-gate }
8320Sstevel@tonic-gate return (logged);
8330Sstevel@tonic-gate }
8340Sstevel@tonic-gate
8350Sstevel@tonic-gate /*
8360Sstevel@tonic-gate * Drain all error queues - called only from panic context. Some drain
8370Sstevel@tonic-gate * functions may enqueue errors to ERRORQ_NVLIST error queues so that
8380Sstevel@tonic-gate * they may be written out in the panic dump - so ERRORQ_NVLIST queues
8390Sstevel@tonic-gate * must be drained last. Drain ERRORQ_VITAL queues before nonvital queues
8400Sstevel@tonic-gate * so that vital errors get to fill the ERRORQ_NVLIST queues first, and
8410Sstevel@tonic-gate * do not drain the nonvital queues if there are many vital errors.
8420Sstevel@tonic-gate */
8430Sstevel@tonic-gate void
errorq_panic(void)8440Sstevel@tonic-gate errorq_panic(void)
8450Sstevel@tonic-gate {
8460Sstevel@tonic-gate ASSERT(panicstr != NULL);
8470Sstevel@tonic-gate
8480Sstevel@tonic-gate if (errorq_panic_drain(ERRORQ_VITAL) <= errorq_vitalmin)
8490Sstevel@tonic-gate (void) errorq_panic_drain(0);
8500Sstevel@tonic-gate (void) errorq_panic_drain(ERRORQ_VITAL | ERRORQ_NVLIST);
8510Sstevel@tonic-gate (void) errorq_panic_drain(ERRORQ_NVLIST);
8520Sstevel@tonic-gate }
8530Sstevel@tonic-gate
8540Sstevel@tonic-gate /*
8550Sstevel@tonic-gate * Reserve an error queue element for later processing and dispatching. The
8560Sstevel@tonic-gate * element is returned to the caller who may add error-specific data to
857*9105SStephen.Hanson@Sun.COM * element. The element is retured to the free pool when either
8580Sstevel@tonic-gate * errorq_commit() is called and the element asynchronously processed
8590Sstevel@tonic-gate * or immediately when errorq_cancel() is called.
8600Sstevel@tonic-gate */
8610Sstevel@tonic-gate errorq_elem_t *
errorq_reserve(errorq_t * eqp)8620Sstevel@tonic-gate errorq_reserve(errorq_t *eqp)
8630Sstevel@tonic-gate {
8640Sstevel@tonic-gate errorq_elem_t *eqep;
8650Sstevel@tonic-gate
8660Sstevel@tonic-gate if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
8670Sstevel@tonic-gate atomic_add_64(&errorq_lost, 1);
8680Sstevel@tonic-gate return (NULL);
8690Sstevel@tonic-gate }
8700Sstevel@tonic-gate
871*9105SStephen.Hanson@Sun.COM for (;;) {
872*9105SStephen.Hanson@Sun.COM int i, rval;
873*9105SStephen.Hanson@Sun.COM
874*9105SStephen.Hanson@Sun.COM if ((i = errorq_availbit(eqp->eq_bitmap, eqp->eq_qlen,
875*9105SStephen.Hanson@Sun.COM eqp->eq_rotor)) == -1) {
876*9105SStephen.Hanson@Sun.COM atomic_add_64(&eqp->eq_kstat.eqk_dropped.value.ui64, 1);
877*9105SStephen.Hanson@Sun.COM return (NULL);
878*9105SStephen.Hanson@Sun.COM }
879*9105SStephen.Hanson@Sun.COM BT_ATOMIC_SET_EXCL(eqp->eq_bitmap, i, rval);
880*9105SStephen.Hanson@Sun.COM if (rval == 0) {
881*9105SStephen.Hanson@Sun.COM eqp->eq_rotor = i;
882*9105SStephen.Hanson@Sun.COM eqep = &eqp->eq_elems[i];
8830Sstevel@tonic-gate break;
884*9105SStephen.Hanson@Sun.COM }
8850Sstevel@tonic-gate }
8860Sstevel@tonic-gate
8870Sstevel@tonic-gate if (eqp->eq_flags & ERRORQ_NVLIST) {
8880Sstevel@tonic-gate errorq_nvelem_t *eqnp = eqep->eqe_data;
8890Sstevel@tonic-gate nv_alloc_reset(eqnp->eqn_nva);
8900Sstevel@tonic-gate eqnp->eqn_nvl = fm_nvlist_create(eqnp->eqn_nva);
8910Sstevel@tonic-gate }
8920Sstevel@tonic-gate
8930Sstevel@tonic-gate atomic_add_64(&eqp->eq_kstat.eqk_reserved.value.ui64, 1);
8940Sstevel@tonic-gate return (eqep);
8950Sstevel@tonic-gate }
8960Sstevel@tonic-gate
8970Sstevel@tonic-gate /*
8980Sstevel@tonic-gate * Commit an errorq element (eqep) for dispatching.
8990Sstevel@tonic-gate * This function may be called from any context subject
9000Sstevel@tonic-gate * to the Platform Considerations described above.
9010Sstevel@tonic-gate */
9020Sstevel@tonic-gate void
errorq_commit(errorq_t * eqp,errorq_elem_t * eqep,uint_t flag)9030Sstevel@tonic-gate errorq_commit(errorq_t *eqp, errorq_elem_t *eqep, uint_t flag)
9040Sstevel@tonic-gate {
9050Sstevel@tonic-gate errorq_elem_t *old;
9060Sstevel@tonic-gate
9070Sstevel@tonic-gate if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
9080Sstevel@tonic-gate atomic_add_64(&eqp->eq_kstat.eqk_commit_fail.value.ui64, 1);
9090Sstevel@tonic-gate return;
9100Sstevel@tonic-gate }
9110Sstevel@tonic-gate
9120Sstevel@tonic-gate for (;;) {
9130Sstevel@tonic-gate old = eqp->eq_pend;
9140Sstevel@tonic-gate eqep->eqe_prev = old;
9150Sstevel@tonic-gate membar_producer();
9160Sstevel@tonic-gate
9170Sstevel@tonic-gate if (casptr(&eqp->eq_pend, old, eqep) == old)
9180Sstevel@tonic-gate break;
9190Sstevel@tonic-gate }
9200Sstevel@tonic-gate
9210Sstevel@tonic-gate atomic_add_64(&eqp->eq_kstat.eqk_committed.value.ui64, 1);
9220Sstevel@tonic-gate
9230Sstevel@tonic-gate if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
9240Sstevel@tonic-gate ddi_trigger_softintr(eqp->eq_id);
9250Sstevel@tonic-gate }
9260Sstevel@tonic-gate
9270Sstevel@tonic-gate /*
9280Sstevel@tonic-gate * Cancel an errorq element reservation by returning the specified element
929*9105SStephen.Hanson@Sun.COM * to the free pool. Duplicate or invalid frees are not supported.
9300Sstevel@tonic-gate */
9310Sstevel@tonic-gate void
errorq_cancel(errorq_t * eqp,errorq_elem_t * eqep)9320Sstevel@tonic-gate errorq_cancel(errorq_t *eqp, errorq_elem_t *eqep)
9330Sstevel@tonic-gate {
9340Sstevel@tonic-gate if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE))
9350Sstevel@tonic-gate return;
9360Sstevel@tonic-gate
937*9105SStephen.Hanson@Sun.COM BT_ATOMIC_CLEAR(eqp->eq_bitmap, eqep - eqp->eq_elems);
9380Sstevel@tonic-gate
9390Sstevel@tonic-gate atomic_add_64(&eqp->eq_kstat.eqk_cancelled.value.ui64, 1);
9400Sstevel@tonic-gate }
9410Sstevel@tonic-gate
9420Sstevel@tonic-gate /*
9430Sstevel@tonic-gate * Write elements on the dump list of each nvlist errorq to the dump device.
9440Sstevel@tonic-gate * Upon reboot, fmd(1M) will extract and replay them for diagnosis.
9450Sstevel@tonic-gate */
9460Sstevel@tonic-gate void
errorq_dump(void)9470Sstevel@tonic-gate errorq_dump(void)
9480Sstevel@tonic-gate {
9490Sstevel@tonic-gate errorq_elem_t *eep;
9500Sstevel@tonic-gate errorq_t *eqp;
9510Sstevel@tonic-gate
9520Sstevel@tonic-gate if (ereport_dumpbuf == NULL)
9530Sstevel@tonic-gate return; /* reboot or panic before errorq is even set up */
9540Sstevel@tonic-gate
9550Sstevel@tonic-gate for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
9560Sstevel@tonic-gate if (!(eqp->eq_flags & ERRORQ_NVLIST) ||
9570Sstevel@tonic-gate !(eqp->eq_flags & ERRORQ_ACTIVE))
9580Sstevel@tonic-gate continue; /* do not dump this queue on panic */
9590Sstevel@tonic-gate
9600Sstevel@tonic-gate for (eep = eqp->eq_dump; eep != NULL; eep = eep->eqe_dump) {
9610Sstevel@tonic-gate errorq_nvelem_t *eqnp = eep->eqe_data;
9620Sstevel@tonic-gate size_t len = 0;
9630Sstevel@tonic-gate erpt_dump_t ed;
9640Sstevel@tonic-gate int err;
9650Sstevel@tonic-gate
9660Sstevel@tonic-gate (void) nvlist_size(eqnp->eqn_nvl,
9670Sstevel@tonic-gate &len, NV_ENCODE_NATIVE);
9680Sstevel@tonic-gate
9690Sstevel@tonic-gate if (len > ereport_dumplen || len == 0) {
9700Sstevel@tonic-gate cmn_err(CE_WARN, "%s: unable to save error "
9710Sstevel@tonic-gate "report %p due to size %lu\n",
9720Sstevel@tonic-gate eqp->eq_name, (void *)eep, len);
9730Sstevel@tonic-gate continue;
9740Sstevel@tonic-gate }
9750Sstevel@tonic-gate
9760Sstevel@tonic-gate if ((err = nvlist_pack(eqnp->eqn_nvl,
9770Sstevel@tonic-gate (char **)&ereport_dumpbuf, &ereport_dumplen,
9780Sstevel@tonic-gate NV_ENCODE_NATIVE, KM_NOSLEEP)) != 0) {
9790Sstevel@tonic-gate cmn_err(CE_WARN, "%s: unable to save error "
9800Sstevel@tonic-gate "report %p due to pack error %d\n",
9810Sstevel@tonic-gate eqp->eq_name, (void *)eep, err);
9820Sstevel@tonic-gate continue;
9830Sstevel@tonic-gate }
9840Sstevel@tonic-gate
9850Sstevel@tonic-gate ed.ed_magic = ERPT_MAGIC;
9860Sstevel@tonic-gate ed.ed_chksum = checksum32(ereport_dumpbuf, len);
9870Sstevel@tonic-gate ed.ed_size = (uint32_t)len;
9880Sstevel@tonic-gate ed.ed_pad = 0;
9890Sstevel@tonic-gate ed.ed_hrt_nsec = 0;
9900Sstevel@tonic-gate ed.ed_hrt_base = panic_hrtime;
9910Sstevel@tonic-gate ed.ed_tod_base.sec = panic_hrestime.tv_sec;
9920Sstevel@tonic-gate ed.ed_tod_base.nsec = panic_hrestime.tv_nsec;
9930Sstevel@tonic-gate
9940Sstevel@tonic-gate dumpvp_write(&ed, sizeof (ed));
9950Sstevel@tonic-gate dumpvp_write(ereport_dumpbuf, len);
9960Sstevel@tonic-gate }
9970Sstevel@tonic-gate }
9980Sstevel@tonic-gate }
9990Sstevel@tonic-gate
10000Sstevel@tonic-gate nvlist_t *
errorq_elem_nvl(errorq_t * eqp,const errorq_elem_t * eqep)10010Sstevel@tonic-gate errorq_elem_nvl(errorq_t *eqp, const errorq_elem_t *eqep)
10020Sstevel@tonic-gate {
10030Sstevel@tonic-gate errorq_nvelem_t *eqnp = eqep->eqe_data;
10040Sstevel@tonic-gate
10050Sstevel@tonic-gate ASSERT(eqp->eq_flags & ERRORQ_ACTIVE && eqp->eq_flags & ERRORQ_NVLIST);
10060Sstevel@tonic-gate
10070Sstevel@tonic-gate return (eqnp->eqn_nvl);
10080Sstevel@tonic-gate }
10090Sstevel@tonic-gate
10100Sstevel@tonic-gate nv_alloc_t *
errorq_elem_nva(errorq_t * eqp,const errorq_elem_t * eqep)10110Sstevel@tonic-gate errorq_elem_nva(errorq_t *eqp, const errorq_elem_t *eqep)
10120Sstevel@tonic-gate {
10130Sstevel@tonic-gate errorq_nvelem_t *eqnp = eqep->eqe_data;
10140Sstevel@tonic-gate
10150Sstevel@tonic-gate ASSERT(eqp->eq_flags & ERRORQ_ACTIVE && eqp->eq_flags & ERRORQ_NVLIST);
10160Sstevel@tonic-gate
10170Sstevel@tonic-gate return (eqnp->eqn_nva);
10180Sstevel@tonic-gate }
10190Sstevel@tonic-gate
10200Sstevel@tonic-gate /*
10210Sstevel@tonic-gate * Reserve a new element and duplicate the data of the original into it.
10220Sstevel@tonic-gate */
10230Sstevel@tonic-gate void *
errorq_elem_dup(errorq_t * eqp,const errorq_elem_t * eqep,errorq_elem_t ** neqep)10240Sstevel@tonic-gate errorq_elem_dup(errorq_t *eqp, const errorq_elem_t *eqep, errorq_elem_t **neqep)
10250Sstevel@tonic-gate {
10260Sstevel@tonic-gate ASSERT(eqp->eq_flags & ERRORQ_ACTIVE);
10270Sstevel@tonic-gate ASSERT(!(eqp->eq_flags & ERRORQ_NVLIST));
10280Sstevel@tonic-gate
10290Sstevel@tonic-gate if ((*neqep = errorq_reserve(eqp)) == NULL)
10300Sstevel@tonic-gate return (NULL);
10310Sstevel@tonic-gate
10320Sstevel@tonic-gate bcopy(eqep->eqe_data, (*neqep)->eqe_data, eqp->eq_size);
10330Sstevel@tonic-gate return ((*neqep)->eqe_data);
10340Sstevel@tonic-gate }
1035