xref: /onnv-gate/usr/src/cmd/fm/fmd/common/fmd_eventq.c (revision 12066:b5d1c83f5cfc)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
53159Sstephh  * Common Development and Distribution License (the "License").
63159Sstephh  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
211193Smws 
220Sstevel@tonic-gate /*
23*12066SRobert.Johnston@Sun.COM  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #include <fmd_alloc.h>
270Sstevel@tonic-gate #include <fmd_eventq.h>
280Sstevel@tonic-gate #include <fmd_module.h>
291193Smws #include <fmd_dispq.h>
301193Smws #include <fmd_subr.h>
311193Smws 
321193Smws #include <fmd.h>
330Sstevel@tonic-gate 
340Sstevel@tonic-gate fmd_eventq_t *
fmd_eventq_create(fmd_module_t * mp,fmd_eventqstat_t * stats,pthread_mutex_t * stats_lock,uint_t limit)351193Smws fmd_eventq_create(fmd_module_t *mp, fmd_eventqstat_t *stats,
361193Smws     pthread_mutex_t *stats_lock, uint_t limit)
370Sstevel@tonic-gate {
380Sstevel@tonic-gate 	fmd_eventq_t *eq = fmd_zalloc(sizeof (fmd_eventq_t), FMD_SLEEP);
390Sstevel@tonic-gate 
400Sstevel@tonic-gate 	(void) pthread_mutex_init(&eq->eq_lock, NULL);
410Sstevel@tonic-gate 	(void) pthread_cond_init(&eq->eq_cv, NULL);
420Sstevel@tonic-gate 
430Sstevel@tonic-gate 	eq->eq_mod = mp;
441193Smws 	eq->eq_stats = stats;
451193Smws 	eq->eq_stats_lock = stats_lock;
460Sstevel@tonic-gate 	eq->eq_limit = limit;
471193Smws 	eq->eq_sgid = fmd_dispq_getgid(fmd.d_disp, eq);
480Sstevel@tonic-gate 
490Sstevel@tonic-gate 	return (eq);
500Sstevel@tonic-gate }
510Sstevel@tonic-gate 
520Sstevel@tonic-gate void
fmd_eventq_destroy(fmd_eventq_t * eq)530Sstevel@tonic-gate fmd_eventq_destroy(fmd_eventq_t *eq)
540Sstevel@tonic-gate {
550Sstevel@tonic-gate 	fmd_eventqelem_t *eqe;
560Sstevel@tonic-gate 
570Sstevel@tonic-gate 	while ((eqe = fmd_list_next(&eq->eq_list)) != NULL) {
580Sstevel@tonic-gate 		fmd_list_delete(&eq->eq_list, eqe);
590Sstevel@tonic-gate 		fmd_event_rele(eqe->eqe_event);
600Sstevel@tonic-gate 		fmd_free(eqe, sizeof (fmd_eventqelem_t));
610Sstevel@tonic-gate 	}
620Sstevel@tonic-gate 
631193Smws 	fmd_dispq_delgid(fmd.d_disp, eq->eq_sgid);
640Sstevel@tonic-gate 	fmd_free(eq, sizeof (fmd_eventq_t));
650Sstevel@tonic-gate }
660Sstevel@tonic-gate 
670Sstevel@tonic-gate static void
fmd_eventq_drop(fmd_eventq_t * eq,fmd_eventqelem_t * eqe)680Sstevel@tonic-gate fmd_eventq_drop(fmd_eventq_t *eq, fmd_eventqelem_t *eqe)
690Sstevel@tonic-gate {
701193Smws 	(void) pthread_mutex_lock(eq->eq_stats_lock);
711193Smws 	eq->eq_stats->eqs_dropped.fmds_value.ui64++;
721193Smws 	(void) pthread_mutex_unlock(eq->eq_stats_lock);
730Sstevel@tonic-gate 
740Sstevel@tonic-gate 	fmd_event_rele(eqe->eqe_event);
750Sstevel@tonic-gate 	fmd_free(eqe, sizeof (fmd_eventqelem_t));
760Sstevel@tonic-gate }
770Sstevel@tonic-gate 
78*12066SRobert.Johnston@Sun.COM void
fmd_eventq_drop_topo(fmd_eventq_t * eq)79*12066SRobert.Johnston@Sun.COM fmd_eventq_drop_topo(fmd_eventq_t *eq)
80*12066SRobert.Johnston@Sun.COM {
81*12066SRobert.Johnston@Sun.COM 	fmd_eventqelem_t *eqe, *tmp;
82*12066SRobert.Johnston@Sun.COM 	boolean_t got_fm_events = B_FALSE;
83*12066SRobert.Johnston@Sun.COM 
84*12066SRobert.Johnston@Sun.COM 	/*
85*12066SRobert.Johnston@Sun.COM 	 * Here we iterate through the per-module event queue in order to remove
86*12066SRobert.Johnston@Sun.COM 	 * redundant FMD_EVT_TOPO events.  The trick is to not drop a given
87*12066SRobert.Johnston@Sun.COM 	 * topo event if there are any FM protocol events in the queue after
88*12066SRobert.Johnston@Sun.COM 	 * it, as those events need to be processed with the correct topology.
89*12066SRobert.Johnston@Sun.COM 	 */
90*12066SRobert.Johnston@Sun.COM 	(void) pthread_mutex_lock(&eq->eq_lock);
91*12066SRobert.Johnston@Sun.COM 	eqe = fmd_list_prev(&eq->eq_list);
92*12066SRobert.Johnston@Sun.COM 	while (eqe) {
93*12066SRobert.Johnston@Sun.COM 		if (FMD_EVENT_TYPE(eqe->eqe_event) == FMD_EVT_TOPO) {
94*12066SRobert.Johnston@Sun.COM 			if (!got_fm_events) {
95*12066SRobert.Johnston@Sun.COM 				tmp = eqe;
96*12066SRobert.Johnston@Sun.COM 				eqe = fmd_list_prev(eqe);
97*12066SRobert.Johnston@Sun.COM 				fmd_list_delete(&eq->eq_list, tmp);
98*12066SRobert.Johnston@Sun.COM 				eq->eq_size--;
99*12066SRobert.Johnston@Sun.COM 				fmd_eventq_drop(eq, tmp);
100*12066SRobert.Johnston@Sun.COM 			} else {
101*12066SRobert.Johnston@Sun.COM 				got_fm_events = B_FALSE;
102*12066SRobert.Johnston@Sun.COM 				eqe = fmd_list_prev(eqe);
103*12066SRobert.Johnston@Sun.COM 			}
104*12066SRobert.Johnston@Sun.COM 		} else if (FMD_EVENT_TYPE(eqe->eqe_event) == FMD_EVT_PROTOCOL) {
105*12066SRobert.Johnston@Sun.COM 			got_fm_events = B_TRUE;
106*12066SRobert.Johnston@Sun.COM 			eqe = fmd_list_prev(eqe);
107*12066SRobert.Johnston@Sun.COM 		} else
108*12066SRobert.Johnston@Sun.COM 			eqe = fmd_list_prev(eqe);
109*12066SRobert.Johnston@Sun.COM 	}
110*12066SRobert.Johnston@Sun.COM 	(void) pthread_mutex_unlock(&eq->eq_lock);
111*12066SRobert.Johnston@Sun.COM }
112*12066SRobert.Johnston@Sun.COM 
1131193Smws /*
1141193Smws  * Update statistics when an event is dispatched and placed on a module's event
1151193Smws  * queue.  This is essentially the same code as kstat_waitq_enter(9F).
1161193Smws  */
1171193Smws static void
fmd_eventqstat_dispatch(fmd_eventq_t * eq)1181193Smws fmd_eventqstat_dispatch(fmd_eventq_t *eq)
1191193Smws {
1201193Smws 	fmd_eventqstat_t *eqs = eq->eq_stats;
1211193Smws 	hrtime_t new, delta;
1221193Smws 	uint32_t wcnt;
1231193Smws 
1241193Smws 	(void) pthread_mutex_lock(eq->eq_stats_lock);
1251193Smws 
1261193Smws 	new = gethrtime();
1271193Smws 	delta = new - eqs->eqs_wlastupdate.fmds_value.ui64;
1281193Smws 	eqs->eqs_wlastupdate.fmds_value.ui64 = new;
1291193Smws 	wcnt = eqs->eqs_wcnt.fmds_value.ui32++;
1301193Smws 
1311193Smws 	if (wcnt != 0) {
1321193Smws 		eqs->eqs_wlentime.fmds_value.ui64 += delta * wcnt;
1331193Smws 		eqs->eqs_wtime.fmds_value.ui64 += delta;
1341193Smws 	}
1351193Smws 
1361193Smws 	eqs->eqs_dispatched.fmds_value.ui64++;
1371193Smws 	(void) pthread_mutex_unlock(eq->eq_stats_lock);
1381193Smws }
1391193Smws 
1400Sstevel@tonic-gate void
fmd_eventq_insert_at_head(fmd_eventq_t * eq,fmd_event_t * ep)1410Sstevel@tonic-gate fmd_eventq_insert_at_head(fmd_eventq_t *eq, fmd_event_t *ep)
1420Sstevel@tonic-gate {
1431193Smws 	uint_t evt = FMD_EVENT_TYPE(ep);
1441193Smws 	fmd_eventqelem_t *eqe;
1450Sstevel@tonic-gate 	int ok;
1460Sstevel@tonic-gate 
1471193Smws 	/*
1481193Smws 	 * If this event queue is acting as /dev/null, bounce the reference
1491193Smws 	 * count to free an unreferenced event and just return immediately.
1501193Smws 	 */
1511193Smws 	if (eq->eq_limit == 0) {
1521193Smws 		fmd_event_hold(ep);
1531193Smws 		fmd_event_rele(ep);
1541193Smws 		return;
1551193Smws 	}
1561193Smws 
1571193Smws 	eqe = fmd_alloc(sizeof (fmd_eventqelem_t), FMD_SLEEP);
1580Sstevel@tonic-gate 	fmd_event_hold(ep);
1590Sstevel@tonic-gate 	eqe->eqe_event = ep;
1600Sstevel@tonic-gate 
1610Sstevel@tonic-gate 	(void) pthread_mutex_lock(&eq->eq_lock);
1620Sstevel@tonic-gate 
1630Sstevel@tonic-gate 	if ((ok = eq->eq_size < eq->eq_limit || evt != FMD_EVT_PROTOCOL) != 0) {
1640Sstevel@tonic-gate 		if (evt != FMD_EVT_CTL)
1651193Smws 			fmd_eventqstat_dispatch(eq);
1660Sstevel@tonic-gate 
1670Sstevel@tonic-gate 		fmd_list_prepend(&eq->eq_list, eqe);
1680Sstevel@tonic-gate 		eq->eq_size++;
1690Sstevel@tonic-gate 	}
1700Sstevel@tonic-gate 
1711193Smws 	(void) pthread_cond_broadcast(&eq->eq_cv);
1720Sstevel@tonic-gate 	(void) pthread_mutex_unlock(&eq->eq_lock);
1730Sstevel@tonic-gate 
1740Sstevel@tonic-gate 	if (!ok)
1750Sstevel@tonic-gate 		fmd_eventq_drop(eq, eqe);
1760Sstevel@tonic-gate }
1770Sstevel@tonic-gate 
1780Sstevel@tonic-gate void
fmd_eventq_insert_at_time(fmd_eventq_t * eq,fmd_event_t * ep)1790Sstevel@tonic-gate fmd_eventq_insert_at_time(fmd_eventq_t *eq, fmd_event_t *ep)
1800Sstevel@tonic-gate {
1811193Smws 	uint_t evt = FMD_EVENT_TYPE(ep);
1820Sstevel@tonic-gate 	hrtime_t hrt = fmd_event_hrtime(ep);
1831193Smws 	fmd_eventqelem_t *eqe, *oqe;
1840Sstevel@tonic-gate 	int ok;
1850Sstevel@tonic-gate 
1861193Smws 	/*
1871193Smws 	 * If this event queue is acting as /dev/null, bounce the reference
1881193Smws 	 * count to free an unreferenced event and just return immediately.
1891193Smws 	 */
1901193Smws 	if (eq->eq_limit == 0) {
1911193Smws 		fmd_event_hold(ep);
1921193Smws 		fmd_event_rele(ep);
1931193Smws 		return;
1941193Smws 	}
1951193Smws 
1961193Smws 	eqe = fmd_alloc(sizeof (fmd_eventqelem_t), FMD_SLEEP);
1970Sstevel@tonic-gate 	fmd_event_hold(ep);
1980Sstevel@tonic-gate 	eqe->eqe_event = ep;
1990Sstevel@tonic-gate 
2000Sstevel@tonic-gate 	(void) pthread_mutex_lock(&eq->eq_lock);
2010Sstevel@tonic-gate 
2020Sstevel@tonic-gate 	/*
2030Sstevel@tonic-gate 	 * fmd makes no guarantees that events will be delivered in time order
2040Sstevel@tonic-gate 	 * because its transport can make no such guarantees.  Instead we make
2050Sstevel@tonic-gate 	 * a looser guarantee that an enqueued event will be dequeued before
2060Sstevel@tonic-gate 	 * any newer *pending* events according to event time.  This permits us
2070Sstevel@tonic-gate 	 * to state, for example, that a timer expiry event will be delivered
2080Sstevel@tonic-gate 	 * prior to any enqueued event whose time is after the timer expired.
2090Sstevel@tonic-gate 	 * We use a simple insertion sort for this task, as queue lengths are
2100Sstevel@tonic-gate 	 * typically short and events do *tend* to be received chronologically.
2110Sstevel@tonic-gate 	 */
2120Sstevel@tonic-gate 	for (oqe = fmd_list_prev(&eq->eq_list); oqe; oqe = fmd_list_prev(oqe)) {
2130Sstevel@tonic-gate 		if (hrt >= fmd_event_hrtime(oqe->eqe_event))
2140Sstevel@tonic-gate 			break; /* 'ep' is newer than the event in 'oqe' */
2150Sstevel@tonic-gate 	}
2160Sstevel@tonic-gate 
2170Sstevel@tonic-gate 	if ((ok = eq->eq_size < eq->eq_limit || evt != FMD_EVT_PROTOCOL) != 0) {
2180Sstevel@tonic-gate 		if (evt != FMD_EVT_CTL)
2191193Smws 			fmd_eventqstat_dispatch(eq);
2200Sstevel@tonic-gate 
2213159Sstephh 		if (oqe == NULL)
2223159Sstephh 			fmd_list_prepend(&eq->eq_list, eqe);
2233159Sstephh 		else
2243159Sstephh 			fmd_list_insert_after(&eq->eq_list, oqe, eqe);
2250Sstevel@tonic-gate 		eq->eq_size++;
2260Sstevel@tonic-gate 	}
2270Sstevel@tonic-gate 
2281193Smws 	(void) pthread_cond_broadcast(&eq->eq_cv);
2290Sstevel@tonic-gate 	(void) pthread_mutex_unlock(&eq->eq_lock);
2300Sstevel@tonic-gate 
2310Sstevel@tonic-gate 	if (!ok)
2320Sstevel@tonic-gate 		fmd_eventq_drop(eq, eqe);
2330Sstevel@tonic-gate }
2340Sstevel@tonic-gate 
/*
 * Dequeue the next event, blocking until one is available.  Returns the
 * event with the queue's hold on it transferred to the caller, or NULL if
 * FMD_EVENTQ_ABORT has been set on the queue.  Control events are consumed
 * internally (released and skipped) and are never returned.
 */
fmd_event_t *
fmd_eventq_delete(fmd_eventq_t *eq)
{
	fmd_eventqstat_t *eqs = eq->eq_stats;
	hrtime_t new, delta;
	uint32_t wcnt;

	fmd_eventqelem_t *eqe;
	fmd_event_t *ep;
top:
	(void) pthread_mutex_lock(&eq->eq_lock);

	/* block while not aborted and the queue is empty or suspended */
	while (!(eq->eq_flags & FMD_EVENTQ_ABORT) &&
	    (eq->eq_size == 0 || (eq->eq_flags & FMD_EVENTQ_SUSPEND)))
		(void) pthread_cond_wait(&eq->eq_cv, &eq->eq_lock);

	if (eq->eq_flags & FMD_EVENTQ_ABORT) {
		(void) pthread_mutex_unlock(&eq->eq_lock);
		return (NULL);
	}

	eqe = fmd_list_next(&eq->eq_list);
	fmd_list_delete(&eq->eq_list, eqe);
	eq->eq_size--;

	(void) pthread_mutex_unlock(&eq->eq_lock);

	/* the element is freed; its hold on the event passes to the caller */
	ep = eqe->eqe_event;
	fmd_free(eqe, sizeof (fmd_eventqelem_t));

	/*
	 * If we dequeued a control event, release it and go back to sleep.
	 * fmd_event_rele() on the event will block as described in fmd_ctl.c.
	 * This effectively renders control events invisible to our callers
	 * as well as to statistics and observability tools (e.g. fmstat(1M)).
	 */
	if (FMD_EVENT_TYPE(ep) == FMD_EVT_CTL) {
		fmd_event_rele(ep);
		goto top;
	}

	/*
	 * Before returning, update our statistics.  This code is essentially
	 * kstat_waitq_to_runq(9F), except simplified because our queues are
	 * always consumed by a single thread (i.e. runq len == 1).
	 */
	(void) pthread_mutex_lock(eq->eq_stats_lock);

	new = gethrtime();
	delta = new - eqs->eqs_wlastupdate.fmds_value.ui64;

	eqs->eqs_wlastupdate.fmds_value.ui64 = new;
	eqs->eqs_dlastupdate.fmds_value.ui64 = new;

	/* wcnt captures the pre-decrement count: the departing entry is
	 * still charged for the interval it spent waiting */
	ASSERT(eqs->eqs_wcnt.fmds_value.ui32 != 0);
	wcnt = eqs->eqs_wcnt.fmds_value.ui32--;

	eqs->eqs_wlentime.fmds_value.ui64 += delta * wcnt;
	eqs->eqs_wtime.fmds_value.ui64 += delta;

	if (FMD_EVENT_TYPE(ep) == FMD_EVT_PROTOCOL)
		eqs->eqs_prdequeued.fmds_value.ui64++;

	eqs->eqs_dequeued.fmds_value.ui64++;
	(void) pthread_mutex_unlock(eq->eq_stats_lock);

	return (ep);
}
3030Sstevel@tonic-gate 
3041193Smws /*
3051193Smws  * Update statistics when an event is done being processed by the eventq's
3061193Smws  * consumer thread.  This is essentially kstat_runq_exit(9F) simplified for
3071193Smws  * our principle that a single thread consumes the queue (i.e. runq len == 1).
3081193Smws  */
3091193Smws void
fmd_eventq_done(fmd_eventq_t * eq)3101193Smws fmd_eventq_done(fmd_eventq_t *eq)
3111193Smws {
3121193Smws 	fmd_eventqstat_t *eqs = eq->eq_stats;
3131193Smws 	hrtime_t new, delta;
3141193Smws 
3151193Smws 	(void) pthread_mutex_lock(eq->eq_stats_lock);
3161193Smws 
3171193Smws 	new = gethrtime();
3181193Smws 	delta = new - eqs->eqs_dlastupdate.fmds_value.ui64;
3191193Smws 
3201193Smws 	eqs->eqs_dlastupdate.fmds_value.ui64 = new;
3211193Smws 	eqs->eqs_dtime.fmds_value.ui64 += delta;
3221193Smws 
3231193Smws 	(void) pthread_mutex_unlock(eq->eq_stats_lock);
3241193Smws }
3251193Smws 
3260Sstevel@tonic-gate void
fmd_eventq_cancel(fmd_eventq_t * eq,uint_t type,void * data)3270Sstevel@tonic-gate fmd_eventq_cancel(fmd_eventq_t *eq, uint_t type, void *data)
3280Sstevel@tonic-gate {
3290Sstevel@tonic-gate 	fmd_eventqelem_t *eqe, *nqe;
3300Sstevel@tonic-gate 
3310Sstevel@tonic-gate 	(void) pthread_mutex_lock(&eq->eq_lock);
3320Sstevel@tonic-gate 
3330Sstevel@tonic-gate 	for (eqe = fmd_list_next(&eq->eq_list); eqe != NULL; eqe = nqe) {
3340Sstevel@tonic-gate 		nqe = fmd_list_next(eqe);
3350Sstevel@tonic-gate 
3360Sstevel@tonic-gate 		if (fmd_event_match(eqe->eqe_event, type, data)) {
3370Sstevel@tonic-gate 			fmd_list_delete(&eq->eq_list, eqe);
3380Sstevel@tonic-gate 			eq->eq_size--;
3390Sstevel@tonic-gate 			fmd_event_rele(eqe->eqe_event);
3400Sstevel@tonic-gate 			fmd_free(eqe, sizeof (fmd_eventqelem_t));
3410Sstevel@tonic-gate 		}
3420Sstevel@tonic-gate 	}
3430Sstevel@tonic-gate 
3440Sstevel@tonic-gate 	(void) pthread_mutex_unlock(&eq->eq_lock);
3450Sstevel@tonic-gate }
3460Sstevel@tonic-gate 
/*
 * Suspend consumption of the queue: set FMD_EVENTQ_SUSPEND so that threads
 * in fmd_eventq_delete() block until fmd_eventq_resume() clears the flag.
 * Producers may continue to enqueue events while suspended.
 */
void
fmd_eventq_suspend(fmd_eventq_t *eq)
{
	(void) pthread_mutex_lock(&eq->eq_lock);
	eq->eq_flags |= FMD_EVENTQ_SUSPEND;
	(void) pthread_mutex_unlock(&eq->eq_lock);
}
3541193Smws 
/*
 * Resume consumption of a suspended queue: clear FMD_EVENTQ_SUSPEND and
 * broadcast on the condition variable so any consumer blocked in
 * fmd_eventq_delete() re-evaluates its wait predicate.
 */
void
fmd_eventq_resume(fmd_eventq_t *eq)
{
	(void) pthread_mutex_lock(&eq->eq_lock);
	eq->eq_flags &= ~FMD_EVENTQ_SUSPEND;
	(void) pthread_cond_broadcast(&eq->eq_cv);
	(void) pthread_mutex_unlock(&eq->eq_lock);
}
3631193Smws 
3641193Smws void
fmd_eventq_abort(fmd_eventq_t * eq)3650Sstevel@tonic-gate fmd_eventq_abort(fmd_eventq_t *eq)
3660Sstevel@tonic-gate {
3670Sstevel@tonic-gate 	fmd_eventqelem_t *eqe;
3680Sstevel@tonic-gate 
3690Sstevel@tonic-gate 	(void) pthread_mutex_lock(&eq->eq_lock);
3700Sstevel@tonic-gate 
3710Sstevel@tonic-gate 	while ((eqe = fmd_list_next(&eq->eq_list)) != NULL) {
3720Sstevel@tonic-gate 		fmd_list_delete(&eq->eq_list, eqe);
3730Sstevel@tonic-gate 		fmd_event_rele(eqe->eqe_event);
3740Sstevel@tonic-gate 		fmd_free(eqe, sizeof (fmd_eventqelem_t));
3750Sstevel@tonic-gate 	}
3760Sstevel@tonic-gate 
3771193Smws 	eq->eq_flags |= FMD_EVENTQ_ABORT;
3781193Smws 	(void) pthread_cond_broadcast(&eq->eq_cv);
3790Sstevel@tonic-gate 	(void) pthread_mutex_unlock(&eq->eq_lock);
3800Sstevel@tonic-gate }
381