xref: /onnv-gate/usr/src/lib/libc/port/threads/cancel.c (revision 11411:c2fe1bf96826)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include "thr_uberdata.h"

/*
 * pthread_cancel: tries to cancel the targeted thread.
 * If the target thread has already exited, no action is taken.
 * Otherwise, SIGCANCEL is sent to request that the target thread
 * cancel itself.
 */
int
pthread_cancel(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (ulwp->ul_cancel_pending) {
		/*
		 * Don't send SIGCANCEL more than once.
		 */
		ulwp_unlock(ulwp, udp);
	} else if (ulwp == self) {
		/*
		 * Unlock self before cancelling.
		 */
		ulwp_unlock(self, udp);
		self->ul_nocancel = 0;	/* cancellation is now possible */
		if (self->ul_sigdefer == 0)
			do_sigcancel();
		else {
			self->ul_cancel_pending = 1;
			set_cancel_pending_flag(self, 0);
		}
	} else if (ulwp->ul_cancel_disabled) {
		/*
		 * Don't send SIGCANCEL if cancellation is disabled;
		 * just set the thread's ulwp->ul_cancel_pending flag.
		 * This avoids a potential EINTR for the target thread.
		 * We don't call set_cancel_pending_flag() here because
		 * we cannot modify another thread's schedctl data.
		 */
		ulwp->ul_cancel_pending = 1;
		ulwp_unlock(ulwp, udp);
	} else {
		/*
		 * Request the other thread to cancel itself.
		 */
		error = _lwp_kill(tid, SIGCANCEL);
		ulwp_unlock(ulwp, udp);
	}

	return (error);
}

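/*
 * Illustrative example (not part of libc): a typical application
 * cancels a worker and then reaps it with pthread_join(), checking
 * for PTHREAD_CANCELED.  The worker() function is a hypothetical
 * placeholder assumed to block in a cancellation point.
 *
 *	#include <pthread.h>
 *	#include <stdio.h>
 *
 *	extern void *worker(void *);
 *
 *	int
 *	demo(void)
 *	{
 *		pthread_t tid;
 *		void *status;
 *
 *		if (pthread_create(&tid, NULL, worker, NULL) != 0)
 *			return (-1);
 *		(void) pthread_cancel(tid);
 *		(void) pthread_join(tid, &status);
 *		if (status == PTHREAD_CANCELED)
 *			(void) printf("worker was cancelled\n");
 *		return (0);
 *	}
 */
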
/*
 * pthread_setcancelstate: sets the state ENABLED or DISABLED.
 * If the state is already ENABLED or is being set to ENABLED,
 * the type of cancellation is ASYNCHRONOUS, and a cancel request
 * is pending, then the thread is cancelled right here.
 * Otherwise, pthread_setcancelstate() is not a cancellation point.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int was_disabled;

	/*
	 * Grab ulwp_lock(self) to protect the setting of ul_cancel_disabled
	 * since it is tested under this lock by pthread_cancel(), above.
	 * This has the side-effect of calling enter_critical() and this
	 * defers SIGCANCEL until ulwp_unlock(self) when exit_critical()
	 * is called.  (self->ul_cancel_pending is set in the SIGCANCEL
	 * handler and we must be async-signal safe here.)
	 */
	ulwp_lock(self, udp);

	was_disabled = self->ul_cancel_disabled;
	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		self->ul_cancel_disabled = 0;
		break;
	case PTHREAD_CANCEL_DISABLE:
		self->ul_cancel_disabled = 1;
		break;
	default:
		ulwp_unlock(self, udp);
		return (EINVAL);
	}
	set_cancel_pending_flag(self, 0);

	/*
	 * If this thread has been requested to be canceled and
	 * is in async mode and is or was enabled, then exit.
	 */
	if ((!self->ul_cancel_disabled || !was_disabled) &&
	    self->ul_cancel_async && self->ul_cancel_pending) {
		ulwp_unlock(self, udp);
		pthread_exit(PTHREAD_CANCELED);
	}

	ulwp_unlock(self, udp);

	if (oldstate != NULL) {
		if (was_disabled)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}
	return (0);
}

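/*
 * Illustrative example (not part of libc): protecting a region that
 * must not be cancelled midway by disabling cancellation and then
 * restoring the caller's previous state.  update_shared_state() is a
 * hypothetical placeholder.
 *
 *	#include <pthread.h>
 *
 *	extern void update_shared_state(void);
 *
 *	void
 *	critical_update(void)
 *	{
 *		int oldstate;
 *
 *		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
 *		    &oldstate);
 *		update_shared_state();	(safe from cancellation here)
 *		(void) pthread_setcancelstate(oldstate, NULL);
 *	}
 */
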
/*
 * pthread_setcanceltype: sets the type DEFERRED or ASYNCHRONOUS.
 * If the type is being set to ASYNCHRONOUS, this becomes a
 * cancellation point if there is a cancellation pending.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	ulwp_t *self = curthread;
	int was_async;

	/*
	 * Call enter_critical() to defer SIGCANCEL until exit_critical().
	 * We do this because curthread->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.
	 */
	enter_critical(self);

	was_async = self->ul_cancel_async;
	switch (type) {
	case PTHREAD_CANCEL_ASYNCHRONOUS:
		self->ul_cancel_async = 1;
		break;
	case PTHREAD_CANCEL_DEFERRED:
		self->ul_cancel_async = 0;
		break;
	default:
		exit_critical(self);
		return (EINVAL);
	}
	self->ul_save_async = self->ul_cancel_async;

	/*
	 * If this thread has been requested to be canceled and
	 * is in enabled mode and is or was in async mode, exit.
	 */
	if ((self->ul_cancel_async || was_async) &&
	    self->ul_cancel_pending && !self->ul_cancel_disabled) {
		exit_critical(self);
		pthread_exit(PTHREAD_CANCELED);
	}

	exit_critical(self);

	if (oldtype != NULL) {
		if (was_async)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}
	return (0);
}

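/*
 * Illustrative example (not part of libc): a purely compute-bound
 * loop that never reaches a cancellation point can opt into
 * asynchronous cancellation so that pthread_cancel() takes effect
 * immediately.  crunch() is a hypothetical placeholder and must be
 * async-cancel safe (no locks held, no resources to leak).
 *
 *	#include <pthread.h>
 *
 *	extern void crunch(void);
 *
 *	void *
 *	spin_worker(void *arg)
 *	{
 *		int oldtype;
 *
 *		(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS,
 *		    &oldtype);
 *		for (;;)
 *			crunch();
 *		(not reached)
 *	}
 */
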
/*
 * pthread_testcancel: tests for a pending cancellation.
 * If cancellation is enabled and a cancel request is pending,
 * act on it by calling pthread_exit(), which takes care of
 * calling the cleanup handlers.
 */
void
pthread_testcancel(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_pending && !self->ul_cancel_disabled)
		pthread_exit(PTHREAD_CANCELED);
}

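/*
 * Illustrative example (not part of libc): a deferred-mode thread
 * that makes no blocking calls can poll for cancellation once per
 * iteration.  do_chunk_of_work() is a hypothetical placeholder.
 *
 *	#include <pthread.h>
 *
 *	extern int do_chunk_of_work(void);
 *
 *	void *
 *	poll_worker(void *arg)
 *	{
 *		while (do_chunk_of_work() != 0)
 *			pthread_testcancel();	(may not return)
 *		return (NULL);
 *	}
 */
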
/*
 * For deferred mode, this routine makes a thread cancelable.
 * It is called from functions that want to be cancellation
 * points and are about to block, such as cond_wait().
 */
void
_cancelon()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		ASSERT(self->ul_cancelable >= 0);
		self->ul_cancelable++;
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
	}
}

/*
 * This routine turns cancelability off and possibly calls pthread_exit().
 * It is called from functions which are cancellation points, like cond_wait().
 */
void
_canceloff()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}

/*
 * Same as _canceloff() but don't actually cancel the thread.
 * This is used by cond_wait() and sema_wait() when they don't get EINTR.
 */
void
_canceloff_nocancel()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}

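/*
 * Illustrative sketch (not actual libc code) of how a blocking
 * primitive brackets its wait with the three routines above to
 * become a deferred-mode cancellation point; __blocking_syscall()
 * is a hypothetical placeholder.
 *
 *	int
 *	some_cancellation_point(void)
 *	{
 *		int error;
 *
 *		_cancelon();			(may exit if cancel pending)
 *		error = __blocking_syscall();
 *		if (error == EINTR)
 *			_canceloff();		(acts on a pending cancel)
 *		else
 *			_canceloff_nocancel();	(just drop cancelability)
 *		return (error);
 *	}
 */
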
/*
 * __pthread_cleanup_push: called by the macro in pthread.h which defines
 * POSIX.1c pthread_cleanup_push().  The macro allocates the cleanup
 * struct and calls this routine to push the handler onto the
 * curthread's list.
 */
void
__pthread_cleanup_push(void (*routine)(void *),
	void *args, caddr_t fp, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = (__cleanup_t *)clnup_info;

	infop->func = routine;
	infop->arg = args;
	infop->fp = fp;
	infop->next = self->ul_clnup_hdr;
	self->ul_clnup_hdr = infop;
}

/*
 * __pthread_cleanup_pop: called by the macro in pthread.h which defines
 * POSIX.1c pthread_cleanup_pop().  The macro calls this routine to pop
 * the handler off the curthread's list and execute it if necessary.
 */
/* ARGSUSED1 */
void
__pthread_cleanup_pop(int ex, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = self->ul_clnup_hdr;

	self->ul_clnup_hdr = infop->next;
	if (ex)
		(*infop->func)(infop->arg);
}

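/*
 * Illustrative example (not part of libc): applications use the
 * push/pop pair only through the pthread.h macros, never by calling
 * the routines above directly, and the two macros must appear in the
 * same lexical scope.  Here a mutex is released if the thread is
 * cancelled while blocked in pthread_cond_wait().
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void
 *	unlock_it(void *mp)
 *	{
 *		(void) pthread_mutex_unlock(mp);
 *	}
 *
 *	void
 *	locked_wait(pthread_cond_t *cvp)
 *	{
 *		(void) pthread_mutex_lock(&lock);
 *		pthread_cleanup_push(unlock_it, &lock);
 *		(void) pthread_cond_wait(cvp, &lock); (cancellation point)
 *		pthread_cleanup_pop(1);		(pop and run unlock_it)
 *	}
 */
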
/*
 * Called when either self->ul_cancel_disabled or self->ul_cancel_pending
 * is modified.  Setting SC_CANCEL_FLG informs the kernel that we have
 * a pending cancellation and we do not have cancellation disabled.
 * In this situation, we will not go to sleep on any system call but
 * will instead return EINTR immediately on any attempt to sleep,
 * with SC_EINTR_FLG set in sc_flgs.  Clearing SC_CANCEL_FLG rescinds
 * this condition, but SC_EINTR_FLG never goes away until the thread
 * terminates (indicated by clear_flags != 0).
 */
void
set_cancel_pending_flag(ulwp_t *self, int clear_flags)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (clear_flags)
			scp->sc_flgs &= ~(SC_CANCEL_FLG | SC_EINTR_FLG);
		else if (self->ul_cancel_pending && !self->ul_cancel_disabled)
			scp->sc_flgs |= SC_CANCEL_FLG;
		else
			scp->sc_flgs &= ~SC_CANCEL_FLG;
	}
	exit_critical(self);
}

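/*
 * Summary of the flag setting above for the clear_flags == 0 case:
 *
 *	ul_cancel_pending	ul_cancel_disabled	SC_CANCEL_FLG
 *		0			any		cleared
 *		1			1		cleared
 *		1			0		set
 *
 * With clear_flags != 0 (thread termination), both SC_CANCEL_FLG and
 * SC_EINTR_FLG are cleared unconditionally.  Nothing at all happens
 * if ul_vfork or ul_nocancel is set.
 */
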
/*
 * Called from the PROLOGUE macro in scalls.c to inform subsequent
 * code that a cancellation point has been called and that the
 * current thread should cancel itself as soon as all of its locks
 * have been dropped (see safe_mutex_unlock()).
 */
void
set_cancel_eintr_flag(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL)
		scp->sc_flgs |= SC_EINTR_FLG;
	exit_critical(self);
}

/*
 * Calling set_parking_flag(curthread, 1) informs the kernel that we are
 * calling __lwp_park() or ___lwp_cond_wait().  If we take a signal in
 * the unprotected (from signals) interval before reaching the kernel,
 * sigacthandler() will call set_parking_flag(curthread, 0) to inform
 * the kernel to return immediately from these system calls, giving us
 * a spurious wakeup but not a deadlock.
 */
void
set_parking_flag(ulwp_t *self, int park)
{
	volatile sc_shared_t *scp;

	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (park) {
			scp->sc_flgs |= SC_PARK_FLG;
			/*
			 * We are parking; allow the __lwp_park() call to
			 * block even if we have a pending cancellation.
			 */
			scp->sc_flgs &= ~SC_CANCEL_FLG;
		} else {
			scp->sc_flgs &= ~(SC_PARK_FLG | SC_CANCEL_FLG);
			/*
			 * We are no longer parking; restore the
			 * pending cancellation flag if necessary.
			 */
			if (self->ul_cancel_pending &&
			    !self->ul_cancel_disabled)
				scp->sc_flgs |= SC_CANCEL_FLG;
		}
	} else if (park == 0) {	/* schedctl failed, do it the long way */
		(void) __lwp_unpark(self->ul_lwpid);
	}
	exit_critical(self);
}

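/*
 * Illustrative sketch (not actual libc code) of the protocol the
 * comment above describes: a caller brackets its sleep in the kernel
 * with set_parking_flag().  The argument list of __lwp_park() is
 * deliberately elided here.
 *
 *	set_parking_flag(self, 1);	(about to park)
 *	error = __lwp_park(...);	(may wake spuriously)
 *	set_parking_flag(self, 0);	(done parking)
 */
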
/*
 * Test if the current thread is due to exit because of cancellation.
 */
int
cancel_active(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	int exit_soon;

	/*
	 * If there is a pending cancellation and cancellation
	 * is not disabled (SC_CANCEL_FLG) and we received
	 * EINTR from a recent system call (SC_EINTR_FLG),
	 * then we will soon be exiting.
	 */
	enter_critical(self);
	exit_soon =
	    (((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) &&
	    (scp->sc_flgs & (SC_CANCEL_FLG | SC_EINTR_FLG)) ==
	    (SC_CANCEL_FLG | SC_EINTR_FLG));
	exit_critical(self);

	return (exit_soon);
}
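
/*
 * Illustrative sketch (not actual libc code): library code that has
 * just seen EINTR from a system call can consult cancel_active() to
 * tell whether the EINTR heralds an impending cancellation rather
 * than an ordinary signal interruption.
 *
 *	if (error == EINTR && cancel_active())
 *		(the thread is about to exit; don't retry the call)
 */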