xref: /onnv-gate/usr/src/lib/libc/port/threads/cancel.c (revision 5891:0d5c6468bb04)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"

/*
 * pthread_cancel: tries to cancel the target thread.
 * If the target thread has already exited, no action is taken.
 * Otherwise, send SIGCANCEL to request that the target thread
 * cancel itself.
 */
#pragma weak pthread_cancel = _pthread_cancel
int
_pthread_cancel(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (ulwp->ul_cancel_pending) {
		/*
		 * Don't send SIGCANCEL more than once.
		 */
		ulwp_unlock(ulwp, udp);
	} else if (ulwp == self) {
		/*
		 * Unlock self before cancelling.
		 */
		ulwp_unlock(self, udp);
		self->ul_nocancel = 0;	/* cancellation is now possible */
		if (self->ul_sigdefer == 0)
			do_sigcancel();
		else {
			self->ul_cancel_pending = 1;
			set_cancel_pending_flag(self, 0);
		}
	} else if (ulwp->ul_cancel_disabled) {
		/*
		 * Don't send SIGCANCEL if cancellation is disabled;
		 * just set the thread's ulwp->ul_cancel_pending flag.
		 * This avoids a potential EINTR for the target thread.
		 * We don't call set_cancel_pending_flag() here because
		 * we cannot modify another thread's schedctl data.
		 */
		ulwp->ul_cancel_pending = 1;
		ulwp_unlock(ulwp, udp);
	} else {
		/*
		 * Request the other thread to cancel itself.
		 */
		error = __lwp_kill(tid, SIGCANCEL);
		ulwp_unlock(ulwp, udp);
	}

	return (error);
}
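
/*
 * Example (illustrative sketch, not part of libc): a typical caller
 * cancels a worker thread and then reaps it with pthread_join().
 * The names worker and do_work() are hypothetical.
 *
 *	void *
 *	worker(void *arg)
 *	{
 *		for (;;)
 *			do_work();	(assumed to contain cancellation
 *					points)
 *	}
 *
 *	pthread_t tid;
 *	void *status;
 *
 *	(void) pthread_create(&tid, NULL, worker, NULL);
 *	(void) pthread_cancel(tid);
 *	(void) pthread_join(tid, &status);
 *	assert(status == PTHREAD_CANCELED);
 */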

/*
 * pthread_setcancelstate: sets the state ENABLED or DISABLED.
 * If the state is already ENABLED or is being set to ENABLED,
 * the type of cancellation is ASYNCHRONOUS, and a cancel request
 * is pending, then the thread is cancelled right here.
 * Otherwise, pthread_setcancelstate() is not a cancellation point.
 */
#pragma weak pthread_setcancelstate = _pthread_setcancelstate
int
_pthread_setcancelstate(int state, int *oldstate)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int was_disabled;

	/*
	 * Grab ulwp_lock(self) to protect the setting of ul_cancel_disabled
	 * since it is tested under this lock by pthread_cancel(), above.
	 * This has the side-effect of calling enter_critical(), which
	 * defers SIGCANCEL until exit_critical() is called from
	 * ulwp_unlock(self).  (self->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.)
	 */
	ulwp_lock(self, udp);

	was_disabled = self->ul_cancel_disabled;
	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		self->ul_cancel_disabled = 0;
		break;
	case PTHREAD_CANCEL_DISABLE:
		self->ul_cancel_disabled = 1;
		break;
	default:
		ulwp_unlock(self, udp);
		return (EINVAL);
	}
	set_cancel_pending_flag(self, 0);

	/*
	 * If this thread has been requested to be canceled and
	 * is in async mode and is or was enabled, then exit.
	 */
	if ((!self->ul_cancel_disabled || !was_disabled) &&
	    self->ul_cancel_async && self->ul_cancel_pending) {
		ulwp_unlock(self, udp);
		_pthread_exit(PTHREAD_CANCELED);
	}

	ulwp_unlock(self, udp);

	if (oldstate != NULL) {
		if (was_disabled)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}
	return (0);
}
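
/*
 * Example (illustrative sketch, not part of libc): the usual pattern
 * is to disable cancellation around a region that must not be torn
 * down part-way, then restore the caller's previous state.
 * update_shared_state() is hypothetical.
 *
 *	int oldstate;
 *
 *	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
 *	update_shared_state();
 *	(void) pthread_setcancelstate(oldstate, NULL);
 */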

/*
 * pthread_setcanceltype: sets the type DEFERRED or ASYNCHRONOUS.
 * If the type is being set to ASYNCHRONOUS, this call becomes
 * a cancellation point if a cancellation is pending.
 */
#pragma weak pthread_setcanceltype = _pthread_setcanceltype
int
_pthread_setcanceltype(int type, int *oldtype)
{
	ulwp_t *self = curthread;
	int was_async;

	/*
	 * Call enter_critical() to defer SIGCANCEL until exit_critical().
	 * We do this because curthread->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.
	 */
	enter_critical(self);

	was_async = self->ul_cancel_async;
	switch (type) {
	case PTHREAD_CANCEL_ASYNCHRONOUS:
		self->ul_cancel_async = 1;
		break;
	case PTHREAD_CANCEL_DEFERRED:
		self->ul_cancel_async = 0;
		break;
	default:
		exit_critical(self);
		return (EINVAL);
	}
	self->ul_save_async = self->ul_cancel_async;

	/*
	 * If this thread has been requested to be canceled and
	 * is in enabled mode and is or was in async mode, exit.
	 */
	if ((self->ul_cancel_async || was_async) &&
	    self->ul_cancel_pending && !self->ul_cancel_disabled) {
		exit_critical(self);
		_pthread_exit(PTHREAD_CANCELED);
	}

	exit_critical(self);

	if (oldtype != NULL) {
		if (was_async)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}
	return (0);
}
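
/*
 * Example (illustrative sketch, not part of libc): a thread doing
 * pure computation with no cancellation points can opt into
 * asynchronous cancellation so that pthread_cancel() takes effect
 * promptly.  crunch() is hypothetical and assumed async-cancel-safe.
 *
 *	int oldtype;
 *
 *	(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
 *	while (!done)
 *		crunch();
 *	(void) pthread_setcanceltype(oldtype, NULL);
 */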

/*
 * pthread_testcancel: tests for a pending cancellation.
 * If cancellation is enabled and a cancellation is pending,
 * act on it by calling _pthread_exit(), which takes care of
 * running the cleanup handlers.
 */
#pragma weak _private_testcancel = _pthread_testcancel
#pragma weak pthread_testcancel = _pthread_testcancel
void
_pthread_testcancel(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_pending && !self->ul_cancel_disabled)
		_pthread_exit(PTHREAD_CANCELED);
}
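
/*
 * Example (illustrative sketch, not part of libc): a long-running
 * deferred-mode loop with no natural cancellation points can poll
 * for cancellation explicitly.  process_item() is hypothetical.
 *
 *	for (i = 0; i < nitems; i++) {
 *		process_item(i);
 *		pthread_testcancel();	(exits via cleanup handlers
 *					if cancelled)
 *	}
 */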

/*
 * For deferred mode, this routine makes a thread cancelable.
 * It is called from functions that want to be cancellation
 * points and are about to block, such as cond_wait().
 */
void
_cancelon()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		ASSERT(self->ul_cancelable >= 0);
		self->ul_cancelable++;
		if (self->ul_cancel_pending)
			_pthread_exit(PTHREAD_CANCELED);
	}
}

/*
 * This routine turns cancelability off and possibly calls _pthread_exit().
 * It is called from functions that are cancellation points, like cond_wait().
 */
void
_canceloff()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		if (self->ul_cancel_pending)
			_pthread_exit(PTHREAD_CANCELED);
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}

/*
 * Same as _canceloff() but don't actually cancel the thread.
 * This is used by cond_wait() and sema_wait() when they don't get EINTR.
 */
void
_canceloff_nocancel()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}
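
/*
 * Sketch (illustrative only) of how a blocking cancellation point
 * such as cond_wait() brackets its wait with these routines; the
 * real implementation is more involved.
 *
 *	_cancelon();			(may exit if a cancel is pending)
 *	error = (the blocking system call);
 *	if (error == EINTR && curthread->ul_cancel_pending)
 *		_canceloff();		(acts on the cancellation)
 *	else
 *		_canceloff_nocancel();
 */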

/*
 * __pthread_cleanup_push: called by the macro in pthread.h that defines
 * POSIX.1c pthread_cleanup_push().  The macro allocates the cleanup
 * struct and calls this routine to push the handler onto curthread's
 * cleanup list.
 */
void
__pthread_cleanup_push(void (*routine)(void *),
	void *args, caddr_t fp, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = (__cleanup_t *)clnup_info;

	infop->func = routine;
	infop->arg = args;
	infop->fp = fp;
	infop->next = self->ul_clnup_hdr;
	self->ul_clnup_hdr = infop;
}

/*
 * __pthread_cleanup_pop: called by the macro in pthread.h that defines
 * POSIX.1c pthread_cleanup_pop().  It calls this routine to pop the
 * handler off curthread's cleanup list and execute it if necessary.
 */
/* ARGSUSED1 */
void
__pthread_cleanup_pop(int ex, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = self->ul_clnup_hdr;

	self->ul_clnup_hdr = infop->next;
	if (ex)
		(*infop->func)(infop->arg);
}
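
/*
 * Example (illustrative sketch, not part of libc): push and pop must
 * be matched statements in the same lexical scope because the macros
 * expand to an opening and a closing brace.  cleanup_unlock() and
 * lock are hypothetical.
 *
 *	(void) pthread_mutex_lock(&lock);
 *	pthread_cleanup_push(cleanup_unlock, &lock);
 *	... code containing cancellation points ...
 *	pthread_cleanup_pop(1);		(runs cleanup_unlock(&lock))
 */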

/*
 * Called when either self->ul_cancel_disabled or self->ul_cancel_pending
 * is modified.  Setting SC_CANCEL_FLG informs the kernel that we have
 * a pending cancellation and we do not have cancellation disabled.
 * In this situation, we will not go to sleep on any system call but
 * will instead return EINTR immediately on any attempt to sleep,
 * with SC_EINTR_FLG set in sc_flgs.  Clearing SC_CANCEL_FLG rescinds
 * this condition, but SC_EINTR_FLG never goes away until the thread
 * terminates (indicated by clear_flags != 0).
 */
void
set_cancel_pending_flag(ulwp_t *self, int clear_flags)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (clear_flags)
			scp->sc_flgs &= ~(SC_CANCEL_FLG | SC_EINTR_FLG);
		else if (self->ul_cancel_pending && !self->ul_cancel_disabled)
			scp->sc_flgs |= SC_CANCEL_FLG;
		else
			scp->sc_flgs &= ~SC_CANCEL_FLG;
	}
	exit_critical(self);
}

/*
 * Called from the PROLOGUE macro in scalls.c to inform subsequent
 * code that a cancellation point has been called and that the
 * current thread should cancel itself as soon as all of its locks
 * have been dropped (see safe_mutex_unlock()).
 */
void
set_cancel_eintr_flag(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL)
		scp->sc_flgs |= SC_EINTR_FLG;
	exit_critical(self);
}

/*
 * Calling set_parking_flag(curthread, 1) informs the kernel that we are
 * calling __lwp_park() or ___lwp_cond_wait().  If we take a signal in
 * the unprotected (from signals) interval before reaching the kernel,
 * sigacthandler() will call set_parking_flag(curthread, 0) to inform
 * the kernel to return immediately from these system calls, giving us
 * a spurious wakeup but not a deadlock.
 */
void
set_parking_flag(ulwp_t *self, int park)
{
	volatile sc_shared_t *scp;

	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (park) {
			scp->sc_flgs |= SC_PARK_FLG;
			/*
			 * We are parking; allow the __lwp_park() call to
			 * block even if we have a pending cancellation.
			 */
			scp->sc_flgs &= ~SC_CANCEL_FLG;
		} else {
			scp->sc_flgs &= ~(SC_PARK_FLG | SC_CANCEL_FLG);
			/*
			 * We are no longer parking; restore the
			 * pending cancellation flag if necessary.
			 */
			if (self->ul_cancel_pending &&
			    !self->ul_cancel_disabled)
				scp->sc_flgs |= SC_CANCEL_FLG;
		}
	} else if (park == 0) {	/* schedctl failed, do it the long way */
		__lwp_unpark(self->ul_lwpid);
	}
	exit_critical(self);
}

/*
 * Test if the current thread is due to exit because of cancellation.
 */
int
cancel_active(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	int exit_soon;

	/*
	 * If there is a pending cancellation and cancellation
	 * is not disabled (SC_CANCEL_FLG) and we received
	 * EINTR from a recent system call (SC_EINTR_FLG),
	 * then we will soon be exiting.
	 */
	enter_critical(self);
	exit_soon =
	    (((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) &&
	    (scp->sc_flgs & (SC_CANCEL_FLG | SC_EINTR_FLG)) ==
	    (SC_CANCEL_FLG | SC_EINTR_FLG));
	exit_critical(self);

	return (exit_soon);
}