Lines Matching +full:wait +full:- +full:state

2  *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
6 * UCRL-CODE-235197
50 return (-EINVAL);
72 cvp->cv_magic = CV_MAGIC;
73 init_waitqueue_head(&cvp->cv_event);
74 init_waitqueue_head(&cvp->cv_destroy);
75 atomic_set(&cvp->cv_waiters, 0);
76 atomic_set(&cvp->cv_refs, 1);
77 cvp->cv_mutex = NULL;
84 if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
85 ASSERT(cvp->cv_mutex == NULL);
86 ASSERT(!waitqueue_active(&cvp->cv_event));
97 ASSERT(cvp->cv_magic == CV_MAGIC);
99 cvp->cv_magic = CV_DESTROY;
100 atomic_dec(&cvp->cv_refs);
104 wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);
106 ASSERT3P(cvp->cv_mutex, ==, NULL);
107 ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
108 ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
109 ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
114 cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
116 DEFINE_WAIT(wait);
121 ASSERT(cvp->cv_magic == CV_MAGIC);
123 atomic_inc(&cvp->cv_refs);
125 m = READ_ONCE(cvp->cv_mutex);
127 m = xchg(&cvp->cv_mutex, mp);
131 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
132 atomic_inc(&cvp->cv_waiters);
137 * race where 'cvp->cv_waiters > 0' but the list is empty.
146 if (atomic_dec_and_test(&cvp->cv_waiters)) {
149 * just for debug anyway, so make it best-effort
151 cvp->cv_mutex = NULL;
152 wake_up(&cvp->cv_destroy);
155 finish_wait(&cvp->cv_event, &wait);
156 atomic_dec(&cvp->cv_refs);
211 * Return value is time left (expire_time - now) or -1 if timeout occurred.
215 int state, int io)
217 DEFINE_WAIT(wait);
223 ASSERT(cvp->cv_magic == CV_MAGIC);
226 /* XXX - Does not handle jiffies wrap properly */
227 time_left = expire_time - jiffies;
229 return (-1);
231 atomic_inc(&cvp->cv_refs);
232 m = READ_ONCE(cvp->cv_mutex);
234 m = xchg(&cvp->cv_mutex, mp);
238 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
239 atomic_inc(&cvp->cv_waiters);
244 * race where 'cvp->cv_waiters > 0' but the list is empty.
253 if (atomic_dec_and_test(&cvp->cv_waiters)) {
256 * just for debug anyway, so make it best-effort
258 cvp->cv_mutex = NULL;
259 wake_up(&cvp->cv_destroy);
262 finish_wait(&cvp->cv_event, &wait);
263 atomic_dec(&cvp->cv_refs);
270 return (time_left > 0 ? 1 : -1);
316 * Return value is time left (expire_time - now) or -1 if timeout occurred.
320 hrtime_t res, int state)
322 DEFINE_WAIT(wait);
331 ASSERT(cvp->cv_magic == CV_MAGIC);
334 time_left = expire_time - gethrtime();
336 return (-1);
338 atomic_inc(&cvp->cv_refs);
339 m = READ_ONCE(cvp->cv_mutex);
341 m = xchg(&cvp->cv_mutex, mp);
345 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
346 atomic_inc(&cvp->cv_waiters);
351 * race where 'cvp->cv_waiters > 0' but the list is empty.
361 if (atomic_dec_and_test(&cvp->cv_waiters)) {
364 * just for debug anyway, so make it best-effort
366 cvp->cv_mutex = NULL;
367 wake_up(&cvp->cv_destroy);
370 finish_wait(&cvp->cv_event, &wait);
371 atomic_dec(&cvp->cv_refs);
374 return (rc == -EINTR ? 1 : -1);
382 hrtime_t res, int flag, int state)
387 return (__cv_timedwait_hires(cvp, mp, tim, res, state));
432 ASSERT(cvp->cv_magic == CV_MAGIC);
433 atomic_inc(&cvp->cv_refs);
439 * the wait queue to ensure we don't race waking up processes.
441 if (atomic_read(&cvp->cv_waiters) > 0)
442 wake_up(&cvp->cv_event);
444 atomic_dec(&cvp->cv_refs);
452 ASSERT(cvp->cv_magic == CV_MAGIC);
453 atomic_inc(&cvp->cv_refs);
459 if (atomic_read(&cvp->cv_waiters) > 0)
460 wake_up_all(&cvp->cv_event);
462 atomic_dec(&cvp->cv_refs);