Lines Matching +full:up +full:- +full:counter
37 /* Spurious wake-ups are OK. Clear the flag before futexing. */ in ck_ec32_wake()
38 ck_pr_and_32(&ec->counter, (1U << 31) - 1); in ck_ec32_wake()
39 ops->wake32(ops, &ec->counter); in ck_ec32_wake()
57 ck_pr_and_64(&ec->counter, ~1); in ck_ec64_wake()
58 ops->wake64(ops, &ec->counter); in ck_ec64_wake()
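
The wake paths above clear the waiter flag before calling the futex-wake hook; the masks suggest the flag lives in bit 31 of the 32-bit counter and bit 0 of the 64-bit one. Below is a minimal, self-contained sketch of the same clear-then-wake idea, assuming Linux futexes and GCC atomic builtins rather than CK's ck_pr/ops layer; toy_ec32_wake is a hypothetical stand-in, not a CK function.

    #include <limits.h>
    #include <linux/futex.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Clear the "has waiters" flag (bit 31), then wake all futex sleepers.
     * A wake-up that turns out to be unnecessary is harmless because waiters
     * re-check the counter after every wake. */
    static void toy_ec32_wake(uint32_t *counter)
    {
            __atomic_fetch_and(counter, (1U << 31) - 1, __ATOMIC_SEQ_CST);
            syscall(SYS_futex, counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
    }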
82 new_deadline->tv_sec = TIME_MAX; in ck_ec_deadline_impl()
83 new_deadline->tv_nsec = NSEC_MAX; in ck_ec_deadline_impl()
87 r = ops->gettime(ops, &now); in ck_ec_deadline_impl()
89 return -1; in ck_ec_deadline_impl()
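
The deadline helper's shape is visible above: a NULL timeout maps to a far-future sentinel (TIME_MAX/NSEC_MAX), otherwise the current time comes from ops->gettime, failing with -1 if that call fails, and the relative timeout is added on. A sketch of the same computation, with clock_gettime standing in for ops->gettime and TOY_* sentinels standing in for the real constants:

    #include <time.h>

    /* Far-future sentinel standing in for the TIME_MAX/NSEC_MAX pair above. */
    #define TOY_TIME_MAX \
            ((time_t)((1ULL << (sizeof(time_t) * 8 - 1)) - 1))
    #define TOY_NSEC_MAX (1000000000L - 1)

    static int toy_deadline(struct timespec *out, const struct timespec *timeout)
    {
            struct timespec now;

            if (timeout == NULL) {
                    /* No timeout: the deadline "never" expires. */
                    out->tv_sec = TOY_TIME_MAX;
                    out->tv_nsec = TOY_NSEC_MAX;
                    return 0;
            }

            if (clock_gettime(CLOCK_MONOTONIC, &now) != 0)
                    return -1;

            /* deadline = now + timeout, with nanosecond carry. */
            out->tv_sec = now.tv_sec + timeout->tv_sec;
            out->tv_nsec = now.tv_nsec + timeout->tv_nsec;
            if (out->tv_nsec >= 1000000000L) {
                    out->tv_nsec -= 1000000000L;
                    out->tv_sec++;
            }

            return 0;
    }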
117 * value has changed), or the predicate returns non-0 (something else
120 * If deadline is ever reached, returns -1 (timeout).
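
The comment fragments describe the wait contract: stop with 0 once the counter has changed or the predicate asks to stop, and return -1 once the deadline passes. Below is a purely polling sketch of that contract; toy_wait is hypothetical, and since this listing does not show how the real code propagates a non-zero predicate result, the sketch simply stops waiting in that case.

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    static bool toy_reached(const struct timespec *now,
                            const struct timespec *deadline)
    {
            return now->tv_sec > deadline->tv_sec ||
                   (now->tv_sec == deadline->tv_sec &&
                    now->tv_nsec >= deadline->tv_nsec);
    }

    static int toy_wait(const uint32_t *counter, uint32_t old_value,
                        int (*pred)(void *), void *pred_data,
                        const struct timespec *deadline)
    {
            const struct timespec pause = { .tv_sec = 0,
                                            .tv_nsec = 100 * 1000 }; /* 100us */

            for (;;) {
                    struct timespec now;

                    if (__atomic_load_n(counter, __ATOMIC_ACQUIRE) != old_value)
                            return 0;       /* the value has changed */
                    if (pred != NULL && pred(pred_data) != 0)
                            return 0;       /* asked to stop waiting */
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if (toy_reached(&now, deadline))
                            return -1;      /* deadline reached: timeout */
                    nanosleep(&pause, NULL);
            }
    }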
137 const struct ck_ec_ops *ops = wait_state->ops; in exponential_backoff()
138 const uint32_t scale_factor = (ops->wait_scale_factor != 0) in exponential_backoff()
139 ? ops->wait_scale_factor in exponential_backoff()
141 const uint32_t shift_count = (ops->wait_shift_count != 0) in exponential_backoff()
142 ? ops->wait_shift_count in exponential_backoff()
144 uint32_t wait_ns = (ops->initial_wait_ns != 0) in exponential_backoff()
145 ? ops->initial_wait_ns in exponential_backoff()
155 return -1; in exponential_backoff()
160 wait_state->start = begin; in exponential_backoff()
165 wait_state->now = now; in exponential_backoff()
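
The backoff tunables above follow a "zero means use the default" convention, and the sleep grows geometrically through the scale/shift pair. The exact growth formula and default values are not visible in this listing; the sketch below assumes the wait is multiplied by scale_factor and shifted right by shift_count each round, and the TOY_DEFAULT_* values are placeholders, not CK's real defaults.

    #include <stdint.h>

    #define TOY_DEFAULT_INITIAL_WAIT_NS 2000u       /* placeholder defaults */
    #define TOY_DEFAULT_WAIT_SCALE_FACTOR 8u
    #define TOY_DEFAULT_WAIT_SHIFT_COUNT 2u

    /* "Zero means default" selection, mirroring the ?: chains above. */
    static uint32_t toy_tunable(uint32_t configured, uint32_t fallback)
    {
            return (configured != 0) ? configured : fallback;
    }

    /* Assumed growth rule: next = wait * scale_factor / 2^shift_count,
     * saturated so a single sleep never exceeds max_ns. */
    static uint32_t toy_next_wait_ns(uint32_t wait_ns, uint32_t scale_factor,
                                     uint32_t shift_count, uint32_t max_ns)
    {
            uint64_t next = ((uint64_t)wait_ns * scale_factor) >> shift_count;

            return (next > max_ns) ? max_ns : (uint32_t)next;
    }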
196 * Loops up to BUSY_LOOP_ITER times, or until ec's counter value
206 uint##W##_t current = ck_pr_load_##W(&ec->counter); \
207 size_t n = (ops->busy_loop_iter != 0) \
208 ? ops->busy_loop_iter \
215 current = ck_pr_load_##W(&ec->counter); \
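
A standalone version of the bounded spin shown above: poll the counter up to a fixed number of iterations and hand the last observed value back, so the caller can decide whether to fall through to the blocking path. A production spin loop would typically also issue a CPU pause hint per iteration (CK provides ck_pr_stall() for this); the sketch omits it.

    #include <stddef.h>
    #include <stdint.h>

    /* Spin for at most `iters` polls or until the word stops matching
     * `old_value`; returns the last value seen either way. */
    static uint32_t toy_busy_wait(const uint32_t *counter, uint32_t old_value,
                                  size_t iters)
    {
            uint32_t current = __atomic_load_n(counter, __ATOMIC_ACQUIRE);

            for (size_t i = 0; i < iters && current == old_value; i++)
                    current = __atomic_load_n(counter, __ATOMIC_ACQUIRE);

            return current;
    }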
227 * Attempts to upgrade ec->counter from unflagged to flagged.
230 * counter word is equal to flagged on return, or has been at some
247 /* We have a different counter value! */ \
252 * Flag the counter value. The CAS only fails if the \
253 * counter is already flagged, or has a new value. \
255 return (ck_pr_cas_##W##_value(&ec->counter, \
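
The upgrade step tries to CAS the counter from its unflagged value to the same value with the waiter flag set. The visible comments suggest the result distinguishes "the counter moved on" (no reason to sleep) from "the word is, or briefly was, the flagged value" (safe to sleep); the sketch below follows that reading, with GCC's compare-exchange standing in for ck_pr_cas_32_value().

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true if the counter holds (or held) a genuinely new value,
     * false if the word now carries -- or already carried -- the flag. */
    static bool toy_upgrade(uint32_t *counter, uint32_t current,
                            uint32_t unflagged, uint32_t flagged)
    {
            uint32_t witness = unflagged;

            if (current == flagged)
                    return false;   /* already flagged; nothing to do */
            if (current != unflagged)
                    return true;    /* we have a different counter value */

            /* Plant the flag.  On failure, `witness` is the value we lost
             * to: either the flagged word (another waiter beat us, which is
             * fine) or a new counter value. */
            if (__atomic_compare_exchange_n(counter, &witness, flagged,
                                            false, __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST))
                    return false;

            return witness != flagged;
    }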
278 const struct ck_ec32 *ec = state->ec; in ck_ec32_wait_slow_once()
279 const uint32_t flagged_word = state->flagged_word; in ck_ec32_wait_slow_once()
281 wait_state->ops->wait32(wait_state, &ec->counter, in ck_ec32_wait_slow_once()
283 return ck_pr_load_32(&ec->counter) != flagged_word; in ck_ec32_wait_slow_once()
293 const struct ck_ec64 *ec = state->ec; in ck_ec64_wait_slow_once()
294 const uint64_t flagged_word = state->flagged_word; in ck_ec64_wait_slow_once()
300 if (ck_pr_load_64(&ec->counter) != flagged_word) { in ck_ec64_wait_slow_once()
304 wait_state->ops->wait64(wait_state, &ec->counter, in ck_ec64_wait_slow_once()
306 return ck_pr_load_64(&ec->counter) != flagged_word; in ck_ec64_wait_slow_once()
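
The blocking step sleeps on the counter only while it still holds the flagged value and then reports whether the word has changed; the 64-bit variant above re-checks the word before sleeping. Here is a futex-based sketch of that wait-then-recheck step for a 32-bit word; the real ops->wait32/wait64 hooks also take a partial deadline, which this sketch omits.

    #include <linux/futex.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Returns true once the word no longer equals flagged_word.  FUTEX_WAIT
     * re-validates the word in the kernel, so a wake that slips in between
     * our load and the sleep is not lost. */
    static bool toy_wait_slow_once(uint32_t *counter, uint32_t flagged_word)
    {
            if (__atomic_load_n(counter, __ATOMIC_ACQUIRE) != flagged_word)
                    return true;    /* already changed; skip the sleep */

            /* Returns immediately (EAGAIN) if *counter != flagged_word. */
            syscall(SYS_futex, counter, FUTEX_WAIT, flagged_word,
                    NULL, NULL, 0);

            return __atomic_load_n(counter, __ATOMIC_ACQUIRE) != flagged_word;
    }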
331 return -1; \
343 * counter word is flagged. \
353 * By now, ec->counter == flagged_word (at \
355 * heuristically let any in-flight SP inc/add \
358 * lost wake-ups. \
379 /* Spurious wake-up. Redo the slow path. */ \
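
Putting the fragments together: the slow path flags the word, gives any in-flight single-producer increment a brief window to land (such an increment can overwrite the flag, which is the lost wake-up risk the comment alludes to), blocks, and redoes the whole dance after a spurious wake-up. The skeleton below is heavily simplified and reuses the toy_* helpers sketched earlier in this document (they are illustrative stand-ins, not CK functions); deadline and exponential-backoff plumbing is omitted.

    static int toy_wait_slow(uint32_t *counter, uint32_t old_value)
    {
            const uint32_t unflagged = old_value & ~(1U << 31);
            const uint32_t flagged = old_value | (1U << 31);

            for (;;) {
                    uint32_t current =
                        __atomic_load_n(counter, __ATOMIC_ACQUIRE);

                    if (toy_upgrade(counter, current, unflagged, flagged))
                            return 0;   /* counter changed before sleeping */

                    /* Let an in-flight increment land and spare us the
                     * futex round-trip, then block. */
                    if (toy_busy_wait(counter, flagged, 100) != flagged)
                            return 0;

                    if (toy_wait_slow_once(counter, flagged))
                            return 0;

                    /* Spurious wake-up: the word is still flagged, redo. */
            }
    }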