1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
45 * in turn was based on work described in:
46 * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University of Cambridge.
49 * Wang, Stamler, Parmer. 2016. Parallel Sections: Scaling System-Level
50 * Data-Structures.
55 * use-after-free errors with lockless data structures or as
58 * The basic approach is to maintain a monotonic write sequence
59 * number that is updated on some application defined granularity.
60 * Readers record the most recent write sequence number they have observed.
63 * write older than this value has been observed by all readers
65 * readers by storing an invalid sequence number in the per-cpu
67 * a global write clock that is used to mark memory on free.
69 * The write and read sequence numbers can be thought of as a two-handed clock, with readers advancing towards writers.
74 * advanced as far towards the write sequence as active readers allow.
79 * sequence number is consequently never higher than the write sequence.
85 * observation. That is to say, the delta between read and write
92 * complete without waiting. The batch granularity and free-to-use
100 * per-cpu cache of memory before advancing the sequence. It then
103 * value once for n=cache-size frees and the waits are done long
109 * the write sequence number becomes too costly we can advance
110 * it for every N buckets in exchange for higher free-to-use latency.
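A minimal user-space sketch of the scheme described above, assuming C11 atomics and a fixed reader count; the names (wr_seq, rd_observed, poll_goal) are invented here, and the sketch illustrates the bookkeeping only, not the memory-ordering subtleties the real implementation handles.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NREADERS    4
    #define SEQ_INVALID 0U                          /* reader not in a section */

    static _Atomic uint32_t wr_seq = 1;             /* global write hand (odd) */
    static _Atomic uint32_t rd_observed[NREADERS];  /* per-reader hand */

    /* Reader: publish the write sequence this section runs under. */
    void
    reader_enter(int self)
    {
        uint32_t s;

        s = atomic_load_explicit(&wr_seq, memory_order_acquire);
        atomic_store_explicit(&rd_observed[self], s, memory_order_seq_cst);
    }

    /* Reader: an invalid value marks this slot idle. */
    void
    reader_exit(int self)
    {
        atomic_store_explicit(&rd_observed[self], SEQ_INVALID,
            memory_order_release);
    }

    /* Writer: advance the write hand; the returned value is the goal a
     * caller must later poll for before reusing retired memory. */
    uint32_t
    writer_advance(void)
    {
        return (atomic_fetch_add(&wr_seq, 2) + 2);
    }

    /* Poll: memory retired before 'goal' is safe to reuse once no
     * active reader still holds a sequence older than the goal. */
    bool
    poll_goal(uint32_t goal)
    {
        uint32_t s;

        for (int i = 0; i < NREADERS; i++) {
            s = atomic_load(&rd_observed[i]);
            if (s != SEQ_INVALID && (int32_t)(s - goal) < 0)
                return (false);             /* a reader is still behind */
        }
        return (true);
    }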
124 * | -------------------- sequence number space -------------------- |
126 * | ----- valid sequence numbers ---- |
128 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
160 #define SMR_SEQ_MAX_ADVANCE (SMR_SEQ_MAX_DELTA - 1024)
164 #define SMR_SEQ_INIT (UINT_MAX - 100000)
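SMR_SEQ_INIT sits just below UINT_MAX so that sequence wraparound is exercised soon after boot; that only works because comparisons are made on the signed difference. A small self-contained check of that property, with macro names local to the sketch (the helpers in smr.h work along these lines):

    #include <assert.h>
    #include <stdint.h>

    /* Wrap-safe comparisons: interpret the difference as signed, so a
     * sequence that has wrapped past zero still compares as newer. */
    #define SEQ_LT(a, b)    ((int32_t)((a) - (b)) < 0)
    #define SEQ_GT(a, b)    ((int32_t)((a) - (b)) > 0)
    #define SEQ_DELTA(a, b) ((int32_t)((a) - (b)))

    int
    main(void)
    {
        uint32_t init = UINT32_MAX - 100000;    /* cf. SMR_SEQ_INIT */
        uint32_t wrapped = init + 200000;       /* has wrapped past zero */

        assert(SEQ_GT(wrapped, init));          /* still ordered correctly */
        assert(SEQ_LT(init, wrapped));
        assert(SEQ_DELTA(wrapped, init) == 200000);
        return (0);
    }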
173 * Hardclock is responsible for advancing ticks on a single CPU while every
176 * Because these interrupts are not synchronized we must wait one additional
190 * sequence may not be advanced on write for lazy or deferred SMRs. In this
210 * Advance a lazy write sequence number. These move forward at the rate of
213 * This returns the goal write sequence number.
227 old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair); in smr_lazy_advance()
233 d = t - s_wr.ticks; in smr_lazy_advance()
247 atomic_cmpset_64(&s->s_wr._pair, old._pair, s_wr._pair); in smr_lazy_advance()
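For the lazy path, the ticks and sequence fields live in one 64-bit pair so they can be advanced together with a single compare-and-swap. A simplified user-space model, assuming C11 atomics and advancing one increment per tick change rather than the kernel's rate; the union and function names are invented here:

    #include <stdatomic.h>
    #include <stdint.h>

    /* A (ticks, seq) pair read and updated as one 64-bit word, loosely
     * modeled on the s_wr record. */
    union wr_pair {
        struct {
            uint32_t ticks;     /* last tick at which seq advanced */
            uint32_t seq;       /* lazy write sequence */
        };
        uint64_t _pair;
    };

    static _Atomic uint64_t g_wr;

    /* Advance the lazy sequence at most once per tick change.  Racing
     * callers issue one CAS each; a failed CAS just means another
     * caller advanced the pair first, so the result is best effort. */
    uint32_t
    lazy_advance(uint32_t now_ticks)
    {
        union wr_pair old, new;

        old._pair = atomic_load_explicit(&g_wr, memory_order_acquire);
        new = old;
        if ((int32_t)(now_ticks - old.ticks) > 0) {
            new.ticks = now_ticks;
            new.seq = old.seq + 2;
            atomic_compare_exchange_strong(&g_wr, &old._pair, new._pair);
        }
        return (new.seq);
    }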
253 * Increment the shared write sequence by 2. Since it is initialized
261 return (atomic_fetchadd_int(&s->s_wr.seq, SMR_SEQ_INCR) + SMR_SEQ_INCR); in smr_shared_advance()
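Two details packed into that one line: fetchadd returns the old value, so the increment is added once more to get the value this caller published, and because the initial value is odd (SMR_SEQ_INIT above) an increment of 2 keeps every valid sequence away from zero, the invalid marker. A tiny self-contained check, with names local to the sketch:

    #include <assert.h>
    #include <stdatomic.h>

    #define SEQ_INVALID 0U
    #define SEQ_INCR    2U

    static _Atomic unsigned wr_seq = 1;     /* odd start, stays odd */

    /* Mirror of the fetchadd idiom: old value plus the increment is the
     * value this caller just made current. */
    static unsigned
    shared_advance(void)
    {
        return (atomic_fetch_add(&wr_seq, SEQ_INCR) + SEQ_INCR);
    }

    int
    main(void)
    {
        for (int i = 0; i < 1000; i++)
            assert(shared_advance() != SEQ_INVALID);    /* never zero */
        return (0);
    }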
265 * Advance the write sequence number for a normal smr section. If the
266 * write sequence is too far behind the read sequence we have to poll
275 KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0, in smr_default_advance()
282 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); in smr_default_advance()
292 smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE); in smr_default_advance()
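A condensed model of that back-pressure: advance the write hand, and if it has outrun the cached read hand by more than the representable delta, block on an older goal before returning so the wrap-safe comparisons stay meaningful. The bound below is an assumed value, not the kernel's, and seq_wait() is only a stand-in for smr_wait():

    #include <stdatomic.h>
    #include <stdint.h>

    #define SEQ_INCR        2U
    #define SEQ_MAX_DELTA   (UINT32_C(1) << 30)     /* assumed bound */
    #define SEQ_MAX_ADVANCE (SEQ_MAX_DELTA - 1024)

    static _Atomic uint32_t wr_seq = 1;     /* write hand */
    static _Atomic uint32_t rd_seq = 1;     /* cached read hand */

    /* Stand-in for smr_wait(): the real code polls the readers until
     * the goal is met; here we just pretend that happened. */
    static void
    seq_wait(uint32_t goal)
    {
        atomic_store(&rd_seq, goal);
    }

    uint32_t
    default_advance(void)
    {
        uint32_t goal, rd;

        goal = atomic_fetch_add(&wr_seq, SEQ_INCR) + SEQ_INCR;
        rd = atomic_load_explicit(&rd_seq, memory_order_acquire);
        /* Too far ahead of the readers: wait on an older goal first. */
        if ((int32_t)(goal - rd) >= (int32_t)SEQ_MAX_DELTA)
            seq_wait(goal - SEQ_MAX_ADVANCE);
        return (goal);
    }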
300 * Deferred SMRs conditionally update s_wr_seq based on an
307 if (++self->c_deferred < self->c_limit) in smr_deferred_advance()
309 self->c_deferred = 0; in smr_deferred_advance()
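The deferred path only pays for a real advance once every c_limit calls; callers in between just get the goal the next advance will publish. A user-space approximation in which the per-CPU counters become a plain struct and the real function's additional care is omitted:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t wr_seq = 1;

    static uint32_t
    shared_advance(void)
    {
        return (atomic_fetch_add(&wr_seq, 2) + 2);
    }

    /* Stand-in for the per-CPU c_deferred/c_limit fields. */
    struct deferral {
        unsigned deferred;      /* calls since the last real advance */
        unsigned limit;         /* advance once per this many calls */
    };

    /* Most calls return the goal of the next advance without touching
     * the shared counter; it is bumped once per 'limit' calls. */
    uint32_t
    deferred_advance(struct deferral *self)
    {
        if (++self->deferred < self->limit)
            return (atomic_load(&wr_seq) + 2);
        self->deferred = 0;
        return (shared_advance());
    }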
314 * Advance the write sequence and return the value for use as the
315 * wait goal. This guarantees that any changes made by the calling
348 s = self->c_shared; in smr_advance()
349 flags = self->c_flags; in smr_advance()
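Typical use of the returned goal: tag each retired object with it and free the object only after a later poll shows every reader has passed that goal. The structure, retire list, and free_widget() below are hypothetical and assume an external writer lock; the smr_advance() and smr_poll() calls are the API this file provides.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/smr.h>

    struct widget {
        struct widget  *next;
        smr_seq_t       free_seq;   /* goal recorded at retire time */
    };

    static struct widget *retire_list;      /* protected by a writer lock */
    static smr_t widget_smr;                /* created with smr_create() */

    void free_widget(struct widget *);      /* hypothetical destructor */

    void
    widget_retire(struct widget *w)
    {
        /* Anything the caller unlinked before this call is covered. */
        w->free_seq = smr_advance(widget_smr);
        w->next = retire_list;
        retire_list = w;
    }

    void
    widget_reclaim(void)
    {
        struct widget *w, *next;

        w = retire_list;
        retire_list = NULL;
        for (; w != NULL; w = next) {
            next = w->next;
            if (smr_poll(widget_smr, w->free_seq, false)) {
                free_widget(w);             /* provably unreachable */
            } else {
                w->next = retire_list;      /* not yet safe; keep queued */
                retire_list = w;
            }
        }
    }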
363 * Poll to determine the currently observed sequence number on a cpu
364 * and spinwait if the 'wait' argument is true.
367 smr_poll_cpu(smr_t c, smr_seq_t s_rd_seq, smr_seq_t goal, bool wait) in smr_poll_cpu() argument
373 c_seq = atomic_load_int(&c->c_seq); in smr_poll_cpu()
389 * cached value. This is only likely to happen on in smr_poll_cpu()
402 if (!wait) in smr_poll_cpu()
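A user-space rendering of that per-CPU check: an idle slot (invalid sequence) or one at or past the goal is fine, while a lagging one either fails the poll or is spun on. Names are local to the sketch.

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SEQ_INVALID 0U

    /* One reader slot, standing in for the per-CPU c_seq field. */
    struct reader_slot {
        _Atomic uint32_t seq;
    };

    /* Return true once this reader is idle or holds a sequence at or
     * past 'goal'; if 'wait' is set, spin until that is the case. */
    bool
    poll_reader(struct reader_slot *r, uint32_t goal, bool wait)
    {
        uint32_t seq;

        for (;;) {
            seq = atomic_load_explicit(&r->seq, memory_order_acquire);
            if (seq == SEQ_INVALID)
                return (true);      /* idle readers hold nothing stale */
            if ((int32_t)(seq - goal) >= 0)
                return (true);      /* entered at or after the goal */
            if (!wait)
                return (false);     /* report the lag to the caller */
            sched_yield();          /* spin politely and re-check */
        }
    }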
419 smr_seq_t s_wr_seq, smr_seq_t goal, bool wait) in smr_poll_scan() argument
428 * The read sequence can be no larger than the write sequence at in smr_poll_scan()
434 * Query the active sequence on this cpu. If we're not in smr_poll_scan()
440 wait); in smr_poll_scan()
453 s_rd_seq = atomic_load_int(&s->s_rd_seq); in smr_poll_scan()
455 atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq); in smr_poll_scan()
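The scan then looks like the sketch below: take the minimum over all readers, bounded above by the write hand the caller sampled, and ratchet the shared cached read sequence forward with a CAS so concurrent scans never move it backwards. It reuses struct reader_slot and poll_reader() from the previous sketch; everything else is invented here.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NREADERS    8
    #define SEQ_INVALID 0U

    struct reader_slot {                    /* same layout as above */
        _Atomic uint32_t seq;
    };
    bool poll_reader(struct reader_slot *, uint32_t, bool);

    static struct reader_slot readers[NREADERS];
    static _Atomic uint32_t rd_seq = 1;     /* cached minimum, like s_rd_seq */

    uint32_t
    poll_scan(uint32_t wr_now, uint32_t goal, bool wait)
    {
        uint32_t min_seq, seq, cached;

        min_seq = wr_now;       /* read hand never exceeds write hand */
        for (int i = 0; i < NREADERS; i++) {
            /* With wait set this spins until the reader passes the goal. */
            (void)poll_reader(&readers[i], goal, wait);
            seq = atomic_load_explicit(&readers[i].seq,
                memory_order_acquire);
            if (seq != SEQ_INVALID && (int32_t)(seq - min_seq) < 0)
                min_seq = seq;
        }

        /* Advance the cached read sequence, never retreat it. */
        cached = atomic_load_explicit(&rd_seq, memory_order_acquire);
        if ((int32_t)(min_seq - cached) > 0)
            atomic_compare_exchange_strong(&rd_seq, &cached, min_seq);
        return (min_seq);
    }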
463 * Poll to determine whether all readers have observed the 'goal' write
466 * If wait is true this will spin until the goal is met.
475 smr_poll(smr_t smr, smr_seq_t goal, bool wait) in smr_poll() argument
487 KASSERT(!wait || !SMR_ENTERED(smr), in smr_poll()
489 KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0, in smr_poll()
490 ("smr_poll: Blocking not allowed on lazy smrs.")); in smr_poll()
500 s = self->c_shared; in smr_poll()
501 flags = self->c_flags; in smr_poll()
505 * Conditionally advance the lazy write clock on any writer in smr_poll()
513 * observe an updated read sequence that is larger than write. in smr_poll()
515 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); in smr_poll()
528 s_wr_seq = atomic_load_acq_int(&s->s_wr.seq); in smr_poll()
545 if (!wait) { in smr_poll()
549 /* LAZY is always !wait. */ in smr_poll()
558 * it to be valid. If it is not then the caller held on to it and in smr_poll()
566 s_rd_seq = smr_poll_scan(smr, s, s_rd_seq, s_wr_seq, goal, wait); in smr_poll()
579 KASSERT(success || !wait, ("%s: blocking poll failed", __func__)); in smr_poll()
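Putting the poll together, minus the lazy and deferred special cases handled above: a fast path against the cached read hand, then a full scan. The globals and poll_scan() refer to the earlier sketches, and the goal is assumed to come from a prior advance.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    extern _Atomic uint32_t wr_seq;     /* write hand, from earlier sketches */
    extern _Atomic uint32_t rd_seq;     /* cached read hand */
    uint32_t poll_scan(uint32_t wr_now, uint32_t goal, bool wait);

    /* Has every reader observed 'goal'?  Fast path: the cached read
     * hand already covers it.  Slow path: rescan, spinning only if the
     * caller asked to wait. */
    bool
    poll(uint32_t goal, bool wait)
    {
        uint32_t rd, wr;

        rd = atomic_load_explicit(&rd_seq, memory_order_acquire);
        if ((int32_t)(goal - rd) <= 0)
            return (true);

        /* 'goal' came from a prior advance, so it is never ahead of
         * the write hand sampled here. */
        wr = atomic_load_explicit(&wr_seq, memory_order_acquire);
        rd = poll_scan(wr, goal, wait);
        return ((int32_t)(goal - rd) <= 0);
    }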
593 s->s_name = name; in smr_create()
594 s->s_rd_seq = s->s_wr.seq = SMR_SEQ_INIT; in smr_create()
595 s->s_wr.ticks = ticks; in smr_create()
600 c->c_seq = SMR_SEQ_INVALID; in smr_create()
601 c->c_shared = s; in smr_create()
602 c->c_deferred = 0; in smr_create()
603 c->c_limit = limit; in smr_create()
604 c->c_flags = flags; in smr_create()
616 uma_zfree(smr_shared_zone, smr->c_shared); in smr_destroy()
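An end-to-end usage sketch against the API this file implements: create an SMR zone, guard reads with smr_enter()/smr_exit(), and retire the old object with smr_synchronize() before freeing it. The 'conf' structure, its lock discipline, and the M_TEMP allocation are hypothetical, and production code would publish the pointer with the atomic(9) ops rather than a plain volatile store.

    #include <sys/param.h>
    #include <sys/malloc.h>
    #include <sys/smr.h>

    struct conf {
        int value;
    };

    static smr_t conf_smr;
    static struct conf * volatile conf_ptr;

    void
    conf_init(void)
    {
        conf_smr = smr_create("conf", 0, 0);
        conf_ptr = malloc(sizeof(*conf_ptr), M_TEMP, M_WAITOK | M_ZERO);
    }

    int
    conf_read(void)
    {
        const struct conf *c;
        int v;

        smr_enter(conf_smr);        /* publish the sequence we run under */
        c = conf_ptr;
        v = c->value;               /* cannot be freed while entered */
        smr_exit(conf_smr);
        return (v);
    }

    void
    conf_replace(struct conf *new_conf)
    {
        struct conf *old;

        old = conf_ptr;
        conf_ptr = new_conf;        /* assume an external writer lock */
        smr_synchronize(conf_smr);  /* advance and wait for old readers */
        free(old, M_TEMP);          /* now unreachable by any reader */
    }

    void
    conf_fini(void)
    {
        free((void *)conf_ptr, M_TEMP);
        smr_destroy(conf_smr);
    }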
628 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0); in smr_init()
630 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU); in smr_init()
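The odd-looking (CACHE_LINE_SIZE * 2) - 1 is a UMA alignment mask: each shared structure is padded and aligned to two cache lines, presumably to keep the heavily updated sequence words from false sharing with neighbouring allocations. The same idea in portable C11, with an assumed 64-byte line size:

    #include <stdalign.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CACHE_LINE_SIZE 64      /* assumed; the kernel supplies this */

    /* Align the hot fields to two cache lines, the portable analogue of
     * passing an alignment mask of (CACHE_LINE_SIZE * 2) - 1 to UMA. */
    struct shared_state {
        alignas(CACHE_LINE_SIZE * 2) uint32_t rd_seq;
        uint32_t wr_seq;
    };

    int
    main(void)
    {
        struct shared_state *s;

        /* sizeof is rounded up to the alignment, as aligned_alloc needs. */
        s = aligned_alloc(CACHE_LINE_SIZE * 2, sizeof(*s));
        if (s == NULL)
            return (1);
        printf("%p aligned to two cache lines: %d\n", (void *)s,
            ((uintptr_t)s % (CACHE_LINE_SIZE * 2)) == 0);
        free(s);
        return (0);
    }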