/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */
#include <sys/zfs_context.h>
#include <sys/aggsum.h>

/*
 * Aggregate-sum counters are a form of fanned-out counter, used when atomic
 * instructions on a single field cause enough CPU cache line contention to
 * slow system performance. Because of their higher overhead and the expense
 * of reading them precisely, they should only be used in cases where the
 * write rate (increment/decrement) is much higher than the read rate (get
 * value).
 *
 * An aggsum is comprised of two basic parts, the core and the buckets. The
 * core counter contains a lock for the entire counter, as well as the
 * current upper and lower bounds on the value of the counter. The
 * aggsum_bucket structure contains a per-bucket lock to protect the contents
 * of the bucket, the bucket's current delta from the global counter, and the
 * amount of value the bucket has borrowed to make its own increments and
 * decrements cheap.
 *
 * The basic operation of an aggsum is simple. Threads that wish to modify
 * the counter will modify one bucket's counter (chosen by their current CPU,
 * to help minimize lock and cache contention). If the bucket already holds
 * enough borrowed value to absorb the change, the thread simply adjusts the
 * delta and drops the bucket lock. If the bucket does not, it must be
 * cleared (its delta has outgrown its borrow) so the thread can borrow more
 * from the core counter. Borrowing is done by adding the borrowed amount to
 * the core's upper bound and subtracting it from the lower bound, so the
 * bounds keep bracketing the true value however the bucket later spends the
 * borrow (when the bucket is cleared, the borrowed value is subtracted back
 * out). Clearing the bucket is the opposite; we add the current delta to
 * both bounds, return the borrow, and zero the bucket. Borrowing and
 * clearing are the only bucket operations that take the global lock of the
 * core counter; since all other operations access CPU-local resources,
 * they need no lock beyond their own bucket's.
 *
 * Reading an aggsum offers several precision/cost trade-offs for the same
 * task. It is fast to determine the upper and lower bounds of the aggsum; this
 * is useful whenever a cheap approximation of the aggsum's value is acceptable.
 * However, if one needs to
 * know whether some specific value is above or below the current value in the
 * aggsum, aggsum_compare() can be used. It works by checking the target
 * against the bounds and
 * then clearing a bucket. This proceeds until the target is outside of the
 * narrowing bounds (so the answer is known), or every bucket has been
 * cleared and we know that the target is equal to the aggsum's value. Finally,
 * the most expensive operation is determining the precise value of the aggsum,
 * which requires clearing every bucket. What makes the precise reads
 * expensive is clearing buckets. This involves grabbing the global lock
 * (contending with every other reader or borrower), taking each bucket lock
 * in turn, and emptying the buckets (so the next write on each affected CPU
 * misses in the cache and must make a fresh borrow
 * request, which will also be expensive). This is what makes aggsums well
 * suited for write-many read-rarely operations.
 *
 * Note that aggsums do not expand if more CPUs are hot-added; such CPUs
 * share the buckets sized for boot_ncpus. All buckets must be allocated up
 * front, and dynamically adding them is a complex task.
 */
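/*
 * A minimal usage sketch (illustrative only, not part of this file). It
 * assumes a kernel context with the aggsum API above; "inflight_bytes" and
 * the helpers are hypothetical. The counter is written on every I/O but only
 * compared against a limit occasionally, the write-many read-rarely pattern
 * aggsums are built for.
 */
static aggsum_t inflight_bytes;

static void
inflight_setup(void)
{
	aggsum_init(&inflight_bytes, 0);
}

static void
inflight_io(int64_t size)
{
	aggsum_add(&inflight_bytes, size);	/* hot path, CPU-local */
}

static boolean_t
inflight_over(uint64_t limit)
{
	/* Three-way compare; clears buckets only when the bounds can't say. */
	return (aggsum_compare(&inflight_bytes, limit) > 0);
}

static void
inflight_teardown(void)
{
	VERIFY0(aggsum_value(&inflight_bytes));	/* exact, expensive read */
	aggsum_fini(&inflight_bytes);
}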
void
aggsum_init(aggsum_t *as, uint64_t value)
{
	memset(as, 0, sizeof (*as));
	as->as_lower_bound = as->as_upper_bound = value;
	mutex_init(&as->as_lock, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * Too many buckets may hurt read performance without improving
	 * write.  From 12 CPUs use bucket per 2 CPUs, from 48 per 4, etc.
	 */
	as->as_bucketshift = highbit64(boot_ncpus / 6) / 2;
	as->as_numbuckets = ((boot_ncpus - 1) >> as->as_bucketshift) + 1;
	as->as_buckets = kmem_zalloc(as->as_numbuckets *
	    sizeof (aggsum_bucket_t), KM_SLEEP);
	for (int i = 0; i < as->as_numbuckets; i++) {
		mutex_init(&as->as_buckets[i].asc_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}
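/*
 * Worked examples of the sizing above, assuming highbit64(x) returns the
 * 1-based index of the highest bit set in x (and 0 for x == 0):
 *
 *	boot_ncpus	as_bucketshift		as_numbuckets
 *	     4		highbit64(0)/2  == 0	 4	(bucket per CPU)
 *	    16		highbit64(2)/2  == 1	 8	(bucket per 2 CPUs)
 *	    64		highbit64(10)/2 == 2	16	(bucket per 4 CPUs)
 */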
void
aggsum_fini(aggsum_t *as)
{
	for (int i = 0; i < as->as_numbuckets; i++)
		mutex_destroy(&as->as_buckets[i].asc_lock);
	kmem_free(as->as_buckets, as->as_numbuckets * sizeof (aggsum_bucket_t));
	mutex_destroy(&as->as_lock);
}
/*
 * The lower bound is a signed value, so it is cast for the unsigned 64-bit
 * atomic load; the bit pattern is preserved either way.
 */
int64_t
aggsum_lower_bound(aggsum_t *as)
{
	return (atomic_load_64((volatile uint64_t *)&as->as_lower_bound));
}

uint64_t
aggsum_upper_bound(aggsum_t *as)
{
	return (atomic_load_64(&as->as_upper_bound));
}
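/*
 * Example (illustrative): callers that only need a conservative answer can
 * consult the bounds alone and never contend on any lock. "limit" and the
 * helper names here are hypothetical:
 *
 *	if (aggsum_upper_bound(&cnt) < limit)
 *		fast_path();		// certainly under the limit
 *	else if (aggsum_lower_bound(&cnt) >= (int64_t)limit)
 *		throttle();		// certainly at or over the limit
 *	else
 *		slow_path();		// bounds can't tell; aggsum_compare()
 */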
uint64_t
aggsum_value(aggsum_t *as)
{
	int64_t lb;
	uint64_t ub;

	mutex_enter(&as->as_lock);
	lb = as->as_lower_bound;
	ub = as->as_upper_bound;
	if (lb == ub) {
		/* Bounds already agree, so every bucket must be empty. */
		for (int i = 0; i < as->as_numbuckets; i++) {
			ASSERT0(as->as_buckets[i].asc_delta);
			ASSERT0(as->as_buckets[i].asc_borrowed);
		}
		mutex_exit(&as->as_lock);
		return (lb);
	}
	for (int i = 0; i < as->as_numbuckets; i++) {
		struct aggsum_bucket *asb = &as->as_buckets[i];
		if (asb->asc_borrowed == 0)
			continue;
		mutex_enter(&asb->asc_lock);
		lb += asb->asc_delta + asb->asc_borrowed;
		ub += asb->asc_delta - asb->asc_borrowed;
		asb->asc_delta = 0;
		asb->asc_borrowed = 0;
		mutex_exit(&asb->asc_lock);
	}
	/* With every bucket cleared, the bounds collapse to the value. */
	ASSERT3U(lb, ==, ub);
	atomic_store_64((volatile uint64_t *)&as->as_lower_bound, lb);
	atomic_store_64(&as->as_upper_bound, lb);
	mutex_exit(&as->as_lock);

	return (lb);
}
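/*
 * Worked example for the clearing loop above: say the core holds lb = 36,
 * ub = 164 and a single bucket has asc_borrowed = 64, asc_delta = +5 (so
 * the true value is 105). Clearing it adds delta + borrowed = +69 to lb
 * and delta - borrowed = -59 to ub, leaving lb = ub = 105. Once every
 * bucket is drained the bounds always collapse to the exact value, which
 * is why the same lb can be stored into both bounds above.
 */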
void
aggsum_add(aggsum_t *as, int64_t delta)
{
	struct aggsum_bucket *asb;
	int64_t borrow;

	asb = &as->as_buckets[(CPU_SEQID_UNSTABLE >> as->as_bucketshift) %
	    as->as_numbuckets];

	/* Try fast path if we already borrowed enough before. */
	mutex_enter(&asb->asc_lock);
	if (asb->asc_delta + delta <= (int64_t)asb->asc_borrowed &&
	    asb->asc_delta + delta >= -(int64_t)asb->asc_borrowed) {
		asb->asc_delta += delta;
		mutex_exit(&asb->asc_lock);
		return;
	}
	mutex_exit(&asb->asc_lock);

	/*
	 * We haven't borrowed enough.  Take the global lock and borrow
	 * considering what is requested now and what we borrowed before.
	 */
	borrow = (delta < 0 ? -delta : delta);
	borrow <<= aggsum_borrow_shift + as->as_bucketshift;
	mutex_enter(&as->as_lock);
	if (borrow >= asb->asc_borrowed)
		borrow -= asb->asc_borrowed;
	else
		borrow = (borrow - (int64_t)asb->asc_borrowed) / 4;
	mutex_enter(&asb->asc_lock);
	delta += asb->asc_delta;
	asb->asc_delta = 0;
	asb->asc_borrowed += borrow;
	mutex_exit(&asb->asc_lock);
	atomic_store_64((volatile uint64_t *)&as->as_lower_bound,
	    as->as_lower_bound + delta - borrow);
	atomic_store_64(&as->as_upper_bound,
	    as->as_upper_bound + delta + borrow);
	mutex_exit(&as->as_lock);
}
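/*
 * Worked example for the slow path above, assuming aggsum_borrow_shift == 4
 * (its usual default) and as_bucketshift == 2: a miss with delta == 1
 * computes borrow = 1 << (4 + 2) = 64, so the bucket ends up with 64
 * borrowed and can then absorb any run of deltas staying within [-64, 64]
 * without the global lock. If the bucket had instead already borrowed
 * 100 (> 64), borrow becomes (64 - 100) / 4 == -9: each miss returns a
 * quarter of the excess, shrinking an oversized borrow gradually rather
 * than all at once.
 */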
/*
 * Compare the aggsum value to target efficiently. Returns -1 if the value
 * represented by the aggsum is less than target, 1 if it's greater, and 0 if
 * they are equal.
 */
int
aggsum_compare(aggsum_t *as, uint64_t target)
{
	int64_t lb;
	uint64_t ub;
	int i;

	/* First try to answer from the lockless bounds alone. */
	if (atomic_load_64(&as->as_upper_bound) < target)
		return (-1);
	lb = atomic_load_64((volatile uint64_t *)&as->as_lower_bound);
	if (lb > 0 && (uint64_t)lb > target)
		return (1);
	mutex_enter(&as->as_lock);
	lb = as->as_lower_bound;
	ub = as->as_upper_bound;
	for (i = 0; i < as->as_numbuckets; i++) {
		struct aggsum_bucket *asb = &as->as_buckets[i];
		if (asb->asc_borrowed == 0)
			continue;
		mutex_enter(&asb->asc_lock);
		lb += asb->asc_delta + asb->asc_borrowed;
		ub += asb->asc_delta - asb->asc_borrowed;
		asb->asc_delta = 0;
		asb->asc_borrowed = 0;
		mutex_exit(&asb->asc_lock);
		/* Stop clearing as soon as the narrowed bounds decide it. */
		if (ub < target || (lb > 0 && (uint64_t)lb > target))
			break;
	}
	if (i >= as->as_numbuckets)
		ASSERT3U(lb, ==, ub);
	atomic_store_64((volatile uint64_t *)&as->as_lower_bound, lb);
	atomic_store_64(&as->as_upper_bound, ub);
	mutex_exit(&as->as_lock);
	return (ub < target ? -1 : (uint64_t)lb > target ? 1 : 0);
}
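/*
 * Example caller (illustrative; "evict_something" is a hypothetical
 * helper): shrinking a cache until it is no longer over target.
 * aggsum_compare() stops clearing buckets as soon as the narrowed bounds
 * settle the question, so iterations that are clearly over target stay
 * cheap and touch no bucket at all.
 */
static void evict_something(void);

static void
cache_shrink(aggsum_t *size, uint64_t target)
{
	while (aggsum_compare(size, target) > 0)
		evict_something();
}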