xref: /netbsd-src/external/bsd/jemalloc/dist/test/unit/sec.c (revision 7bdf38e5b7a28439665f2fdeff81e36913eef7dd)
#include "test/jemalloc_test.h"

#include "jemalloc/internal/sec.h"

typedef struct pai_test_allocator_s pai_test_allocator_t;
struct pai_test_allocator_s {
	pai_t pai;
	bool alloc_fail;
	size_t alloc_count;
	size_t alloc_batch_count;
	size_t dalloc_count;
	size_t dalloc_batch_count;
	/*
	 * We use a simple bump allocator as the implementation.  This isn't
	 * *really* correct, since we may allow expansion into a subsequent
	 * allocation, but it's not like the SEC is really examining the
	 * pointers it gets back; this is mostly just helpful for debugging.
	 */
	uintptr_t next_ptr;
	size_t expand_count;
	bool expand_return_value;
	size_t shrink_count;
	bool shrink_return_value;
};
static void
test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,
    size_t max_bytes) {
	sec_opts_t opts;
	opts.nshards = 1;
	opts.max_alloc = max_alloc;
	opts.max_bytes = max_bytes;
	/*
	 * Just choose reasonable defaults for these; most tests don't care,
	 * so long as the values are sane.
	 */
	opts.bytes_after_flush = max_bytes / 2;
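	/*
	 * With batch_fill_extra == 4, each cache-miss fill grabs 1 + 4 == 5
	 * extents from the fallback at once; test_auto_flush below is sized
	 * around this.
	 */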
	opts.batch_fill_extra = 4;

	/*
	 * We end up leaking this base, but that's fine; this test is
	 * short-running, and SECs are arena-scoped in reality.
	 */
	base_t *base = base_new(TSDN_NULL, /* ind */ 123,
	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);

	bool err = sec_init(TSDN_NULL, sec, base, fallback, &opts);
	assert_false(err, "Unexpected initialization failure");
	assert_u_ge(sec->npsizes, 0, "Zero size classes allowed for caching");
}

static inline edata_t *
pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
    bool *deferred_work_generated) {
	assert(!guarded);
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	if (ta->alloc_fail) {
		return NULL;
	}
	edata_t *edata = malloc(sizeof(edata_t));
	assert_ptr_not_null(edata, "");
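	/* Round next_ptr up to the requested alignment. */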
	ta->next_ptr += alignment - 1;
	edata_init(edata, /* arena_ind */ 0,
	    (void *)(ta->next_ptr & ~(alignment - 1)), size,
	    /* slab */ false,
	    /* szind */ 0, /* sn */ 1, extent_state_active, /* zero */ zero,
	    /* committed */ true, /* ranged */ false, EXTENT_NOT_HEAD);
	ta->next_ptr += size;
	ta->alloc_count++;
	return edata;
}

static inline size_t
pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t nallocs, edata_list_active_t *results,
    bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	if (ta->alloc_fail) {
		return 0;
	}
	for (size_t i = 0; i < nallocs; i++) {
		edata_t *edata = malloc(sizeof(edata_t));
		assert_ptr_not_null(edata, "");
		edata_init(edata, /* arena_ind */ 0,
		    (void *)ta->next_ptr, size,
		    /* slab */ false, /* szind */ 0, /* sn */ 1,
		    extent_state_active, /* zero */ false, /* committed */ true,
		    /* ranged */ false, EXTENT_NOT_HEAD);
		ta->next_ptr += size;
		ta->alloc_batch_count++;
		edata_list_active_append(results, edata);
	}
	return nallocs;
}

static bool
pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    size_t old_size, size_t new_size, bool zero,
    bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	ta->expand_count++;
	return ta->expand_return_value;
}

static bool
pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    size_t old_size, size_t new_size, bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	ta->shrink_count++;
	return ta->shrink_return_value;
}

static void
pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	ta->dalloc_count++;
	free(edata);
}

static void
pai_test_allocator_dalloc_batch(tsdn_t *tsdn, pai_t *self,
    edata_list_active_t *list, bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;

	edata_t *edata;
	while ((edata = edata_list_active_first(list)) != NULL) {
		edata_list_active_remove(list, edata);
		ta->dalloc_batch_count++;
		free(edata);
	}
}

static inline void
pai_test_allocator_init(pai_test_allocator_t *ta) {
	ta->alloc_fail = false;
	ta->alloc_count = 0;
	ta->alloc_batch_count = 0;
	ta->dalloc_count = 0;
	ta->dalloc_batch_count = 0;
	/* Just don't start the edata at 0. */
	ta->next_ptr = 10 * PAGE;
	ta->expand_count = 0;
	ta->expand_return_value = false;
	ta->shrink_count = 0;
	ta->shrink_return_value = false;
	ta->pai.alloc = &pai_test_allocator_alloc;
	ta->pai.alloc_batch = &pai_test_allocator_alloc_batch;
	ta->pai.expand = &pai_test_allocator_expand;
	ta->pai.shrink = &pai_test_allocator_shrink;
	ta->pai.dalloc = &pai_test_allocator_dalloc;
	ta->pai.dalloc_batch = &pai_test_allocator_dalloc_batch;
}

TEST_BEGIN(test_reuse) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/*
	 * We can't use the "real" tsd, since we malloc within the test
	 * allocator hooks; we'd get lock inversion crashes.  Eventually, we
	 * should have a way to mock tsds, but for now just don't do any
	 * lock-order checking.
	 */
	tsdn_t *tsdn = TSDN_NULL;
	/*
	 * 11 allocs apiece of 1-PAGE and 2-PAGE objects means that we should
	 * be able to get to 33 pages in the cache before triggering a flush.
	 * We set the flush limit to twice this amount, to avoid the batch
	 * allocation done on the cache-fill pathway accidentally triggering a
	 * flush and disrupting the ordering.
	 */
	enum { NALLOCS = 11 };
	edata_t *one_page[NALLOCS];
	edata_t *two_page[NALLOCS];
	bool deferred_work_generated = false;
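	/*
	 * max_bytes below is 2 * 33 pages == 66 pages: twice the 33-page
	 * working set described above.
	 */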
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
	    /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
	for (int i = 0; i < NALLOCS; i++) {
		one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
		two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(two_page[i], "Unexpected alloc failure");
	}
	expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
	expect_zu_le(2 * NALLOCS, max_allocs,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/*
	 * Free in a different order than we allocated, to make sure free-list
	 * separation works correctly.
	 */
	for (int i = NALLOCS - 1; i >= 0; i--) {
		pai_dalloc(tsdn, &sec.pai, one_page[i],
		    &deferred_work_generated);
	}
	for (int i = NALLOCS - 1; i >= 0; i--) {
		pai_dalloc(tsdn, &sec.pai, two_page[i],
		    &deferred_work_generated);
	}
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/*
	 * Check that the n'th most recent deallocated extent is returned for
	 * the n'th alloc request of a given size.
	 */
	for (int i = 0; i < NALLOCS; i++) {
		edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_eq(one_page[i], alloc1,
		    "Got unexpected allocation");
		expect_ptr_eq(two_page[i], alloc2,
		    "Got unexpected allocation");
	}
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
}
TEST_END

TEST_BEGIN(test_auto_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	/*
	 * 10 allocs of 1-PAGE objects means that we can get to 10 pages in
	 * the cache (right at the max_bytes limit below) before triggering a
	 * flush.  NALLOCS is chosen to match the batch allocation default
	 * (4 extra + 1 == 5; so 10 allocations leave the cache exactly empty,
	 * even in the presence of batch allocation on fill).  Eventually,
	 * once our allocation batching strategies become smarter, this should
	 * change.
	 */
	enum { NALLOCS = 10 };
	edata_t *extra_alloc;
	edata_t *allocs[NALLOCS];
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
	expect_zu_le(NALLOCS + 1, max_allocs,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/* Free until the SEC is full; it should not have flushed yet. */
	for (int i = 0; i < NALLOCS; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	expect_zu_le(NALLOCS + 1, max_allocs,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/*
	 * Free the extra allocation; this should trigger a flush.  The internal
	 * flushing logic is allowed to get complicated; for now, we rely on our
	 * whitebox knowledge of the fact that the SEC flushes bins in their
	 * entirety when it decides to do so, and it has only one bin active
	 * right now.
	 */
	pai_dalloc(tsdn, &sec.pai, extra_alloc, &deferred_work_generated);
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_eq(NALLOCS + 1, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
}
TEST_END

/*
 * A disable and a flush are *almost* equivalent; the only difference is what
 * happens afterwards: disabling disallows all future caching as well.
 */
static void
do_disable_flush_test(bool is_disable) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum { NALLOCS = 11 };
	edata_t *allocs[NALLOCS];
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	/* Free all but the last alloc. */
	for (int i = 0; i < NALLOCS - 1; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;

	expect_zu_le(NALLOCS, max_allocs, "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");

	if (is_disable) {
		sec_disable(tsdn, &sec);
	} else {
		sec_flush(tsdn, &sec);
	}

	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_le(NALLOCS - 1, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
	size_t old_dalloc_batch_count = ta.dalloc_batch_count;

	/*
	 * If we free into a disabled SEC, it should forward to the fallback.
	 * Otherwise, the SEC should cache the freed extent itself.
	 */
	pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1],
	    &deferred_work_generated);

	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(is_disable ? 1 : 0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_eq(old_dalloc_batch_count, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
}

TEST_BEGIN(test_disable) {
	do_disable_flush_test(/* is_disable */ true);
}
TEST_END

TEST_BEGIN(test_flush) {
	do_disable_flush_test(/* is_disable */ false);
}
TEST_END

TEST_BEGIN(test_max_alloc_respected) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	size_t max_alloc = 2 * PAGE;
	size_t attempted_alloc = 3 * PAGE;

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
	    /* max_bytes */ 1000 * PAGE);

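	/*
	 * attempted_alloc exceeds max_alloc, so every request below should
	 * bypass the SEC and go straight to the fallback; its counters
	 * therefore advance on every iteration.
	 */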
	for (size_t i = 0; i < 100; i++) {
		expect_zu_eq(i, ta.alloc_count,
		    "Incorrect number of allocations");
		expect_zu_eq(i, ta.dalloc_count,
		    "Incorrect number of deallocations");
		edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
		    PAGE, /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_work_generated);
		expect_ptr_not_null(edata, "Unexpected alloc failure");
		expect_zu_eq(i + 1, ta.alloc_count,
		    "Incorrect number of allocations");
		expect_zu_eq(i, ta.dalloc_count,
		    "Incorrect number of deallocations");
		pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
	}
}
TEST_END

TEST_BEGIN(test_expand_shrink_delegate) {
	/*
	 * Expand and shrink shouldn't affect SEC state; they should just
	 * delegate to the fallback PAI.
	 */
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
	    /* max_bytes */ 1000 * PAGE);
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Unexpected alloc failure");

	bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_false(err, "Unexpected expand failure");
	expect_zu_eq(1, ta.expand_count, "");
	ta.expand_return_value = true;
	err = pai_expand(tsdn, &sec.pai, edata, 4 * PAGE, 3 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_true(err, "Unexpected expand success");
	expect_zu_eq(2, ta.expand_count, "");

	err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE,
	    &deferred_work_generated);
	expect_false(err, "Unexpected shrink failure");
	expect_zu_eq(1, ta.shrink_count, "");
	ta.shrink_return_value = true;
	err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE,
	    &deferred_work_generated);
	expect_true(err, "Unexpected shrink success");
	expect_zu_eq(2, ta.shrink_count, "");
}
TEST_END

TEST_BEGIN(test_nshards_0) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	base_t *base = base_new(TSDN_NULL, /* ind */ 123,
	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);

	sec_opts_t opts = SEC_OPTS_DEFAULT;
	opts.nshards = 0;
	sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts);

	bool deferred_work_generated = false;
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);

	/* Both operations should have gone directly to the fallback. */
	expect_zu_eq(1, ta.alloc_count, "");
	expect_zu_eq(1, ta.dalloc_count, "");
}
TEST_END

static void
expect_stats_pages(tsdn_t *tsdn, sec_t *sec, size_t npages) {
	sec_stats_t stats;
	/*
	 * Check that the stats merging accumulates rather than overwrites by
	 * putting some (made up) data there to begin with.
	 */
	stats.bytes = 123;
	sec_stats_merge(tsdn, sec, &stats);
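	/*
	 * Only a lower bound is checked; batch fills may leave extra extents
	 * cached beyond the npages we expect.
	 */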
	assert_zu_le(npages * PAGE + 123, stats.bytes, "");
}

TEST_BEGIN(test_stats_simple) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		NITERS = 100,
		FLUSH_PAGES = 20,
	};

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

	/* Increase and decrease, without flushing. */
	for (size_t i = 0; i < NITERS; i++) {
		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
			pai_dalloc(tsdn, &sec.pai, allocs[j],
			    &deferred_work_generated);
			expect_stats_pages(tsdn, &sec, j + 1);
		}
		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
			allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
			    /* zero */ false, /* guarded */ false,
			    /* frequent_reuse */ false,
			    &deferred_work_generated);
			expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
		}
	}
}
TEST_END

TEST_BEGIN(test_stats_auto_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		FLUSH_PAGES = 10,
	};

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	edata_t *extra_alloc0;
	edata_t *extra_alloc1;
	edata_t *allocs[2 * FLUSH_PAGES];

	bool deferred_work_generated = false;

	extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);

	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
	}

	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	pai_dalloc(tsdn, &sec.pai, extra_alloc0, &deferred_work_generated);

	/* Dalloc the remaining pages; stats should still work. */
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i],
		    &deferred_work_generated);
	}

	pai_dalloc(tsdn, &sec.pai, extra_alloc1, &deferred_work_generated);

	expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count
	    - ta.dalloc_count - ta.dalloc_batch_count);
}
TEST_END

TEST_BEGIN(test_stats_manual_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;

	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		FLUSH_PAGES = 10,
	};

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	bool deferred_work_generated = false;
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

	/* Dalloc the first half of the allocations. */
	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, i + 1);
	}

	sec_flush(tsdn, &sec);
	expect_stats_pages(tsdn, &sec, 0);

	/* Dalloc the second half; the disable below flushes them. */
	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES / 2 + i],
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, i + 1);
	}
	sec_disable(tsdn, &sec);
	expect_stats_pages(tsdn, &sec, 0);
}
TEST_END

int
main(void) {
	return test(
	    test_reuse,
	    test_auto_flush,
	    test_disable,
	    test_flush,
	    test_max_alloc_respected,
	    test_expand_shrink_delegate,
	    test_nshards_0,
	    test_stats_simple,
	    test_stats_auto_flush,
	    test_stats_manual_flush);
}