#include "test/jemalloc_test.h"

#include "jemalloc/internal/rtree.h"

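/*
 * All-ones arena index, used as a deliberately invalid owner for the
 * test-only edata_t instances initialized below.
 */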
#define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)

/* Potentially too large to safely place on the stack. */
rtree_t test_rtree;

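/* A lookup of a key that was never written must fail on an empty rtree. */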
TEST_BEGIN(test_rtree_read_empty) {
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();

	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	expect_ptr_not_null(base, "Unexpected base_new failure");

	rtree_t *rtree = &test_rtree;
	rtree_ctx_t rtree_ctx;
	rtree_ctx_data_init(&rtree_ctx);
	expect_false(rtree_new(rtree, base, false),
	    "Unexpected rtree_new() failure");
	rtree_contents_t contents;
	expect_true(rtree_read_independent(tsdn, rtree, &rtree_ctx, PAGE,
	    &contents), "rtree_read_independent() should fail on empty rtree.");

	base_delete(tsdn, base);
}
TEST_END

#undef NTHREADS
#undef NITERS
#undef SEED

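/* Heap-allocate an edata_t with the required EDATA_ALIGNMENT. */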
static edata_t *
alloc_edata(void) {
	void *ret = mallocx(sizeof(edata_t), MALLOCX_ALIGN(EDATA_ALIGNMENT));
	assert_ptr_not_null(ret, "Unexpected mallocx() failure");

	return ret;
}

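/*
 * Exercise the extreme keys: the lowest mappable page (PAGE) and the
 * highest possible address (~(uintptr_t)0).
 */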
TEST_BEGIN(test_rtree_extrema) {
	edata_t *edata_a, *edata_b;
	edata_a = alloc_edata();
	edata_b = alloc_edata();
	edata_init(edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
	    false, sz_size2index(SC_LARGE_MINCLASS), 0,
	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
	edata_init(edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);

	tsdn_t *tsdn = tsdn_fetch();

	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	expect_ptr_not_null(base, "Unexpected base_new failure");

	rtree_t *rtree = &test_rtree;
	rtree_ctx_t rtree_ctx;
	rtree_ctx_data_init(&rtree_ctx);
	expect_false(rtree_new(rtree, base, false),
	    "Unexpected rtree_new() failure");

	rtree_contents_t contents_a;
	contents_a.edata = edata_a;
	contents_a.metadata.szind = edata_szind_get(edata_a);
	contents_a.metadata.slab = edata_slab_get(edata_a);
	contents_a.metadata.is_head = edata_is_head_get(edata_a);
	contents_a.metadata.state = edata_state_get(edata_a);
	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
	    "Unexpected rtree_write() failure");
	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
	    "Unexpected rtree_write() failure");
	rtree_contents_t read_contents_a = rtree_read(tsdn, rtree, &rtree_ctx,
	    PAGE);
	expect_true(contents_a.edata == read_contents_a.edata
	    && contents_a.metadata.szind == read_contents_a.metadata.szind
	    && contents_a.metadata.slab == read_contents_a.metadata.slab
	    && contents_a.metadata.is_head == read_contents_a.metadata.is_head
	    && contents_a.metadata.state == read_contents_a.metadata.state,
	    "rtree_read() should return previously set value");

	rtree_contents_t contents_b;
	contents_b.edata = edata_b;
	contents_b.metadata.szind = edata_szind_get_maybe_invalid(edata_b);
	contents_b.metadata.slab = edata_slab_get(edata_b);
	contents_b.metadata.is_head = edata_is_head_get(edata_b);
	contents_b.metadata.state = edata_state_get(edata_b);
	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
	    contents_b), "Unexpected rtree_write() failure");
	rtree_contents_t read_contents_b = rtree_read(tsdn, rtree, &rtree_ctx,
	    ~((uintptr_t)0));
	assert_true(contents_b.edata == read_contents_b.edata
	    && contents_b.metadata.szind == read_contents_b.metadata.szind
	    && contents_b.metadata.slab == read_contents_b.metadata.slab
	    && contents_b.metadata.is_head == read_contents_b.metadata.is_head
	    && contents_b.metadata.state == read_contents_b.metadata.state,
	    "rtree_read() should return previously set value");

	base_delete(tsdn, base);
}
TEST_END

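/*
 * Keys that differ only in the insignificant low bits (within a page)
 * must map to the same leaf entry.
 */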
TEST_BEGIN(test_rtree_bits) {
	tsdn_t *tsdn = tsdn_fetch();
	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	expect_ptr_not_null(base, "Unexpected base_new failure");

	uintptr_t keys[] = {PAGE, PAGE + 1,
	    PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
	edata_t *edata_c = alloc_edata();
	edata_init(edata_c, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);

	rtree_t *rtree = &test_rtree;
	rtree_ctx_t rtree_ctx;
	rtree_ctx_data_init(&rtree_ctx);
	expect_false(rtree_new(rtree, base, false),
	    "Unexpected rtree_new() failure");

	for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
		rtree_contents_t contents;
		contents.edata = edata_c;
		contents.metadata.szind = SC_NSIZES;
		contents.metadata.slab = false;
		contents.metadata.is_head = false;
		contents.metadata.state = extent_state_active;

		expect_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
		    contents), "Unexpected rtree_write() failure");
		for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
			expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
			    keys[j]).edata, edata_c,
			    "rtree_edata_read() should return previously set "
			    "value and ignore insignificant key bits; i=%u, "
			    "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
			    j, keys[i], keys[j]);
		}
		expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
		    (((uintptr_t)2) << LG_PAGE)).edata,
		    "Only leftmost rtree leaf should be set; i=%u", i);
		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
	}

	base_delete(tsdn, base);
}
TEST_END

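/* Write, read back, and clear a set of uniformly random keys. */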
TEST_BEGIN(test_rtree_random) {
#define NSET 16
#define SEED 42
	sfmt_t *sfmt = init_gen_rand(SEED);
	tsdn_t *tsdn = tsdn_fetch();

	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	expect_ptr_not_null(base, "Unexpected base_new failure");

	uintptr_t keys[NSET];
	rtree_t *rtree = &test_rtree;
	rtree_ctx_t rtree_ctx;
	rtree_ctx_data_init(&rtree_ctx);

	edata_t *edata_d = alloc_edata();
	edata_init(edata_d, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);

	expect_false(rtree_new(rtree, base, false),
	    "Unexpected rtree_new() failure");

	for (unsigned i = 0; i < NSET; i++) {
		keys[i] = (uintptr_t)gen_rand64(sfmt);
		rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree,
		    &rtree_ctx, keys[i], false, true);
		expect_ptr_not_null(elm,
		    "Unexpected rtree_leaf_elm_lookup() failure");
		rtree_contents_t contents;
		contents.edata = edata_d;
		contents.metadata.szind = SC_NSIZES;
		contents.metadata.slab = false;
		contents.metadata.is_head = false;
		contents.metadata.state = edata_state_get(edata_d);
		rtree_leaf_elm_write(tsdn, rtree, elm, contents);
		expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
		    keys[i]).edata, edata_d,
		    "rtree_edata_read() should return previously set value");
	}
	for (unsigned i = 0; i < NSET; i++) {
		expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
		    keys[i]).edata, edata_d,
		    "rtree_edata_read() should return previously set value, "
		    "i=%u", i);
	}

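	/* Clearing a key must make subsequent reads return NULL. */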
	for (unsigned i = 0; i < NSET; i++) {
		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
		expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
		    keys[i]).edata,
		    "rtree_read() should return NULL after rtree_clear()");
	}
	for (unsigned i = 0; i < NSET; i++) {
		expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
		    keys[i]).edata,
		    "rtree_read() should return NULL after rtree_clear()");
	}

	base_delete(tsdn, base);
	fini_gen_rand(sfmt);
#undef NSET
#undef SEED
}
TEST_END

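/*
 * Write a contiguous [start, end] range of pages, verify each page reads
 * back the same edata, then clear the range and verify the entries are
 * emptied but still initialized.
 */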
static void
test_rtree_range_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t start,
    uintptr_t end) {
	rtree_ctx_t rtree_ctx;
	rtree_ctx_data_init(&rtree_ctx);

	edata_t *edata_e = alloc_edata();
	edata_init(edata_e, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
	rtree_contents_t contents;
	contents.edata = edata_e;
	contents.metadata.szind = SC_NSIZES;
	contents.metadata.slab = false;
	contents.metadata.is_head = false;
	contents.metadata.state = extent_state_active;

	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, start,
	    contents), "Unexpected rtree_write() failure");
	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, end,
	    contents), "Unexpected rtree_write() failure");

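	/* Populate the whole range, then verify and clear it page by page. */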
	rtree_write_range(tsdn, rtree, &rtree_ctx, start, end, contents);
	for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) {
		expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
		    start + (i << LG_PAGE)).edata, edata_e,
		    "rtree_edata_read() should return previously set value");
	}
	rtree_clear_range(tsdn, rtree, &rtree_ctx, start, end);
	rtree_leaf_elm_t *elm;
	for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) {
		elm = rtree_leaf_elm_lookup(tsdn, rtree, &rtree_ctx,
		    start + (i << LG_PAGE), false, false);
		expect_ptr_not_null(elm, "Should have been initialized.");
		expect_ptr_null(rtree_leaf_elm_read(tsdn, rtree, elm,
		    false).edata, "Should have been cleared.");
	}
}

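/* Range writes both within a single rtree leaf and across a leaf boundary. */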
TEST_BEGIN(test_rtree_range) {
	tsdn_t *tsdn = tsdn_fetch();
	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	expect_ptr_not_null(base, "Unexpected base_new failure");

	rtree_t *rtree = &test_rtree;
	expect_false(rtree_new(rtree, base, false),
	    "Unexpected rtree_new() failure");

	/* First, a range that does not cross an rtree node boundary. */
	uintptr_t start = ZU(1) << rtree_leaf_maskbits();
	uintptr_t end = start + (ZU(100) << LG_PAGE);
	test_rtree_range_write(tsdn, rtree, start, end);

	/* Then a range that crosses an rtree node boundary. */
	start = (ZU(1) << rtree_leaf_maskbits()) - (ZU(10) << LG_PAGE);
	end = start + (ZU(100) << LG_PAGE);
	assert_ptr_ne((void *)rtree_leafkey(start), (void *)rtree_leafkey(end),
	    "The range should span across two rtree nodes");
	test_rtree_range_write(tsdn, rtree, start, end);

	base_delete(tsdn, base);
}
TEST_END

int
main(void) {
	return test(
	    test_rtree_read_empty,
	    test_rtree_extrema,
	    test_rtree_bits,
	    test_rtree_random,
	    test_rtree_range);
}