#include "test/jemalloc_test.h"
#include "test/arena_util.h"
#include "test/san.h"

#include "jemalloc/internal/san.h"

static void
verify_extent_guarded(tsdn_t *tsdn, void *ptr) {
	expect_true(extent_is_guarded(tsdn, ptr),
	    "All extents should be guarded.");
}

#define MAX_SMALL_ALLOCATIONS 4096
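/* Keep the small allocations around so the test can free all of them. */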
void *small_alloc[MAX_SMALL_ALLOCATIONS];

/*
 * This test allocates page-sized slabs and checks that any two slabs have at
 * least one page between them. That page is supposed to be the guard page.
 */
TEST_BEGIN(test_guarded_small) {
	test_skip_if(opt_prof);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	unsigned npages = 16, pages_found = 0, ends_found = 0;
	VARIABLE_ARRAY(uintptr_t, pages, npages);

	/* Allocate to get sanitized pointers. */
	size_t slab_sz = PAGE;
	size_t sz = slab_sz / 8;
	unsigned n_alloc = 0;
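	/*
	 * With regions of one eighth of a page, each page-sized slab holds
	 * several regions: a region starting on a page boundary marks the
	 * start of a slab, and a region ending on a page boundary marks the
	 * end of one.
	 */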
	while (n_alloc < MAX_SMALL_ALLOCATIONS) {
		void *ptr = malloc(sz);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		small_alloc[n_alloc] = ptr;
		verify_extent_guarded(tsdn, ptr);
		if ((uintptr_t)ptr % PAGE == 0) {
			assert_u_lt(pages_found, npages,
			    "Unexpectedly large number of page aligned allocs");
			pages[pages_found++] = (uintptr_t)ptr;
		}
		if (((uintptr_t)ptr + (uintptr_t)sz) % PAGE == 0) {
			ends_found++;
		}
		n_alloc++;
		if (pages_found == npages && ends_found == npages) {
			break;
		}
	}
	/* Should have found the ptrs checked for overflow and underflow. */
	expect_u_eq(pages_found, npages, "Could not find the expected pages.");
	expect_u_eq(ends_found, npages, "Could not find the expected page ends.");

	/* Verify the pages are not contiguous, i.e. separated by guards. */
	for (unsigned i = 0; i < npages - 1; i++) {
		for (unsigned j = i + 1; j < npages; j++) {
			uintptr_t ptr_diff = pages[i] > pages[j] ?
			    pages[i] - pages[j] : pages[j] - pages[i];
			expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE,
			    "There should be at least one page between "
			    "guarded slabs");
		}
	}

	for (unsigned i = 0; i < n_alloc; i++) {
		free(small_alloc[i]);
	}
}
TEST_END

TEST_BEGIN(test_guarded_large) {
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	unsigned nlarge = 32;
	VARIABLE_ARRAY(uintptr_t, large, nlarge);

	/* Allocate to get sanitized pointers. */
	size_t large_sz = SC_LARGE_MINCLASS;
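	/*
	 * Each guarded large allocation should carry its own guard pages, so
	 * any two of them are expected to be at least two pages apart.
	 */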
	for (unsigned i = 0; i < nlarge; i++) {
		void *ptr = malloc(large_sz);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		verify_extent_guarded(tsdn, ptr);
		large[i] = (uintptr_t)ptr;
	}

	/* Verify the pages are not contiguous, i.e. separated by guards. */
	for (unsigned i = 0; i < nlarge; i++) {
		for (unsigned j = i + 1; j < nlarge; j++) {
			uintptr_t ptr_diff = large[i] > large[j] ?
			    large[i] - large[j] : large[j] - large[i];
			expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
			    "There should be at least two pages between "
			    "guarded large allocations");
		}
	}

	for (unsigned i = 0; i < nlarge; i++) {
		free((void *)large[i]);
	}
}
TEST_END

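/*
 * The expected amounts below are in bytes; the arena stats report dirty and
 * muzzy amounts in pages.
 */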
static void
verify_pdirty(unsigned arena_ind, uint64_t expected) {
	uint64_t pdirty = get_arena_pdirty(arena_ind);
	expect_u64_eq(pdirty, expected / PAGE,
	    "Unexpected dirty page amount.");
}

static void
verify_pmuzzy(unsigned arena_ind, uint64_t expected) {
	uint64_t pmuzzy = get_arena_pmuzzy(arena_ind);
	expect_u64_eq(pmuzzy, expected / PAGE,
	    "Unexpected muzzy page amount.");
}

TEST_BEGIN(test_guarded_decay) {
	unsigned arena_ind = do_arena_create(-1, -1);
	do_decay(arena_ind);
	do_purge(arena_ind);

	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	/* Verify that guarded extents are counted as dirty. */
	size_t sz1 = PAGE, sz2 = PAGE * 2;
	/* W/o maps_coalesce, guarded extents are unguarded eagerly. */
	size_t add_guard_size = maps_coalesce ? 0 : SAN_PAGE_GUARDS_SIZE;
	generate_dirty(arena_ind, sz1);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	/* Should reuse the first extent. */
	generate_dirty(arena_ind, sz1);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	/* Should not reuse; expect new dirty pages. */
	generate_dirty(arena_ind, sz2);
	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
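	/*
	 * Bypass the tcache so that each mallocx() / dallocx() below goes
	 * straight to the arena, and dirty page counts change immediately.
	 */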
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;

	/* Should reuse dirty extents for the two mallocx() calls. */
	void *p1 = do_mallocx(sz1, flags);
	verify_extent_guarded(tsdn, p1);
	verify_pdirty(arena_ind, sz2 + add_guard_size);

	void *p2 = do_mallocx(sz2, flags);
	verify_extent_guarded(tsdn, p2);
	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	dallocx(p1, flags);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	dallocx(p2, flags);
	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	do_purge(arena_ind);
	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	if (config_stats) {
		expect_u64_eq(get_arena_npurge(arena_ind), 1,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_dirty_npurge(arena_ind), 1,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_dirty_purged(arena_ind),
		    (sz1 + sz2 + 2 * add_guard_size) / PAGE,
		    "Unexpected number of dirty pages purged");
		expect_u64_eq(get_arena_muzzy_npurge(arena_ind), 0,
		    "Expected no muzzy purging");
	}

	if (opt_retain) {
		/*
		 * With retain, guarded extents are not mergeable and will be
		 * cached in ecache_retained.  They should be reused.
		 */
		void *new_p1 = do_mallocx(sz1, flags);
		verify_extent_guarded(tsdn, new_p1);
		expect_ptr_eq(p1, new_p1, "Expect to reuse p1");

		void *new_p2 = do_mallocx(sz2, flags);
		verify_extent_guarded(tsdn, new_p2);
		expect_ptr_eq(p2, new_p2, "Expect to reuse p2");

		dallocx(new_p1, flags);
		verify_pdirty(arena_ind, sz1 + add_guard_size);
		dallocx(new_p2, flags);
		verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
		verify_pmuzzy(arena_ind, 0);
	}

	do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
	return test(
	    test_guarded_small,
	    test_guarded_large,
	    test_guarded_decay);
}