xref: /netbsd-src/external/bsd/jemalloc/dist/test/unit/junk.c (revision 7bdf38e5b7a28439665f2fdeff81e36913eef7dd)
1a0698ed9Schristos #include "test/jemalloc_test.h"
2a0698ed9Schristos 
/* Element count of a true array (not valid on pointers/decayed parameters). */
#define arraylen(arr) (sizeof(arr)/sizeof(arr[0]))
/* Next free slot in ptrs[] == number of live allocations made by do_allocs(). */
static size_t ptr_ind;
/*
 * Allocations recorded by do_allocs() for later freeing.
 * NOTE(review): `volatile` presumably prevents the compiler from optimizing
 * the stores away or reordering them around allocator calls — confirm intent.
 */
static void *volatile ptrs[100];
/* Pointer and usable size seen by the most recent junk-callback invocation. */
static void *last_junked_ptr;
static size_t last_junked_usize;
8a0698ed9Schristos 
9a0698ed9Schristos static void
10*7bdf38e5Schristos reset() {
11*7bdf38e5Schristos 	ptr_ind = 0;
12*7bdf38e5Schristos 	last_junked_ptr = NULL;
13*7bdf38e5Schristos 	last_junked_usize = 0;
14a0698ed9Schristos }
15a0698ed9Schristos 
16a0698ed9Schristos static void
17*7bdf38e5Schristos test_junk(void *ptr, size_t usize) {
18*7bdf38e5Schristos 	last_junked_ptr = ptr;
19*7bdf38e5Schristos 	last_junked_usize = usize;
20a0698ed9Schristos }
21a0698ed9Schristos 
/*
 * Allocate `size` bytes through every allocation entry point applicable to
 * the given (zero, lg_align) combination, recording each result in ptrs[].
 * When junk-on-alloc is enabled and the allocation is not zeroed, verify
 * that the junk callback saw exactly the returned pointer and its usable
 * size (TEST_MALLOC_SIZE).
 */
static void
do_allocs(size_t size, bool zero, size_t lg_align) {
/*
 * Evaluate the allocation expression in __VA_ARGS__, stash the result, and
 * check the junk-callback bookkeeping.  Multi-statement macro in the usual
 * do { } while (0) form.
 */
#define JUNK_ALLOC(...)							\
	do {								\
		assert(ptr_ind + 1 < arraylen(ptrs));			\
		void *ptr = __VA_ARGS__;				\
		assert_ptr_not_null(ptr, "");				\
		ptrs[ptr_ind++] = ptr;					\
		if (opt_junk_alloc && !zero) {				\
			expect_ptr_eq(ptr, last_junked_ptr, "");	\
			expect_zu_eq(last_junked_usize,			\
			    TEST_MALLOC_SIZE(ptr), "");			\
		}							\
	} while (0)
	/* malloc takes no alignment; only exercised at default alignment. */
	if (!zero && lg_align == 0) {
		JUNK_ALLOC(malloc(size));
	}
	/* aligned_alloc has no zeroing option, so skip it when zero is set. */
	if (!zero) {
		JUNK_ALLOC(aligned_alloc(1 << lg_align, size));
	}
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
	if (!zero) {
		JUNK_ALLOC(je_memalign(1 << lg_align, size));
	}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
	/* valloc is implicitly page-aligned; only meaningful at LG_PAGE. */
	if (!zero && lg_align == LG_PAGE) {
		JUNK_ALLOC(je_valloc(size));
	}
#endif
	/* mallocx supports both zeroing and alignment via flags. */
	int zero_flag = zero ? MALLOCX_ZERO : 0;
	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)));
	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)
	    | MALLOCX_TCACHE_NONE));
	/*
	 * posix_memalign requires alignment to be a multiple of
	 * sizeof(void *), hence the LG_SIZEOF_PTR guard.
	 */
	if (lg_align >= LG_SIZEOF_PTR) {
		void *memalign_result;
		int err = posix_memalign(&memalign_result, (1 << lg_align),
		    size);
		assert_d_eq(err, 0, "");
		JUNK_ALLOC(memalign_result);
	}
}
64a0698ed9Schristos 
/*
 * Exercise every deallocation entry point across a matrix of
 * {zeroed, size, alignment} combinations.  For each freed pointer, when
 * junk-on-free is enabled, check that the junk callback saw exactly that
 * pointer with its usable size.
 */
TEST_BEGIN(test_junk_alloc_free) {
	bool zerovals[] = {false, true};
	size_t sizevals[] = {
		1, 8, 100, 1000, 100*1000
	/*
	 * Memory allocation failure is a real possibility in 32-bit mode.
	 * Rather than try to check in the face of resource exhaustion, we just
	 * rely more on the 64-bit tests.  This is a little bit white-box-y in
	 * the sense that this is only a good test strategy if we know that the
	 * junk pathways don't interact with the allocation selection
	 * mechanisms; but this is in fact the case.
	 */
#if LG_SIZEOF_PTR == 3
		    , 10 * 1000 * 1000
#endif
	};
	size_t lg_alignvals[] = {
		0, 4, 10, 15, 16, LG_PAGE
#if LG_SIZEOF_PTR == 3
		    , 20, 24
#endif
	};

	/*
	 * Allocate through every applicable path, then free each recorded
	 * pointer with the deallocation expression in __VA_ARGS__ (evaluated
	 * with `ptr` bound to the allocation), checking the junk-free
	 * callback afterwards and resetting the tracking state per pointer.
	 */
#define JUNK_FREE(...)							\
	do {								\
		do_allocs(size, zero, lg_align);			\
		for (size_t n = 0; n < ptr_ind; n++) {			\
			void *ptr = ptrs[n];				\
			__VA_ARGS__;					\
			if (opt_junk_free) {				\
				assert_ptr_eq(ptr, last_junked_ptr,	\
				    "");				\
				assert_zu_eq(usize, last_junked_usize,	\
				    "");				\
			}						\
			reset();					\
		}							\
	} while (0)
	for (size_t i = 0; i < arraylen(zerovals); i++) {
		for (size_t j = 0; j < arraylen(sizevals); j++) {
			for (size_t k = 0; k < arraylen(lg_alignvals); k++) {
				bool zero = zerovals[i];
				size_t size = sizevals[j];
				size_t lg_align = lg_alignvals[k];
				/* Expected usable size for the sdallocx calls. */
				size_t usize = nallocx(size,
				    MALLOCX_LG_ALIGN(lg_align));

				JUNK_FREE(free(ptr));
				JUNK_FREE(dallocx(ptr, 0));
				JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE));
				JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN(
				    lg_align)));
				JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN(
				    lg_align)));
				JUNK_FREE(sdallocx(ptr, usize,
				    MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align)));
				/* realloc(ptr, 0) only frees under this option. */
				if (opt_zero_realloc_action
				    == zero_realloc_action_free) {
					JUNK_FREE(realloc(ptr, 0));
				}
			}
		}
	}
}
TEST_END
130a0698ed9Schristos 
131*7bdf38e5Schristos TEST_BEGIN(test_realloc_expand) {
132*7bdf38e5Schristos 	char *volatile ptr;
133*7bdf38e5Schristos 	char *volatile expanded;
134*7bdf38e5Schristos 
135*7bdf38e5Schristos 	test_skip_if(!opt_junk_alloc);
136*7bdf38e5Schristos 
137*7bdf38e5Schristos 	/* Realloc */
138*7bdf38e5Schristos 	ptr = malloc(SC_SMALL_MAXCLASS);
139*7bdf38e5Schristos 	expanded = realloc(ptr, SC_LARGE_MINCLASS);
140*7bdf38e5Schristos 	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
141*7bdf38e5Schristos 	expect_zu_eq(last_junked_usize,
142*7bdf38e5Schristos 	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
143*7bdf38e5Schristos 	free(expanded);
144*7bdf38e5Schristos 
145*7bdf38e5Schristos 	/* rallocx(..., 0) */
146*7bdf38e5Schristos 	ptr = malloc(SC_SMALL_MAXCLASS);
147*7bdf38e5Schristos 	expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0);
148*7bdf38e5Schristos 	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
149*7bdf38e5Schristos 	expect_zu_eq(last_junked_usize,
150*7bdf38e5Schristos 	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
151*7bdf38e5Schristos 	free(expanded);
152*7bdf38e5Schristos 
153*7bdf38e5Schristos 	/* rallocx(..., nonzero) */
154*7bdf38e5Schristos 	ptr = malloc(SC_SMALL_MAXCLASS);
155*7bdf38e5Schristos 	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
156*7bdf38e5Schristos 	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
157*7bdf38e5Schristos 	expect_zu_eq(last_junked_usize,
158*7bdf38e5Schristos 	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
159*7bdf38e5Schristos 	free(expanded);
160*7bdf38e5Schristos 
161*7bdf38e5Schristos 	/* rallocx(..., MALLOCX_ZERO) */
162*7bdf38e5Schristos 	ptr = malloc(SC_SMALL_MAXCLASS);
163*7bdf38e5Schristos 	last_junked_ptr = (void *)-1;
164*7bdf38e5Schristos 	last_junked_usize = (size_t)-1;
165*7bdf38e5Schristos 	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_ZERO);
166*7bdf38e5Schristos 	expect_ptr_eq(last_junked_ptr, (void *)-1, "");
167*7bdf38e5Schristos 	expect_zu_eq(last_junked_usize, (size_t)-1, "");
168*7bdf38e5Schristos 	free(expanded);
169*7bdf38e5Schristos 
170*7bdf38e5Schristos 	/*
171*7bdf38e5Schristos 	 * Unfortunately, testing xallocx reliably is difficult to do portably
172*7bdf38e5Schristos 	 * (since allocations can be expanded / not expanded differently on
173*7bdf38e5Schristos 	 * different platforms.  We rely on manual inspection there -- the
174*7bdf38e5Schristos 	 * xallocx pathway is easy to inspect, though.
175*7bdf38e5Schristos 	 *
176*7bdf38e5Schristos 	 * Likewise, we don't test the shrinking pathways.  It's difficult to do
177*7bdf38e5Schristos 	 * so consistently (because of the risk of split failure or memory
178*7bdf38e5Schristos 	 * exhaustion, in which case no junking should happen).  This is fine
179*7bdf38e5Schristos 	 * -- junking is a best-effort debug mechanism in the first place.
180*7bdf38e5Schristos 	 */
181a0698ed9Schristos }
182a0698ed9Schristos TEST_END
183a0698ed9Schristos 
184a0698ed9Schristos int
185a0698ed9Schristos main(void) {
186*7bdf38e5Schristos 	junk_alloc_callback = &test_junk;
187*7bdf38e5Schristos 	junk_free_callback = &test_junk;
188*7bdf38e5Schristos 	/*
189*7bdf38e5Schristos 	 * We check the last pointer junked.  If a reentrant call happens, that
190*7bdf38e5Schristos 	 * might be an internal allocation.
191*7bdf38e5Schristos 	 */
192*7bdf38e5Schristos 	return test_no_reentrancy(
193*7bdf38e5Schristos 	    test_junk_alloc_free,
194*7bdf38e5Schristos 	    test_realloc_expand);
195a0698ed9Schristos }
196