xref: /netbsd-src/external/bsd/jemalloc/dist/test/unit/junk.c (revision 7bdf38e5b7a28439665f2fdeff81e36913eef7dd)
1 #include "test/jemalloc_test.h"
2 
/* Element count of a true array (not valid on pointers). */
#define arraylen(arr) (sizeof(arr)/sizeof(arr[0]))
/* Next free slot in ptrs[]; reset() rewinds it between iterations. */
static size_t ptr_ind;
/*
 * Allocations made by do_allocs(), later freed one by one by the JUNK_FREE
 * loop.  volatile — presumably to keep the compiler from optimizing the
 * stores/allocations away; NOTE(review): confirm intent.
 */
static void *volatile ptrs[100];
/* Pointer and usable size most recently reported to the junk callback. */
static void *last_junked_ptr;
static size_t last_junked_usize;
8 
9 static void
10 reset() {
11 	ptr_ind = 0;
12 	last_junked_ptr = NULL;
13 	last_junked_usize = 0;
14 }
15 
16 static void
17 test_junk(void *ptr, size_t usize) {
18 	last_junked_ptr = ptr;
19 	last_junked_usize = usize;
20 }
21 
/*
 * Allocate `size` bytes through every allocation entry point that can honor
 * a (1 << lg_align)-byte alignment, recording each result in ptrs[] for the
 * caller to free.  After each allocation, if junk-on-alloc is enabled and
 * the memory was not requested zeroed, verify the junk callback saw exactly
 * this pointer with its usable size.
 */
static void
do_allocs(size_t size, bool zero, size_t lg_align) {
/*
 * Perform one allocation (the expression passed as __VA_ARGS__), stash the
 * result in ptrs[], and check the junk-callback record against it.  `zero`
 * is captured from the enclosing function scope.
 */
#define JUNK_ALLOC(...)							\
	do {								\
		assert(ptr_ind + 1 < arraylen(ptrs));			\
		void *ptr = __VA_ARGS__;				\
		assert_ptr_not_null(ptr, "");				\
		ptrs[ptr_ind++] = ptr;					\
		if (opt_junk_alloc && !zero) {				\
			expect_ptr_eq(ptr, last_junked_ptr, "");	\
			expect_zu_eq(last_junked_usize,			\
			    TEST_MALLOC_SIZE(ptr), "");			\
		}							\
	} while (0)
	/* malloc() offers no alignment or zeroing control. */
	if (!zero && lg_align == 0) {
		JUNK_ALLOC(malloc(size));
	}
	if (!zero) {
		JUNK_ALLOC(aligned_alloc(1 << lg_align, size));
	}
	/* Nonstandard overrides are only present on some platforms. */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
	if (!zero) {
		JUNK_ALLOC(je_memalign(1 << lg_align, size));
	}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
	/* valloc() always returns page-aligned memory. */
	if (!zero && lg_align == LG_PAGE) {
		JUNK_ALLOC(je_valloc(size));
	}
#endif
	/* mallocx() covers both the zeroed and non-zeroed paths. */
	int zero_flag = zero ? MALLOCX_ZERO : 0;
	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)));
	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)
	    | MALLOCX_TCACHE_NONE));
	/* posix_memalign() requires the alignment be >= sizeof(void *). */
	if (lg_align >= LG_SIZEOF_PTR) {
		void *memalign_result;
		int err = posix_memalign(&memalign_result, (1 << lg_align),
		    size);
		assert_d_eq(err, 0, "");
		JUNK_ALLOC(memalign_result);
	}
}
64 
/*
 * For every (zero, size, lg_align) combination, allocate through all entry
 * points and free through all deallocation entry points, checking that the
 * junk-on-free callback reported each freed pointer with its usable size.
 */
TEST_BEGIN(test_junk_alloc_free) {
	bool zerovals[] = {false, true};
	size_t sizevals[] = {
		1, 8, 100, 1000, 100*1000
	/*
	 * Memory allocation failure is a real possibility in 32-bit mode.
	 * Rather than try to check in the face of resource exhaustion, we just
	 * rely more on the 64-bit tests.  This is a little bit white-box-y in
	 * the sense that this is only a good test strategy if we know that the
	 * junk pathways don't interact with the allocation selection
	 * mechanisms; but this is in fact the case.
	 */
#if LG_SIZEOF_PTR == 3
		    , 10 * 1000 * 1000
#endif
	};
	size_t lg_alignvals[] = {
		0, 4, 10, 15, 16, LG_PAGE
#if LG_SIZEOF_PTR == 3
		    , 20, 24
#endif
	};

	/*
	 * Allocate via every entry point, then release each recorded pointer
	 * with the deallocation expression passed as __VA_ARGS__ (which sees
	 * `ptr`), verifying that junk-on-free saw that pointer and `usize`.
	 * `size`, `zero`, `lg_align`, and `usize` come from the loop below.
	 */
#define JUNK_FREE(...)							\
	do {								\
		do_allocs(size, zero, lg_align);			\
		for (size_t n = 0; n < ptr_ind; n++) {			\
			void *ptr = ptrs[n];				\
			__VA_ARGS__;					\
			if (opt_junk_free) {				\
				assert_ptr_eq(ptr, last_junked_ptr,	\
				    "");				\
				assert_zu_eq(usize, last_junked_usize,	\
				    "");				\
			}						\
			reset();					\
		}							\
	} while (0)
	for (size_t i = 0; i < arraylen(zerovals); i++) {
		for (size_t j = 0; j < arraylen(sizevals); j++) {
			for (size_t k = 0; k < arraylen(lg_alignvals); k++) {
				bool zero = zerovals[i];
				size_t size = sizevals[j];
				size_t lg_align = lg_alignvals[k];
				/* Usable size for this size/alignment pair. */
				size_t usize = nallocx(size,
				    MALLOCX_LG_ALIGN(lg_align));

				JUNK_FREE(free(ptr));
				JUNK_FREE(dallocx(ptr, 0));
				JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE));
				JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN(
				    lg_align)));
				JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN(
				    lg_align)));
				JUNK_FREE(sdallocx(ptr, usize,
				    MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align)));
				/* realloc(ptr, 0) only frees under this opt. */
				if (opt_zero_realloc_action
				    == zero_realloc_action_free) {
					JUNK_FREE(realloc(ptr, 0));
				}
			}
		}
	}
}
TEST_END
130 
/*
 * When a small allocation is grown to a large one, only the newly exposed
 * tail — [old_size, new_size) — should be junked.  Check that each
 * expanding-realloc entry point reports exactly that region, and that a
 * MALLOCX_ZERO expansion junks nothing at all.
 */
TEST_BEGIN(test_realloc_expand) {
	/*
	 * volatile — presumably to keep the compiler from eliding the
	 * allocations; NOTE(review): confirm intent.
	 */
	char *volatile ptr;
	char *volatile expanded;

	test_skip_if(!opt_junk_alloc);

	/* Realloc */
	ptr = malloc(SC_SMALL_MAXCLASS);
	expanded = realloc(ptr, SC_LARGE_MINCLASS);
	/* Only the grown tail past the old size should have been junked. */
	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
	expect_zu_eq(last_junked_usize,
	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
	free(expanded);

	/* rallocx(..., 0) */
	ptr = malloc(SC_SMALL_MAXCLASS);
	expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0);
	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
	expect_zu_eq(last_junked_usize,
	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
	free(expanded);

	/* rallocx(..., nonzero) */
	ptr = malloc(SC_SMALL_MAXCLASS);
	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
	expect_zu_eq(last_junked_usize,
	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
	free(expanded);

	/* rallocx(..., MALLOCX_ZERO) */
	ptr = malloc(SC_SMALL_MAXCLASS);
	/* Sentinels: if they survive, no junking happened during expansion. */
	last_junked_ptr = (void *)-1;
	last_junked_usize = (size_t)-1;
	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_ZERO);
	expect_ptr_eq(last_junked_ptr, (void *)-1, "");
	expect_zu_eq(last_junked_usize, (size_t)-1, "");
	free(expanded);

	/*
	 * Unfortunately, testing xallocx reliably is difficult to do portably
	 * (since allocations can be expanded / not expanded differently on
	 * different platforms.  We rely on manual inspection there -- the
	 * xallocx pathway is easy to inspect, though.
	 *
	 * Likewise, we don't test the shrinking pathways.  It's difficult to do
	 * so consistently (because of the risk of split failure or memory
	 * exhaustion, in which case no junking should happen).  This is fine
	 * -- junking is a best-effort debug mechanism in the first place.
	 */
}
TEST_END
183 
184 int
185 main(void) {
186 	junk_alloc_callback = &test_junk;
187 	junk_free_callback = &test_junk;
188 	/*
189 	 * We check the last pointer junked.  If a reentrant call happens, that
190 	 * might be an internal allocation.
191 	 */
192 	return test_no_reentrancy(
193 	    test_junk_alloc_free,
194 	    test_realloc_expand);
195 }
196