#include "test/jemalloc_test.h"

#define BATCH_MAX ((1U << 16) + 1024)
static void *global_ptrs[BATCH_MAX];

#define PAGE_ALIGNED(ptr) (((uintptr_t)ptr & PAGE_MASK) == 0)

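/*
 * Check that every pointer in a batch has the expected usable size, and
 * that its contents are zeroed when MALLOCX_ZERO was requested.
 */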
static void
verify_batch_basic(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
    bool zero) {
	for (size_t i = 0; i < batch; ++i) {
		void *p = ptrs[i];
		expect_zu_eq(isalloc(tsd_tsdn(tsd), p), usize, "");
		if (zero) {
			for (size_t k = 0; k < usize; ++k) {
				expect_true(*((unsigned char *)p + k) == 0, "");
			}
		}
	}
}

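/*
 * Check that batch-allocated regions are contiguous: within each run of
 * nregs regions, the first region starts a page-aligned slab and each
 * subsequent region follows its predecessor at a usize offset, all coming
 * from the expected arena.  A trailing partial run is skipped, since it
 * may have been carved out of a partially filled slab.
 */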
static void
verify_batch_locality(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
    arena_t *arena, unsigned nregs) {
	if (config_prof && opt_prof) {
		/*
		 * Checking batch locality when prof is on would be feasible
		 * but complicated; checking the non-prof case suffices for
		 * unit-test purposes.
		 */
		return;
	}
	for (size_t i = 0, j = 0; i < batch; ++i, ++j) {
		if (j == nregs) {
			j = 0;
		}
		if (j == 0 && batch - i < nregs) {
			break;
		}
		void *p = ptrs[i];
		expect_ptr_eq(iaalloc(tsd_tsdn(tsd), p), arena, "");
		if (j == 0) {
			expect_true(PAGE_ALIGNED(p), "");
			continue;
		}
		assert(i > 0);
		void *q = ptrs[i - 1];
		expect_true((uintptr_t)p > (uintptr_t)q
		    && (size_t)((uintptr_t)p - (uintptr_t)q) == usize, "");
	}
}

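/* Free a batch with sdallocx(), passing the size used at allocation time. */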
static void
release_batch(void **ptrs, size_t batch, size_t size) {
	for (size_t i = 0; i < batch; ++i) {
		sdallocx(ptrs[i], size, 0);
	}
}

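/*
 * Input packet for the "experimental.batch_alloc" mallctl; its layout
 * must match the batch_alloc_packet_t that jemalloc itself reads.
 */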
typedef struct batch_alloc_packet_s batch_alloc_packet_t;
struct batch_alloc_packet_s {
	void **ptrs;
	size_t num;
	size_t size;
	int flags;
};

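/*
 * Drive a batch allocation through the "experimental.batch_alloc" mallctl
 * and return the number of slots actually filled.
 */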
static size_t
batch_alloc_wrapper(void **ptrs, size_t num, size_t size, int flags) {
	batch_alloc_packet_t batch_alloc_packet = {ptrs, num, size, flags};
	size_t filled;
	size_t len = sizeof(size_t);
	assert_d_eq(mallctl("experimental.batch_alloc", &filled, &len,
	    &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, "");
	return filled;
}

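/*
 * Main test driver: derive the usable size and bin geometry for the
 * requested size/alignment, resolve the target arena, then batch-allocate
 * and verify batches of various sizes.
 */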
static void
test_wrapper(size_t size, size_t alignment, bool zero, unsigned arena_flag) {
	tsd_t *tsd = tsd_fetch();
	assert(tsd != NULL);
	const size_t usize =
	    (alignment != 0 ? sz_sa2u(size, alignment) : sz_s2u(size));
	const szind_t ind = sz_size2index(usize);
	const bin_info_t *bin_info = &bin_infos[ind];
	const unsigned nregs = bin_info->nregs;
	assert(nregs > 0);
	arena_t *arena;
	if (arena_flag != 0) {
		arena = arena_get(tsd_tsdn(tsd), MALLOCX_ARENA_GET(arena_flag),
		    false);
	} else {
		arena = arena_choose(tsd, NULL);
	}
	assert(arena != NULL);
	int flags = arena_flag;
	if (alignment != 0) {
		flags |= MALLOCX_ALIGN(alignment);
	}
	if (zero) {
		flags |= MALLOCX_ZERO;
	}

	/*
	 * Allocate for the purpose of bootstrapping arena_tdata, so that the
	 * change in bin stats won't contaminate the stats to be verified below.
	 */
	void *p = mallocx(size, flags | MALLOCX_TCACHE_NONE);

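	/*
	 * Exercise batch sizes just below, at, and just above 0, nregs,
	 * 2 * nregs, and 1 << 16, so that batches straddle slab boundaries.
	 */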
	for (size_t i = 0; i < 4; ++i) {
		size_t base = 0;
		if (i == 1) {
			base = nregs;
		} else if (i == 2) {
			base = nregs * 2;
		} else if (i == 3) {
			base = (1 << 16);
		}
		for (int j = -1; j <= 1; ++j) {
			if (base == 0 && j == -1) {
				continue;
			}
			size_t batch = base + (size_t)j;
			assert(batch < BATCH_MAX);
			size_t filled = batch_alloc_wrapper(global_ptrs, batch,
			    size, flags);
			assert_zu_eq(filled, batch, "");
			verify_batch_basic(tsd, global_ptrs, batch, usize,
			    zero);
			verify_batch_locality(tsd, global_ptrs, batch, usize,
			    arena, nregs);
			release_batch(global_ptrs, batch, usize);
		}
	}

	free(p);
}

TEST_BEGIN(test_batch_alloc) {
	test_wrapper(11, 0, false, 0);
}
TEST_END

TEST_BEGIN(test_batch_alloc_zero) {
	test_wrapper(11, 0, true, 0);
}
TEST_END

TEST_BEGIN(test_batch_alloc_aligned) {
	test_wrapper(7, 16, false, 0);
}
TEST_END

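/* Batch-allocate from an explicitly created arena rather than the default. */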
TEST_BEGIN(test_batch_alloc_manual_arena) {
	unsigned arena_ind;
	size_t len_unsigned = sizeof(unsigned);
	assert_d_eq(mallctl("arenas.create", &arena_ind, &len_unsigned, NULL,
	    0), 0, "");
	test_wrapper(11, 0, false, MALLOCX_ARENA(arena_ind));
}
TEST_END

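/*
 * Large sizes take the non-bin path: first the smallest large size class,
 * then a size beyond tcache_maxclass, which cannot be served from the
 * tcache at all.
 */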
TEST_BEGIN(test_batch_alloc_large) {
	size_t size = SC_LARGE_MINCLASS;
	for (size_t batch = 0; batch < 4; ++batch) {
		assert(batch < BATCH_MAX);
		size_t filled = batch_alloc_wrapper(global_ptrs, batch, size,
		    0);
		assert_zu_eq(filled, batch, "");
		release_batch(global_ptrs, batch, size);
	}
	size = tcache_maxclass + 1;
	for (size_t batch = 0; batch < 4; ++batch) {
		assert(batch < BATCH_MAX);
		size_t filled = batch_alloc_wrapper(global_ptrs, batch, size,
		    0);
		assert_zu_eq(filled, batch, "");
		release_batch(global_ptrs, batch, size);
	}
}
TEST_END

int
main(void) {
	return test(
	    test_batch_alloc,
	    test_batch_alloc_zero,
	    test_batch_alloc_aligned,
	    test_batch_alloc_manual_arena,
	    test_batch_alloc_large);
}