/*	$NetBSD: allocfree.c,v 1.2 2016/03/11 18:26:40 christos Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: allocfree.c,v 1.2 2016/03/11 18:26:40 christos Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <machine/cpu_counter.h>

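/*
 * Benchmark module: measures the cost of one allocate/free cycle for
 * malloc(9), kmem(9), pool(9) and pool_cache(9), running one bound
 * thread per CPU and printing the mean nanoseconds per cycle.
 */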
MODULE(MODULE_CLASS_MISC, allocfree, NULL);

static size_t		sz = 128;
static int		nthreads;
static int		count = 100000;
static uint64_t		total;
static kmutex_t		lock;
static kcondvar_t	cv;
static int		nrun;
static void		(*method)(void);
static int		barrier;
static volatile u_int	barrier2;
static int		timing;
static struct pool	pool;
static pool_cache_t	cache;

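/*
 * Apply the "size", "count" and "timing" tunables supplied in the
 * property dictionary at module load time.
 */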
static void
handle_props(prop_dictionary_t props)
{
	prop_number_t num;

	num = prop_dictionary_get(props, "size");
	if (num != NULL && prop_object_type(num) == PROP_TYPE_NUMBER) {
		sz = (size_t)prop_number_integer_value(num);
		sz = max(sz, 1);
		sz = min(sz, 1024*1024);
	}
	num = prop_dictionary_get(props, "count");
	if (num != NULL && prop_object_type(num) == PROP_TYPE_NUMBER) {
		count = (int)prop_number_integer_value(num);
		count = max(count, 1);
	}
	num = prop_dictionary_get(props, "timing");
	if (num != NULL && prop_object_type(num) == PROP_TYPE_NUMBER) {
		timing = (int)prop_number_integer_value(num);
	}
}

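/*
 * One allocate/write/free cycle for each allocator under test.
 */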
static void
kmem_method(void)
{
	int *p;

	p = kmem_alloc(sz, KM_SLEEP);
	if (p != NULL) {
		*p = 1;
		kmem_free(p, sz);
	}
}

static void
malloc_method(void)
{
	int *p;

	p = malloc(sz, M_DEVBUF, M_WAITOK);
	if (p != NULL) {
		*p = 1;
		free(p, M_DEVBUF);
	}
}

static void
pool_method(void)
{
	int *p;

	p = pool_get(&pool, PR_WAITOK);
	if (p != NULL) {
		*p = 1;
		pool_put(&pool, p);
	}
}

static void
cache_method(void)
{
	int *p;

	p = pool_cache_get(cache, PR_WAITOK);
	if (p != NULL) {
		*p = 1;
		pool_cache_put(cache, p);
	}
}

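/*
 * Per-CPU benchmark thread: rendezvous with the other threads, then
 * time "count" allocate/free cycles and fold the result into "total".
 */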
static void
test_thread(void *cookie)
{
	struct timespec s, e, t;
	int lcv;
	uint64_t x;

	kpreempt_disable();

	memset(&t, 0, sizeof(t));
	x = 0;

	mutex_enter(&lock);
	barrier++;
	while (barrier < nthreads) {
		cv_wait(&cv, &lock);
	}
	cv_broadcast(&cv);
	mutex_exit(&lock);

	atomic_inc_uint(&barrier2);
	while (barrier2 < nthreads) {
		nullop(NULL);
	}

	if (timing) {
		for (lcv = count; lcv != 0; lcv--) {
			x -= cpu_counter();
			(*method)();
			x += cpu_counter();
		}
	} else {
		for (lcv = count; lcv != 0; lcv--) {
			nanotime(&s);
			(*method)();
			nanotime(&e);
			timespecsub(&e, &s, &e);
			timespecadd(&e, &t, &t);
		}
	}

	mutex_enter(&lock);
	barrier = 0;
	barrier2 = 0;
	if (timing) {
		total += x * 1000000000LL / cpu_frequency(curcpu());
	} else {
		total += timespec2ns(&t);
	}
	if (--nrun == 0) {
		cv_broadcast(&cv);
	}
	mutex_exit(&lock);

	kpreempt_enable();
	kthread_exit(0);
}

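/*
 * Run one measurement: bind a thread to each of "nt" CPUs, wait for all
 * of them to finish and print the mean cost per allocate/free cycle.
 */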
static void
run2(int nt, void (*func)(void))
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	int error;

	nthreads = nt;
	total = 0;
	method = func;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (nt-- == 0) {
			break;
		}
		error = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
		    ci, test_thread, NULL, NULL, "test");
		if (error == 0) {
			nrun++;
		} else {
			nthreads--;
		}
	}
	mutex_enter(&lock);
	cv_broadcast(&cv);
	while (nrun > 0) {
		cv_wait(&cv, &lock);
	}
	mutex_exit(&lock);
	if (nthreads == 0) {
		printf("FAILED\n");
	} else {
		printf("\t%d", (int)(total / nthreads / count));
	}
}

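/* Benchmark every allocator at the current thread count. */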
static void
run1(int nt)
{

	run2(nt, malloc_method);
	run2(nt, kmem_method);
	run2(nt, pool_method);
	run2(nt, cache_method);
	printf("\n");
}

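/* Sweep the number of worker threads from 1 up to ncpu. */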
static void
run0(void)
{
	int i;

	for (i = 1; i <= ncpu; i++) {
		printf("%zu\t%d", sz, i);
		run1(i);
	}
}

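/*
 * Module control: on load, create the test pool and pool cache, run the
 * full benchmark and tear everything down again.
 */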
static int
allocfree_modcmd(modcmd_t cmd, void *arg)
{
	const char *timer;

	switch (cmd) {
	case MODULE_CMD_INIT:
		handle_props(arg);
		timer = (timing ? "cpu_counter" : "nanotime");
		printf("=> using %s() for timings\n", timer);
		printf("SIZE\tNCPU\tMALLOC\tKMEM\tPOOL\tCACHE\n");
		mutex_init(&lock, MUTEX_DEFAULT, IPL_NONE);
		cv_init(&cv, "testcv");
		pool_init(&pool, sz, 0, 0, 0, "tpool",
		    &pool_allocator_nointr, IPL_NONE);
		cache = pool_cache_init(sz, 0, 0, 0, "tcache",
		    NULL, IPL_NONE, NULL, NULL, NULL);
		run0();
		pool_destroy(&pool);
		pool_cache_destroy(cache);
		mutex_destroy(&lock);
		cv_destroy(&cv);
		return 0;

	case MODULE_CMD_FINI:
		/* XXX in theory, threads could still be running. */
		return 0;

	default:
		return ENOTTY;
	}
}