#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"

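/*
 * Fallback spin count for Windows critical sections when the CRT headers do
 * not provide one; consumed by InitializeCriticalSectionAndSpinCount() in
 * malloc_mutex_init() below.
 */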
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

/*
 * Based on benchmark results, a fixed spin with this number of retries works
 * well for our critical sections.
 */
int64_t opt_mutex_max_spin = 600;

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool		postpone_init = true;
static malloc_mutex_t	*postponed_mutexes = NULL;
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */

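/*
 * pthread_create_wrapper() is defined elsewhere in jemalloc; it forwards to
 * the real pthread_create() and toggles isthreaded.
 */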
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg) {
	return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif

/******************************************************************************/

#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));
#endif

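/*
 * Slow path for malloc_mutex_lock(): spin with CPU pause hints for up to
 * opt_mutex_max_spin iterations (forever if -1), retrying the lock whenever
 * it appears unheld.  Uniprocessor systems skip straight to blocking.  If
 * spinning fails and stats are enabled, record contention statistics before
 * and after blocking on the underlying lock.
 */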
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	mutex_prof_data_t *data = &mutex->prof_data;
	nstime_t before;

	if (ncpus == 1) {
		goto label_spin_done;
	}

	int cnt = 0;
	do {
		spin_cpu_spinwait();
		if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
		    && !malloc_mutex_trylock_final(mutex)) {
			data->n_spin_acquired++;
			return;
		}
	} while (cnt++ < opt_mutex_max_spin || opt_mutex_max_spin == -1);

	if (!config_stats) {
		/* Only spinning is useful when stats are off. */
		malloc_mutex_lock_final(mutex);
		return;
	}
label_spin_done:
	nstime_init_update(&before);
	/* Copy before to after to avoid clock skew. */
	nstime_t after;
	nstime_copy(&after, &before);
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try, as the above two calls may take quite a few cycles. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}

	/* True slow path. */
	malloc_mutex_lock_final(mutex);
	/* Update more slow-path-only counters. */
	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
	nstime_update(&after);

	nstime_t delta;
	nstime_copy(&delta, &after);
	nstime_subtract(&delta, &before);

	data->n_wait_times++;
	nstime_add(&data->tot_wait_time, &delta);
	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
		nstime_copy(&data->max_wait_time, &delta);
	}
	if (n_thds > data->max_n_thds) {
		data->max_n_thds = n_thds;
	}
}

static void
mutex_prof_data_init(mutex_prof_data_t *data) {
	memset(data, 0, sizeof(mutex_prof_data_t));
	nstime_init_zero(&data->max_wait_time);
	nstime_init_zero(&data->tot_wait_time);
	data->prev_owner = NULL;
}

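/* The caller must own the mutex whose profiling data is reset (asserted). */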
void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_assert_owner(tsdn, mutex);
	mutex_prof_data_init(&mutex->prof_data);
}

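/*
 * Witness comparator for address-ordered mutexes: orders by mutex address so
 * that same-rank mutexes are always acquired in a consistent order.  The
 * witness arguments are unused.
 */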
static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
    const witness_t *witness2, void *mutex2) {
	assert(mutex1 != NULL);
	assert(mutex2 != NULL);
	uintptr_t mu1int = (uintptr_t)mutex1;
	uintptr_t mu2int = (uintptr_t)mutex2;
	if (mu1int < mu2int) {
		return -1;
	} else if (mu1int == mu2int) {
		return 0;
	} else {
		return 1;
	}
}

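/*
 * Initialize a mutex and, in debug builds, its witness.  Returns true on
 * error, per the jemalloc convention.  Illustrative call (hypothetical mutex
 * name and rank):
 *
 *	static malloc_mutex_t example_mtx;
 *	if (malloc_mutex_init(&example_mtx, "example", WITNESS_RANK_OMIT,
 *	    malloc_mutex_rank_exclusive)) {
 *		... handle initialization failure ...
 *	}
 */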
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
	mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
	InitializeSRWLock(&mutex->lock);
#  else
	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
	    _CRT_SPINCOUNT)) {
		return true;
	}
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
	if (postpone_init) {
		mutex->postponed_next = postponed_mutexes;
		postponed_mutexes = mutex;
	} else {
		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
	}
#elif MALLOC_MUTEX_TYPE == PTHREAD_MUTEX_DEFAULT
	if (pthread_mutex_init(&mutex->lock, NULL) != 0) {
		return true;
	}
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0) {
		return true;
	}
	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return true;
	}
	pthread_mutexattr_destroy(&attr);
#endif
	if (config_debug) {
		mutex->lock_order = lock_order;
		if (lock_order == malloc_mutex_address_ordered) {
			witness_init(&mutex->witness, name, rank,
			    mutex_addr_comp, mutex);
		} else {
			witness_init(&mutex->witness, name, rank, NULL, NULL);
		}
	}
	return false;
}

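/*
 * Fork protocol: the parent locks all mutexes before fork() and unlocks them
 * afterwards; the child either unlocks them (JEMALLOC_MUTEX_INIT_CB builds)
 * or re-initializes them, since only the forking thread survives in the
 * child.
 */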
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_lock(tsdn, mutex);
}

void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_unlock(tsdn, mutex);
}

void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	malloc_mutex_unlock(tsdn, mutex);
#else
	if (malloc_mutex_init(mutex, mutex->witness.name,
	    mutex->witness.rank, mutex->lock_order)) {
		malloc_printf("<jemalloc>: Error re-initializing mutex in "
		    "child\n");
		if (opt_abort) {
			abort();
		}
	}
#endif
}

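/*
 * Once the bootstrap allocator is usable, initialize any mutexes whose
 * creation was postponed (JEMALLOC_MUTEX_INIT_CB builds).  Returns true on
 * error.
 */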
bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	postpone_init = false;
	while (postponed_mutexes != NULL) {
		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
		postponed_mutexes = postponed_mutexes->postponed_next;
	}
#endif
	return false;
}