/*	$NetBSD: locks.c,v 1.48 2011/01/18 22:21:23 haad Exp $	*/

/*
 * Copyright (c) 2007, 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.48 2011/01/18 22:21:23 haad Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

#define ALLOCK(lock, ops)		\
    lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)			\
    lockdebug_free(lock)
#define WANTLOCK(lock, shar, try)	\
    lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar, try)
#define LOCKED(lock, shar)		\
    lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)		\
    lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b, c)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#endif

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since we can
 * be easily compatible with the kernel ABI because all kernel
 * objects regardless of machine architecture are always at least
 * the size of a pointer.  The downside, of course, is a performance
 * penalty.
 */
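
/*
 * To illustrate the indirection described above (a sketch only; the
 * concrete layout is an internal detail of rumpuser, the kernel side
 * merely guarantees the object is at least pointer-sized):
 *
 *	kmutex_t km;			kernel-visible object, opaque storage
 *	struct rumpuser_mtx *rum;	pthread-backed object on the heap
 *
 *	mutex_init(&km, ...)	stores rum into the first pointer-sized
 *				slot of km
 *	RUMPMTX(&km)		reads that pointer back, so e.g.
 *				mutex_enter(&km) ends up operating on rum
 */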

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	rumpuser_mutex_init_kmutex((struct rumpuser_mtx **)mtx);
	ALLOCK(mtx, &mutex_lockops);
}

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, false, false);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}
__strong_alias(mutex_spin_enter,mutex_enter);

int
mutex_tryenter(kmutex_t *mtx)
{
	int rv;

	rv = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (rv) {
		WANTLOCK(mtx, false, true);
		LOCKED(mtx, false);
	}
	return rv;
}

void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_owned(kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{

	return rumpuser_mutex_owner(RUMPMTX(mtx));
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER, false);
	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int rv;

	rv = rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
	if (rv) {
		WANTLOCK(rw, op == RW_READER, true);
		LOCKED(rw, op == RW_READER);
	}
	return rv;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

/* always fails */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}
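
/*
 * Because rw_tryupgrade() above never succeeds, a caller wanting the
 * write lock has to fall back to the usual retry pattern, roughly
 * (hypothetical caller, for illustration only):
 *
 *	if (!rw_tryupgrade(rw)) {
 *		rw_exit(rw);
 *		rw_enter(rw, RW_WRITER);
 *		(any state examined under the read lock must be re-validated)
 *	}
 */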

void
rw_downgrade(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	KASSERT(rw_write_held(rw));
#endif
	/*
	 * XXX HACK: there is no way to properly downgrade an rw lock
	 * in rump, so fake it by dropping the write lock and
	 * reacquiring it as a reader.  Other threads may get the
	 * lock in between.
	 */
	rw_exit(rw);
	rw_enter(rw, RW_READER);
	return;
}

int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_stat == LSDEAD || l->l_stat == LSZOMB)) {
		/*
		 * sleepq code expects us to sleep, so set l_mutex
		 * back to cpu lock here if we didn't.
		 */
		l->l_mutex = l->l_cpu->ci_schedstate.spc_mutex;
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	/*
	 * Check for LSDEAD.  If so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_stat == LSDEAD)) {
		struct proc *p = l->l_proc;

		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while (l->l_stat == LSDEAD) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(l->l_stat == LSZOMB);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->l_private */

		mutex_enter(mtx);
		rv = EINTR;
	}
	l->l_private = NULL;

	LOCKED(mtx, false);

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts, tick;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		/*
		 * XXX: this fetches rump kernel time, but
		 * rumpuser_cv_timedwait uses host time.
		 */
		nanotime(&ts);
		tick.tv_sec = ticks / hz;
		tick.tv_nsec = (ticks % hz) * (1000000000/hz);
		timespecadd(&ts, &tick, &ts);

		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}
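
/*
 * For reference, the primitives above follow the normal kernel condvar
 * protocol; a minimal sketch with hypothetical objects sc_mtx, sc_cv and
 * a hypothetical predicate sc_ready:
 *
 *	mutex_enter(&sc_mtx);
 *	while (!sc_ready)
 *		cv_wait(&sc_cv, &sc_mtx);
 *	...
 *	mutex_exit(&sc_mtx);
 *
 * with sc_mtx and sc_cv previously set up via mutex_init() and cv_init().
 */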