/*	$NetBSD: locks.c,v 1.67 2013/12/09 17:03:41 pooka Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.67 2013/12/09 17:03:41 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

#define ALLOCK(lock, ops)	\
    lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)		\
    lockdebug_free(lock)
#define WANTLOCK(lock, shar)	\
    lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)	\
    lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)	\
    lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#endif

/* not used, but need the symbols for pointer comparisons */
syncobj_t mutex_syncobj, rw_syncobj;

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since we can
 * be easily compatible with the kernel ABI because all kernel
 * objects regardless of machine architecture are always at least
 * the size of a pointer.  The downside, of course, is a performance
 * penalty.
 */
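/*
 * A minimal sketch of the mapping described above, from the point of
 * view of a hypothetical caller; the function name is illustrative
 * only and the block is kept out of the build with #if 0.  After
 * mutex_init(), the kernel-side kmutex_t blob holds nothing but a
 * pointer to the heap-allocated rumpuser (pthread) mutex.
 */
#if 0
static void
example_lock_mapping(void)
{
	kmutex_t km;			/* static kernel-ABI storage */
	struct rumpuser_mtx *rumtx;

	mutex_init(&km, MUTEX_DEFAULT, IPL_NONE);
	/* the same pointer extraction the RUMPMTX() macro below performs */
	rumtx = *(struct rumpuser_mtx **)&km;
	KASSERT(rumtx != NULL);
	mutex_destroy(&km);
}
#endif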
#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals.  The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex.  The
	 * hypervisor itself may block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	ALLOCK(mtx, &mutex_lockops);
}

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	int error;

	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (error == 0) {
		WANTLOCK(mtx, 0);
		LOCKED(mtx, false);
	}
	return error == 0;
}

void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_owned(kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l;
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

static enum rumprwlock
krw2rumprw(const krw_t op)
{

	switch (op) {
	case RW_READER:
		return RUMPUSER_RW_READER;
	case RW_WRITER:
		return RUMPUSER_RW_WRITER;
	default:
		panic("unknown rwlock type");
	}
}

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER);
	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int error;

	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
	if (error == 0) {
		WANTLOCK(rw, op == RW_READER);
		LOCKED(rw, op == RW_READER);
	}
	return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}
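/*
 * A minimal usage sketch for the rwlock wrappers above (hypothetical
 * caller; names are illustrative and the block is kept out of the
 * build with #if 0).  Readers may share the lock, a writer holds it
 * exclusively, and rw_tryenter() returns nonzero only on success.
 */
#if 0
static int example_counter;

static void
example_rw_usage(krwlock_t *rw)
{

	rw_enter(rw, RW_READER);	/* shared: readers may run concurrently */
	(void)example_counter;		/* read-only access under the lock */
	rw_exit(rw);

	if (rw_tryenter(rw, RW_WRITER)) { /* exclusive, but don't block */
		example_counter++;
		rw_exit(rw);
	}
}
#endif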
int
rw_tryupgrade(krwlock_t *rw)
{
	int rv;

	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
	if (rv == 0) {
		UNLOCKED(rw, 1);
		WANTLOCK(rw, 0);
		LOCKED(rw, 0);
	}
	return rv == 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}

int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_lock_held(krwlock_t *rw)
{

	return rw_read_held(rw) || rw_write_held(rw);
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here: someone might want the CPU in order
		 * to set a condition.  Otherwise we'll just loop
		 * forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		UNLOCKED(mtx, false);
		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove the "reference" to l->l_private */

		mutex_enter(mtx);
		LOCKED(mtx, false);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait_sig without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}
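/*
 * A minimal sketch of the canonical condvar pattern served by
 * docvwait() above (hypothetical caller; names are illustrative and
 * the block is kept out of the build with #if 0).  The mutex is held
 * across the check, dropped for the sleep inside cv_wait(), and
 * reacquired before return, so the predicate must be re-tested in a
 * loop after every wakeup.
 */
#if 0
static bool example_ready;

static void
example_cv_usage(kcondvar_t *cv, kmutex_t *mtx)
{

	mutex_enter(mtx);
	while (!example_ready)
		cv_wait(cv, mtx);	/* re-check the condition on wakeup */
	/* example_ready is true and mtx is held here */
	mutex_exit(mtx);
}
#endif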
/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}
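/*
 * For reference, a worked example of the tick-to-timespec conversion
 * in cv_timedwait() above (values are illustrative): with hz = 100,
 * a timeout of 150 ticks becomes tv_sec = 150 / 100 = 1 and
 * tv_nsec = (150 % 100) * (1000000000 / 100) = 500000000,
 * i.e. 1.5 seconds.
 */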