/*	$NetBSD: sleepq.c,v 1.13 2011/01/28 17:57:03 pooka Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.13 2011/01/28 17:57:03 pooka Exp $");

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/queue.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/atomic.h>

#include "rump_private.h"

/*
 * Flimsy and minimalistic sleepq implementation.  This is implemented
 * only for the use of callouts in kern_timeout.c.  Locking etc. is
 * completely incorrect, horrible, etc. etc. etc.
 */

syncobj_t sleep_syncobj;
static kcondvar_t sq_cv;

static int
sqinit1(void)
{

	cv_init(&sq_cv, "sleepq");

	return 0;
}

void
sleepq_init(sleepq_t *sq)
{
	ONCE_DECL(sqctl);

	RUN_ONCE(&sqctl, sqinit1);

	TAILQ_INIT(sq);
}

/*
 * Put the current LWP on the sleep queue, sleeping on channel wc.
 * The syncobj argument is ignored by this minimal implementation.
 */
void
sleepq_enqueue(sleepq_t *sq, wchan_t wc, const char *wmsg, syncobj_t *sob)
{
	struct lwp *l = curlwp;

	l->l_wchan = wc;
	l->l_wmesg = wmsg;
	l->l_sleepq = sq;
	TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
}

/*
 * Block on the global condvar until our wait channel is cleared by
 * sleepq_wake()/sleepq_unsleep() or the timeout expires.  The sleep
 * queue lock (l->l_mutex) is released before returning and any kernel
 * locks recorded in l_biglocks are re-acquired.
 */
int
sleepq_block(int timo, bool catch)
{
	struct lwp *l = curlwp;
	int error = 0;
	kmutex_t *mp = l->l_mutex;
	int biglocks = l->l_biglocks;

	while (l->l_wchan) {
		l->l_mutex = mp; /* keep sleepq lock until woken up */
		error = cv_timedwait(&sq_cv, mp, timo);
		if (error == EWOULDBLOCK || error == EINTR) {
			if (l->l_wchan) {
				TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
				l->l_wchan = NULL;
				l->l_wmesg = NULL;
			}
		}
	}
	mutex_spin_exit(mp);

	if (biglocks)
		KERNEL_LOCK(biglocks, curlwp);

	return error;
}

/*
 * Wake every LWP on sq sleeping on wchan and release the sleep queue
 * lock.  Waking only an "expected" number of LWPs is not supported.
 */
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	struct lwp *l, *l_next;
	bool found = false;

	if (__predict_false(expected != -1))
		panic("sleepq_wake: \"expected\" not supported");

	for (l = TAILQ_FIRST(sq); l; l = l_next) {
		l_next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan == wchan) {
			found = true;
			l->l_wchan = NULL;
			l->l_wmesg = NULL;
			TAILQ_REMOVE(sq, l, l_sleepchain);
		}
	}
	if (found)
		cv_broadcast(&sq_cv);

	mutex_spin_exit(mp);
	return NULL;
}

void
sleepq_unsleep(struct lwp *l, bool cleanup)
{

	l->l_wchan = NULL;
	l->l_wmesg = NULL;
	TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
	cv_broadcast(&sq_cv);

	if (cleanup) {
		mutex_spin_exit(l->l_mutex);
	}
}

/*
 * The thread scheduler handles priorities, therefore no action here.
 * (Maybe do something if we're desperate?)
 */
void
sleepq_changepri(struct lwp *l, pri_t pri)
{

}

void
sleepq_lendpri(struct lwp *l, pri_t pri)
{

}

struct lwp *
syncobj_noowner(wchan_t wc)
{

	return NULL;
}

/*
 * Switch the LWP's lock to "new" and drop the previously held lock.
 * Used to lend the sleep queue lock to an LWP about to sleep.
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	membar_exit();
	l->l_mutex = new;
	mutex_spin_exit(old);
}
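
/*
 * Illustrative sketch only, not compiled: roughly how a kern_timeout.c
 * style consumer might drive the interface above.  Everything named
 * "example_*" below is a hypothetical placeholder (the sleep queue and
 * its spin lock are assumed to be set up elsewhere with sleepq_init()
 * and mutex_init()), and sleepq_enter() is assumed to be the usual
 * inline from <sys/sleepq.h> that lends the sleep queue lock to the
 * LWP via lwp_unlock_to() above.
 */
#if 0
static sleepq_t example_sq;	/* hypothetical sleep queue */
static kmutex_t example_lock;	/* hypothetical spin lock guarding example_sq */

static void
example_wait(wchan_t wchan)
{
	lwp_t *l = curlwp;

	mutex_spin_enter(&example_lock);
	/* lend the sleepq lock to the LWP, then queue it and block */
	sleepq_enter(&example_sq, l, &example_lock);
	sleepq_enqueue(&example_sq, wchan, "example", &sleep_syncobj);
	/* sleepq_block() releases example_lock before returning */
	(void)sleepq_block(0, false);
}

static void
example_wake(wchan_t wchan)
{

	mutex_spin_enter(&example_lock);
	/* -1: wake all sleepers on wchan; releases example_lock */
	(void)sleepq_wake(&example_sq, wchan, -1, &example_lock);
}
#endif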