/*	$NetBSD: sleepq.c,v 1.28 2023/10/13 18:23:54 ad Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.28 2023/10/13 18:23:54 ad Exp $");

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/queue.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/atomic.h>

#include <rump-sys/kern.h>

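/*
 * Sleep queues for the rump kernel.  The kernel sleepq interface is
 * emulated with one condition variable per sleep queue; the host
 * threading library does the actual blocking and scheduling.
 */
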
syncobj_t sleep_syncobj;

void
sleepq_init(sleepq_t *sq)
{

	LIST_INIT(sq);
	cv_init(&sq->sq_cv, "sleepq");
}

void
sleepq_destroy(sleepq_t *sq)
{

	cv_destroy(&sq->sq_cv);
}

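/*
 * Prepare to sleep: lock the LWP, hand its lock over to the sleep queue
 * interlock, and drop the big kernel lock.  Returns the biglock recursion
 * count so that sleepq_block() can restore it on wakeup.
 */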
int
sleepq_enter(sleepq_t *sq, lwp_t *l, kmutex_t *mp)
{
	int nlocks;

	lwp_lock(l);
	if (mp != NULL) {
		lwp_unlock_to(l, mp);
	}
	if ((nlocks = l->l_blcnt) != 0) {
		KERNEL_UNLOCK_ALL(NULL, NULL);
	}
	return nlocks;
}

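/*
 * Put the current LWP on the sleep queue: record the wait channel and
 * wait message, then link the LWP onto the queue.  The syncobj and
 * catch_p arguments are accepted for interface compatibility only.
 */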
void
sleepq_enqueue(sleepq_t *sq, wchan_t wc, const char *wmsg, syncobj_t *sob,
    bool catch_p)
{
	struct lwp *l = curlwp;

	l->l_wchan = wc;
	l->l_wmesg = wmsg;
	l->l_sleepq = sq;
	LIST_INSERT_HEAD(sq, l, l_sleepchain);
}

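/*
 * Block on the sleep queue's condition variable until a wakeup clears
 * l_wchan.  On timeout or interrupt the LWP removes itself from the
 * queue.  Biglocks dropped in sleepq_enter() are re-acquired on exit.
 */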
int
sleepq_block(int timo, bool catch, syncobj_t *syncobj __unused, int nlocks)
{
	struct lwp *l = curlwp;
	int error = 0;
	kmutex_t *mp = l->l_mutex;

	while (l->l_wchan) {
		l->l_mutex = mp; /* keep sleepq lock until woken up */
		error = cv_timedwait(&l->l_sleepq->sq_cv, mp, timo);
		if (error == EWOULDBLOCK || error == EINTR) {
			if (l->l_wchan) {
				LIST_REMOVE(l, l_sleepchain);
				l->l_wchan = NULL;
				l->l_wmesg = NULL;
			}
		}
	}
	mutex_spin_exit(mp);

	if (nlocks)
		KERNEL_LOCK(nlocks, curlwp);

	return error;
}

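/*
 * A rough sketch of how a caller typically drives the interface above
 * (variable names here are illustrative, not taken from this file):
 *
 *	nlocks = sleepq_enter(sq, l, mp);
 *	sleepq_enqueue(sq, wchan, wmesg, &sleep_syncobj, false);
 *	error = sleepq_block(timo, false, &sleep_syncobj, nlocks);
 */

/*
 * Wake up to "expected" LWPs sleeping on wchan.  Since all sleepers on
 * the queue share one condition variable, cv_broadcast() is used; LWPs
 * whose wait channel was not cleared simply go back to sleep.
 */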
void
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	struct lwp *l, *l_next;
	bool found = false;

	for (l = LIST_FIRST(sq); l; l = l_next) {
		l_next = LIST_NEXT(l, l_sleepchain);
		if (l->l_wchan == wchan) {
			found = true;
			l->l_wchan = NULL;
			l->l_wmesg = NULL;
			LIST_REMOVE(l, l_sleepchain);
			if (--expected == 0)
				break;
		}
	}
	if (found)
		cv_broadcast(&sq->sq_cv);

	mutex_spin_exit(mp);
}

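/*
 * Forcibly remove an LWP from its sleep queue and wake it.  If cleanup
 * is true, the sleep queue interlock is released as well.
 */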
void
sleepq_unsleep(struct lwp *l, bool cleanup)
{

	l->l_wchan = NULL;
	l->l_wmesg = NULL;
	LIST_REMOVE(l, l_sleepchain);
	cv_broadcast(&l->l_sleepq->sq_cv);

	if (cleanup) {
		mutex_spin_exit(l->l_mutex);
	}
}

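/*
 * Remove an LWP from the given sleep queue.  The wakeup argument is
 * ignored: the LWP is always awoken via sleepq_unsleep().
 */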
void
sleepq_remove(sleepq_t *sq, struct lwp *l, bool wakeup)
{

	sleepq_unsleep(l, false);
}

/*
 * The thread scheduler handles priorities, so no action is needed here.
 * (Maybe do something if we get desperate?)
 */
void
sleepq_changepri(struct lwp *l, pri_t pri)
{

}

void
sleepq_lendpri(struct lwp *l, pri_t pri)
{

}

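/* No owner tracking for sync objects here; always report no owner. */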
struct lwp *
syncobj_noowner(wchan_t wc)
{

	return NULL;
}

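/*
 * Switch the LWP's lock to "new" and release the old lock.  The new
 * lock is published with release ordering, pairing with the consume
 * load in lwp_lock().
 */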
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	atomic_store_release(&l->l_mutex, new);
	mutex_spin_exit(old);
}

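/*
 * Lock the LWP.  Since l_mutex may change while we are waiting for it,
 * retry until the lock we acquired is still the LWP's lock.
 */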
void
lwp_lock(lwp_t *l)
{
	kmutex_t *old = atomic_load_consume(&l->l_mutex);

	mutex_spin_enter(old);
	while (__predict_false(atomic_load_relaxed(&l->l_mutex) != old)) {
		mutex_spin_exit(old);
		old = atomic_load_consume(&l->l_mutex);
		mutex_spin_enter(old);
	}
}

void
lwp_unlock(lwp_t *l)
{

	mutex_spin_exit(l->l_mutex);
}

void
lwp_changepri(lwp_t *l, pri_t pri)
{

	/* priority changes are left to the host scheduler */
}

void
lwp_lendpri(lwp_t *l, pri_t pri)
{

	/* no priority lending/inheritance in the rump kernel */
}

pri_t
lwp_eprio(lwp_t *l)
{

	/* the effective priority is simply the static priority */
	return l->l_priority;
}