/*	$NetBSD: ltsleep.c,v 1.36 2020/03/14 20:23:51 ad Exp $	*/

/*
 * Copyright (c) 2009, 2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of the tsleep/mtsleep kernel sleep interface.  There
 * are two sides to our implementation.  For historic spinlocks we
 * assume the kernel is giantlocked and use the kernel giantlock as the
 * wait interlock.  For mtsleep, we use the interlock supplied by
 * the caller.  This duality leads to some if/else messiness in the code ...
 */
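
/*
 * Illustrative usage sketch only (not part of this file's interface;
 * the softc fields sc_flag and sc_lock below are hypothetical):
 *
 *	tsleep/wakeup under the biglock, with a roughly one second timeout:
 *
 *		while (!sc->sc_flag)
 *			(void)tsleep(&sc->sc_flag, PRIBIO, "xmpl", hz);
 *		...
 *		sc->sc_flag = 1;
 *		wakeup(&sc->sc_flag);
 *
 *	mtsleep with a caller-supplied interlock:
 *
 *		mutex_enter(&sc->sc_lock);
 *		while (!sc->sc_flag)
 *			(void)mtsleep(&sc->sc_flag, PRIBIO, "xmpl", 0,
 *			    &sc->sc_lock);
 *		mutex_exit(&sc->sc_lock);
 */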

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ltsleep.c,v 1.36 2020/03/14 20:23:51 ad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

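/*
 * Per-sleeper bookkeeping, allocated on the sleeping thread's stack.
 * Depending on the interface used, the thread waits either on a
 * rumpuser (hypervisor) condition variable or on a kernel condvar;
 * iskwait records which member of the union is in use.
 */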
struct ltsleeper {
	wchan_t id;
	union {
		struct rumpuser_cv *user;
		kcondvar_t kern;
	} u;
	bool iskwait;
	LIST_ENTRY(ltsleeper) entries;
};
#define ucv u.user
#define kcv u.kern

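/*
 * All currently sleeping threads are kept on the sleepers list.
 * qlock protects the list and serves as the condvar interlock.
 */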
static LIST_HEAD(, ltsleeper) sleepers = LIST_HEAD_INITIALIZER(sleepers);
static kmutex_t qlock;

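/*
 * Sleep on ident until wakeup() or until timo ticks have passed
 * (timo == 0 means no timeout).  Called with qlock held; qlock is
 * released before returning.  Returns 0, or EWOULDBLOCK on timeout.
 */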
static int
sleeper(wchan_t ident, int timo, bool kinterlock)
{
	struct ltsleeper lts;
	struct timespec ts;
	int rv;

	lts.id = ident;
	if (kinterlock) {
		lts.iskwait = true;
		cv_init(&lts.kcv, "mtsleep");
	} else {
		lts.iskwait = false;
		rumpuser_cv_init(&lts.ucv);
	}

	LIST_INSERT_HEAD(&sleepers, &lts, entries);

	if (timo) {
		if (kinterlock) {
			rv = cv_timedwait(&lts.kcv, &qlock, timo);
		} else {
			/*
			 * Calculate wakeup-time.
			 */
			ts.tv_sec = timo / hz;
			ts.tv_nsec = (timo % hz) * (1000000000/hz);
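			/*
			 * For example, with hz = 100 one tick is 10ms,
			 * so timo = 150 gives ts = { 1, 500000000 },
			 * i.e. a 1.5 second timeout.
			 */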
			mutex_spin_exit(&qlock);
			rv = rumpuser_cv_timedwait(lts.ucv, rump_giantlock,
			    ts.tv_sec, ts.tv_nsec);
			mutex_spin_enter(&qlock);
		}

		if (rv != 0)
			rv = EWOULDBLOCK;
	} else {
		if (kinterlock) {
			cv_wait(&lts.kcv, &qlock);
		} else {
			mutex_spin_exit(&qlock);
			rumpuser_cv_wait(lts.ucv, rump_giantlock);
			mutex_spin_enter(&qlock);
		}
		rv = 0;
	}

	LIST_REMOVE(&lts, entries);
	mutex_spin_exit(&qlock);

	if (kinterlock)
		cv_destroy(&lts.kcv);
	else
		rumpuser_cv_destroy(lts.ucv);

	return rv;
}

int
tsleep(wchan_t ident, pri_t prio, const char *wmesg, int timo)
{
	int rv, nlocks;

	/*
	 * Since we cannot use slock as the rumpuser interlock,
	 * require that everyone using this prehistoric interface
	 * is biglocked.  Wrap around the biglock and drop lockcnt,
	 * but retain the rumpuser mutex so that we can use it as an
	 * interlock to rumpuser_cv_wait().
	 */
	rump_kernel_bigwrap(&nlocks);
	mutex_spin_enter(&qlock);
	rv = sleeper(ident, timo, false);
	rump_kernel_bigunwrap(nlocks);

	return rv;
}

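/*
 * As tsleep(), but use the caller-supplied mutex as the interlock.
 * Unless PNORELOCK is set in prio, the lock is re-acquired before
 * returning.
 */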
int
mtsleep(wchan_t ident, pri_t prio, const char *wmesg, int timo, kmutex_t *lock)
{
	int rv;

	mutex_spin_enter(&qlock);
	mutex_exit(lock);
	rv = sleeper(ident, timo, true);
	if ((prio & PNORELOCK) == 0)
		mutex_enter(lock);

	return rv;
}

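/*
 * Wake up every thread currently sleeping on ident.
 */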
void
wakeup(wchan_t ident)
{
	struct ltsleeper *ltsp;

	mutex_spin_enter(&qlock);
	LIST_FOREACH(ltsp, &sleepers, entries) {
		if (ltsp->id == ident) {
			if (ltsp->iskwait) {
				cv_broadcast(&ltsp->kcv);
			} else {
				rumpuser_cv_broadcast(ltsp->ucv);
			}
		}
	}
	mutex_exit(&qlock);
}

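/*
 * Initialize the lock protecting the sleeper bookkeeping.
 */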
void
rump_tsleep_init()
{

	mutex_init(&qlock, MUTEX_SPIN, IPL_NONE);
}