/*	$NetBSD: klock.c,v 1.11 2023/06/23 21:09:44 riastradh Exp $	*/

/*
 * Copyright (c) 2007-2010 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: klock.c,v 1.11 2023/06/23 21:09:44 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/evcnt.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

/*
 * giant lock
 */

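/*
 * Biglock state: the host mutex backing the lock, the recursion
 * depth, and the lwp currently holding the lock (NULL when unowned).
 */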
struct rumpuser_mtx *rump_giantlock;
static int giantcnt;
static struct lwp *giantowner;

static struct evcnt ev_biglock_fast;
static struct evcnt ev_biglock_slow;
static struct evcnt ev_biglock_recurse;

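/*
 * Attach event counters for the three ways the biglock is taken:
 * uncontended (fast), contended (slow) and recursive.
 */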
void
rump_biglock_init(void)
{

	evcnt_attach_dynamic(&ev_biglock_fast, EVCNT_TYPE_MISC, NULL,
	    "rump biglock", "fast");
	evcnt_attach_dynamic(&ev_biglock_slow, EVCNT_TYPE_MISC, NULL,
	    "rump biglock", "slow");
	evcnt_attach_dynamic(&ev_biglock_recurse, EVCNT_TYPE_MISC, NULL,
	    "rump biglock", "recurse");
}

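/*
 * Stash the caller's biglock recursion count in *nlocks and mark the
 * lock unowned.  The host mutex itself is not released here; the
 * saved state is reinstated with rump_kernel_bigunwrap().
 */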
void
rump_kernel_bigwrap(int *nlocks)
{

	KASSERTMSG(giantcnt > 0, "giantcnt=%d", giantcnt);
	KASSERTMSG(curlwp == giantowner, "curlwp=%p giantowner=%p",
	    curlwp, giantowner);
	giantowner = NULL;
	*nlocks = giantcnt;
	giantcnt = 0;
}

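/*
 * Restore biglock state saved by rump_kernel_bigwrap(): the calling
 * lwp becomes the owner again with its previous recursion count.
 */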
void
rump_kernel_bigunwrap(int nlocks)
{

	KASSERT(giantowner == NULL);
	giantowner = curlwp;
	giantcnt = nlocks;
}

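/*
 * Acquire the biglock nlocks times.  If the calling lwp already owns
 * the lock, only the recursion count is bumped.  Otherwise, first try
 * the host mutex without blocking; on contention, release the virtual
 * CPU while sleeping on the mutex and reacquire one afterwards.  Once
 * ownership is taken, any remaining holds are added on the next loop
 * iteration via the recursive branch.
 */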
void
_kernel_lock(int nlocks)
{
	struct lwp *l = curlwp;

	while (nlocks) {
		if (giantowner == l) {
			giantcnt += nlocks;
			nlocks = 0;
			ev_biglock_recurse.ev_count++;
		} else {
			if (rumpuser_mutex_tryenter(rump_giantlock) != 0) {
				rump_unschedule_cpu1(l, NULL);
				rumpuser_mutex_enter_nowrap(rump_giantlock);
				rump_schedule_cpu(l);
				ev_biglock_slow.ev_count++;
			} else {
				ev_biglock_fast.ev_count++;
			}
			giantowner = l;
			giantcnt = 1;
			nlocks--;
		}
	}
}

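/*
 * Release the biglock.  nlocks == 0 releases all holds and nlocks == -1
 * releases a single hold, asserting it is the only one.  If countp is
 * non-NULL, the number of holds at entry is stored there.  An lwp which
 * does not own the lock may call this only to query that count.
 */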
void
_kernel_unlock(int nlocks, int *countp)
{

	if (giantowner != curlwp) {
		KASSERT(nlocks == 0);
		if (countp)
			*countp = 0;
		return;
	}

	if (countp)
		*countp = giantcnt;
	if (nlocks == 0)
		nlocks = giantcnt;
	if (nlocks == -1) {
		KASSERT(giantcnt == 1);
		nlocks = 1;
	}
	KASSERT(nlocks <= giantcnt);
	while (nlocks--) {
		giantcnt--;
	}

	if (giantcnt == 0) {
		giantowner = NULL;
		rumpuser_mutex_exit(rump_giantlock);
	}
}

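/*
 * Return true iff the calling lwp holds the biglock.
 */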
bool
_kernel_locked_p(void)
{

	return giantowner == curlwp;
}

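/*
 * Unschedule from the rump kernel: drop any biglock holds (the count
 * is saved via countp) and release the virtual CPU.  The counterpart
 * is rump_user_schedule().
 */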
void
rump_user_unschedule(int nlocks, int *countp, void *interlock)
{

	_kernel_unlock(nlocks, countp);
	/*
	 * XXX: technically we should unschedule_cpu1() here, but that
	 * requires rump_intr_enter/exit to be implemented.
	 */
	rump_unschedule_cpu_interlock(curlwp, interlock);
}

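/*
 * Reschedule onto a virtual CPU and reacquire the biglock holds
 * released by rump_user_unschedule().
 */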
void
rump_user_schedule(int nlocks, void *interlock)
{

	rump_schedule_cpu_interlock(curlwp, interlock);

	if (nlocks)
		_kernel_lock(nlocks);
}