/*	$NetBSD: intr.c,v 1.26 2010/04/28 00:32:30 pooka Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.26 2010/04/28 00:32:30 pooka Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/intr.h>
#include <sys/timetc.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Interrupt simulator.  It executes hardclock() and softintrs.
 */

#define SI_MPSAFE 0x01
#define SI_ONLIST 0x02
#define SI_KILLME 0x04

struct softint {
	void (*si_func)(void *);
	void *si_arg;
	int si_flags;
	int si_level;

	LIST_ENTRY(softint) si_entries;
};

static struct rumpuser_mtx *si_mtx;
struct softint_lev {
	struct rumpuser_cv *si_cv;
	LIST_HEAD(, softint) si_pending;
};

kcondvar_t lbolt; /* Oh Kath Ra */

static u_int ticks;

static u_int
rumptc_get(struct timecounter *tc)
{

	KASSERT(rump_threads);
	return ticks;
}

static struct timecounter rumptc = {
	.tc_get_timecount	= rumptc_get,
	.tc_poll_pps		= NULL,
	.tc_counter_mask	= ~0,
	.tc_frequency		= 0,
	.tc_name		= "rumpclk",
	.tc_quality		= 0,
};

/*
 * clock "interrupt"
 */
static void
doclock(void *noarg)
{
	struct timespec clockbase, clockup;
	struct timespec thetick, curtime;
	struct rumpuser_cv *clockcv;
	struct rumpuser_mtx *clockmtx;
	uint64_t sec, nsec;
	int error;
	extern int hz;

	memset(&clockup, 0, sizeof(clockup));
	rumpuser_gettime(&sec, &nsec, &error);
	clockbase.tv_sec = sec;
	clockbase.tv_nsec = nsec;
	curtime = clockbase;
	thetick.tv_sec = 0;
	thetick.tv_nsec = 1000000000/hz;

	/* XXX: dummies */
	rumpuser_cv_init(&clockcv);
	rumpuser_mutex_init(&clockmtx);

	rumpuser_mutex_enter(clockmtx);
	for (;;) {
		callout_hardclock();

		/*
		 * Wait until the next tick: the wait returns 0 for a
		 * (spurious) wakeup, non-zero once the deadline has
		 * actually passed.  XXX: what if the clock changes?
		 */
		while (rumpuser_cv_timedwait(clockcv, clockmtx,
		    curtime.tv_sec, curtime.tv_nsec) == 0)
			continue;

		/* XXX: sync with a) virtual clock b) host clock */
		timespecadd(&clockup, &clockbase, &curtime);
		timespecadd(&clockup, &thetick, &clockup);

#if 0
		/* CPU_IS_PRIMARY is MD and hence unreliably correct here */
		if (!CPU_IS_PRIMARY(curcpu()))
			continue;
#else
		/* global clock bookkeeping is done by the primary CPU only */
		if (curcpu()->ci_index != 0)
			continue;
#endif

		if ((++ticks % hz) == 0) {
			cv_broadcast(&lbolt);
		}
		tc_ticktock();
	}
}
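
/*
 * Illustrative sketch, not part of this module's logic: lbolt is
 * broadcast once per second by doclock() above, so a consumer can
 * sleep until the next second boundary roughly as follows.  The
 * function and its mutex argument are hypothetical.
 */
#if 0
static void
example_wait_second(kmutex_t *example_mtx)
{

	mutex_enter(example_mtx);
	cv_wait(&lbolt, example_mtx);	/* woken by the next broadcast */
	mutex_exit(example_mtx);
}
#endif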

/*
 * Soft interrupt execution thread.  Note that we run without a CPU
 * context until we start processing the interrupt.  This is to avoid
 * lock recursion.
 */
static void
sithread(void *arg)
{
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;
	int mylevel = (uintptr_t)arg;
	struct softint_lev *si_lvlp, *si_lvl;
	struct cpu_data *cd = &curcpu()->ci_data;

	rump_unschedule();

	si_lvlp = cd->cpu_softcpu;
	si_lvl = &si_lvlp[mylevel];

	/*
	 * XXX: si_mtx is unnecessary, and should open an interface
	 * which allows to use schedmtx for the cv wait
	 */
	rumpuser_mutex_enter_nowrap(si_mtx);
	for (;;) {
		if (!LIST_EMPTY(&si_lvl->si_pending)) {
			si = LIST_FIRST(&si_lvl->si_pending);
			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			si->si_flags &= ~SI_ONLIST;
			LIST_REMOVE(si, si_entries);
			if (si->si_flags & SI_KILLME) {
				rumpuser_mutex_exit(si_mtx);
				rump_schedule();
				softint_disestablish(si);
				rump_unschedule();
				rumpuser_mutex_enter_nowrap(si_mtx);
				continue;
			}
		} else {
			rumpuser_cv_wait_nowrap(si_lvl->si_cv, si_mtx);
			continue;
		}
		rumpuser_mutex_exit(si_mtx);

		rump_schedule();
		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);
		rump_unschedule();

		rumpuser_mutex_enter_nowrap(si_mtx);
	}

	panic("sithread unreachable");
}

void
rump_intr_init(void)
{

	rumpuser_mutex_init(&si_mtx);
	cv_init(&lbolt, "oh kath ra");
}

void
softint_init(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *slev;
	int rv, i;

	if (!rump_threads)
		return;

	/* XXX */
	if (ci->ci_index == 0) {
		rumptc.tc_frequency = hz;
		tc_init(&rumptc);
	}

	slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
	for (i = 0; i < SOFTINT_COUNT; i++) {
		rumpuser_cv_init(&slev[i].si_cv);
		LIST_INIT(&slev[i].si_pending);
	}
	cd->cpu_softcpu = slev;

	/* one worker thread per softint level */
	for (i = 0; i < SOFTINT_COUNT; i++) {
		rv = kthread_create(PRI_NONE,
		    KTHREAD_MPSAFE | KTHREAD_INTR, ci,
		    sithread, (void *)(uintptr_t)i,
		    NULL, "rumpsi%d", i);
		if (rv)
			panic("softint thread creation failed: %d", rv);
	}

	rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_INTR,
	    ci, doclock, NULL, NULL, "rumpclk%d", ci->ci_index);
	if (rv)
		panic("clock thread creation failed: %d", rv);
}

/*
 * Soft interrupts bring two choices.  If we are running with thread
 * support enabled, defer execution, otherwise execute in place.
 * See softint_schedule().
 *
 * As there is currently no clear concept of when a thread finishes
 * work (although rump_clear_curlwp() is close), simply execute all
 * softints in the timer thread.  This is probably not the most
 * efficient method, but good enough for now.
 */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	struct softint *si;

	si = kmem_alloc(sizeof(*si), KM_SLEEP);
	si->si_func = func;
	si->si_arg = arg;
	si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
	si->si_level = flags & SOFTINT_LVLMASK;
	KASSERT(si->si_level < SOFTINT_COUNT);

	return si;
}

void
softint_schedule(void *arg)
{
	struct softint *si = arg;
	struct cpu_data *cd = &curcpu()->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;

	if (!rump_threads) {
		si->si_func(si->si_arg);
	} else {
		if (!(si->si_flags & SI_ONLIST)) {
			LIST_INSERT_HEAD(&si_lvl[si->si_level].si_pending,
			    si, si_entries);
			si->si_flags |= SI_ONLIST;
		}
	}
}
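
/*
 * Illustrative sketch, not compiled: how a driver typically consumes
 * the interface above.  The names example_softc, example_attach,
 * example_softintr and example_intr are hypothetical.
 */
#if 0
struct example_softc {
	void *sc_sih;			/* softint handle */
};

static void
example_softintr(void *arg)
{
	struct example_softc *sc = arg;

	/* process work deferred from interrupt context, using sc */
}

static void
example_attach(struct example_softc *sc)
{

	/* register a handler at the SOFTINT_NET level, MP-safe */
	sc->sc_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    example_softintr, sc);
}

static int
example_intr(void *arg)
{
	struct example_softc *sc = arg;

	/* defer the bulk of the work to thread context */
	softint_schedule(sc->sc_sih);
	return 1;
}
#endif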

/* flimsy disestablish: should wait for softints to finish */
void
softint_disestablish(void *cook)
{
	struct softint *si = cook;

	rumpuser_mutex_enter(si_mtx);
	if (si->si_flags & SI_ONLIST) {
		/* still pending: let sithread finish the disestablish */
		si->si_flags |= SI_KILLME;
		rumpuser_mutex_exit(si_mtx);
		return;
	}
	rumpuser_mutex_exit(si_mtx);
	kmem_free(si, sizeof(*si));
}

void
rump_softint_run(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;
	int i;

	if (!rump_threads)
		return;

	for (i = 0; i < SOFTINT_COUNT; i++) {
		if (!LIST_EMPTY(&si_lvl[i].si_pending))
			rumpuser_cv_signal(si_lvl[i].si_cv);
	}
}

bool
cpu_intr_p(void)
{

	return false;
}

bool
cpu_softintr_p(void)
{

	return curlwp->l_pflag & LP_INTR;
}
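
/*
 * Illustrative sketch, not compiled: the predicates above are
 * typically consumed by assertions, e.g. code which may sleep can
 * check that it is not running in (soft) interrupt context:
 */
#if 0
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
#endif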