1 /* $NetBSD: threads.c,v 1.15 2011/08/07 14:03:16 rmind Exp $ */ 2 3 /* 4 * Copyright (c) 2007-2009 Antti Kantee. All Rights Reserved. 5 * 6 * Development of this software was supported by 7 * The Finnish Cultural Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS 19 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.15 2011/08/07 14:03:16 rmind Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Bounce descriptor handed from kthread_create() to the new host
 * thread: the function to run, its argument, and the kernel lwp
 * backing the thread.  Allocated in kthread_create(), freed by
 * threadbouncer() once its contents have been consumed.
 */
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
};

/*
 * Entry point of the host thread created by rumpuser_thread_create().
 * Binds the pre-allocated lwp to this host thread, enters the rump
 * scheduler, and only then invokes the real thread function.  The
 * thread function is expected to terminate via kthread_exit(), hence
 * the unconditional panic() after the call.
 */
static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	struct lwp *l = k->mylwp;
	void (*f)(void *);
	void *thrarg;

	/* copy out f/arg now: the descriptor is freed before f runs */
	f = k->f;
	thrarg = k->arg;

	/* schedule ourselves */
	rumpuser_set_curlwp(l);
	rump_schedule();

	/* free dance struct */
	free(k, M_TEMP);

	/* non-MPSAFE threads run holding the big kernel lock */
	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	/* kthread_exit() never returns here */
	panic("unreachable, should kthread_exit()");
}

/*
 * Create a kernel thread running func(arg), named by fmt/...
 * (truncated to MAXCOMLEN).  flags select KTHREAD_MPSAFE,
 * KTHREAD_INTR and KTHREAD_MUSTJOIN; a non-NULL ci binds the thread
 * to that cpu.  pri is accepted for interface compatibility but not
 * used here.
 *
 * When the rump kernel runs without threads (!rump_threads), a fixed
 * set of well-known optional threads is faked with a warning and
 * success is returned without creating anything; any other thread
 * causes a panic.  The "modunload" thread is always suppressed.
 *
 * Returns 0 on success or the error from rumpuser_thread_create().
 * On success *newlp, if newlp is non-NULL, is set to the new lwp.
 */
int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct kthdesc *k;
	struct lwp *l;
	int rv;

	/* format the thread name, if one was given */
	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		/* fake them */
		if (strcmp(thrstore, "vrele") == 0) {
			printf("rump warning: threads not enabled, not starting"
			   " vrele thread\n");
			return 0;
		} else if (strcmp(thrstore, "cachegc") == 0) {
			printf("rump warning: threads not enabled, not starting"
			   " namecache g/c thread\n");
			return 0;
		} else if (strcmp(thrstore, "nfssilly") == 0) {
			printf("rump warning: threads not enabled, not enabling"
			   " nfs silly rename\n");
			return 0;
		} else if (strcmp(thrstore, "unpgc") == 0) {
			printf("rump warning: threads not enabled, not enabling"
			   " UNP garbage collection\n");
			return 0;
		} else if (strncmp(thrstore, "pmf", sizeof("pmf")-1) == 0) {
			printf("rump warning: threads not enabled, not enabling"
			   " pmf thread\n");
			return 0;
		} else if (strncmp(thrstore, "xcall", sizeof("xcall")-1) == 0) {
			printf("rump warning: threads not enabled, CPU xcall"
			   " not functional\n");
			return 0;
		} else
			panic("threads not available, setenv RUMP_THREADS 1");
	}
	/* past this point a real thread is created, so a name is required */
	KASSERT(fmt != NULL);

	/* set up the bounce descriptor and the lwp for the new thread */
	k = malloc(sizeof(*k), M_TEMP, M_WAITOK);
	k->f = func;
	k->arg = arg;
	k->mylwp = l = rump__lwproc_alloclwp(&proc0);
	l->l_flag |= LW_SYSTEM;
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	if (flags & KTHREAD_INTR)
		l->l_pflag |= LP_INTR;
	if (ci) {
		l->l_pflag |= LP_BOUND;
		l->l_target_cpu = ci;
	}
	if (thrname) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		strlcpy(l->l_name, thrname, MAXCOMLEN);
	}

	/* joinable iff KTHREAD_MUSTJOIN; handle is stashed in l_ctxlink */
	rv = rumpuser_thread_create(threadbouncer, k, thrname,
	    (flags & KTHREAD_MUSTJOIN) == KTHREAD_MUSTJOIN, &l->l_ctxlink);
	if (rv)
		/*
		 * NOTE(review): k and the lwp (incl. l_name) appear to be
		 * leaked on this failure path -- confirm whether a cleanup
		 * counterpart for rump__lwproc_alloclwp() should run here.
		 */
		return rv;

	if (newlp) {
		*newlp = l;
	} else {
		/* a joinable thread's lwp must be returned to the caller */
		KASSERT((flags & KTHREAD_MUSTJOIN) == 0);
	}

	return 0;
}

/*
 * Terminate the calling kernel thread: drop the big lock if held,
 * release our lwp, leave the rump scheduler and exit the host thread.
 * ecode is accepted for interface compatibility but not used.
 */
void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_LAST(NULL);
	rump_lwproc_releaselwp();
	/* unschedule includes membar */
	rump_unschedule();
	rumpuser_thread_exit();
}

/*
 * Wait for a thread created with KTHREAD_MUSTJOIN to finish.
 * l_ctxlink holds the host-thread handle set by kthread_create().
 * Returns the result of the host-level join.
 */
int
kthread_join(struct lwp *l)
{
	int rv;

	KASSERT(l->l_ctxlink != NULL);
	rv = rumpuser_thread_join(l->l_ctxlink);
	/* presumably pairs with the membar in the exiting thread's
	 * rump_unschedule() (see kthread_exit) -- confirm */
	membar_consumer();

	return rv;
}