/*
 * Copyright (c) 2003, 2004 Matthew Dillon. All rights reserved.
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
 * Copyright (c) 2003 Jonathan Lemon. All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jonathan Lemon, Jeffrey M. Hsu, and Matthew Dillon.
 *
 * Jonathan Lemon gave Jeffrey Hsu permission to combine his copyright
 * into this one around July 8 2004.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/net/netisr.c,v 1.22 2004/09/10 18:23:56 dillon Exp $
 */

/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
 *
 * License terms: all terms for the DragonFly license above plus the following:
 *
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Jeffrey M. Hsu
 *	for the DragonFly Project.
 *
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    This requirement will sunset and may be removed on July 8 2005,
 *    after which the standard DragonFly license (as shown above) will
 *    apply.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/msgport.h>
#include <sys/proc.h>
#include <sys/interrupt.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <machine/cpufunc.h>
#include <machine/ipl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

static struct netisr netisrs[NETISR_MAX];
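
/*
 * Overview: protocols register a handler and a message-port mapping
 * function in netisrs[] via netisr_register().  Packets handed to
 * netisr_dispatch()/netisr_queue() are wrapped in a netmsg_packet and
 * sent to the message port of one of the per-cpu netisr threads below,
 * whose service loop simply runs each message's command function.
 */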

/* Per-cpu threads to handle any protocol. */
struct thread netisr_cpu[MAXCPU];
lwkt_port netisr_afree_rport;
lwkt_port netisr_adone_rport;
lwkt_port netisr_sync_port;

/*
 * netisr_afree_rport replymsg function, only used to handle async
 * messages which the sender has abandoned to their fate.
 */
static void
netisr_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	free(msg, M_LWKTMSG);
}

/*
 * We must construct a custom putport function (which runs in the context
 * of the message originator).
 *
 * Our custom putport must check for self-referential messages, which can
 * occur when the so_upcall routine is called (e.g. nfs).  Self-referential
 * messages are executed synchronously.  However, we must panic if the
 * message is not marked DONE on completion because the self-referential
 * case cannot block without deadlocking.
 *
 * Note: ms_target_port does not need to be set when returning a
 * synchronous error code.
 */
int
netmsg_put_port(lwkt_port_t port, lwkt_msg_t lmsg)
{
	int error;

	if ((lmsg->ms_flags & MSGF_ASYNC) == 0 && port->mp_td == curthread) {
		error = lmsg->ms_cmd.cm_func(lmsg);
		if (error == EASYNC && (lmsg->ms_flags & MSGF_DONE) == 0)
			panic("netmsg_put_port: self-referential deadlock on netport");
		return(error);
	} else {
		return(lwkt_default_putport(port, lmsg));
	}
}

/*
 * UNIX DOMAIN sockets still have to run their uipc functions synchronously,
 * because they depend on the user proc context for a number of things
 * (like creds) which we have not yet incorporated into the message structure.
 *
 * However, we maintain our message/port abstraction.  Having a special
 * synchronous port which runs the commands synchronously gives us the
 * ability to serialize operations in one place later on when we start
 * removing the BGL.
 *
 * We clear MSGF_DONE prior to executing the message in order to close
 * any potential replymsg races with the flags field.  If a synchronous
 * result code is returned we set MSGF_DONE again.  MSGF_DONE's flag state
 * must be correct or the caller will be confused.
 */
static int
netmsg_sync_putport(lwkt_port_t port, lwkt_msg_t lmsg)
{
	int error;

	lmsg->ms_flags &= ~MSGF_DONE;
	lmsg->ms_target_port = port;	/* required for abort */
	error = lmsg->ms_cmd.cm_func(lmsg);
	if (error == EASYNC)
		error = lwkt_waitmsg(lmsg);
	else
		lmsg->ms_flags |= MSGF_DONE;
	return(error);
}

static void
netmsg_sync_abortport(lwkt_port_t port, lwkt_msg_t lmsg)
{
	lmsg->ms_abort_port = lmsg->ms_reply_port;
	lmsg->ms_flags |= MSGF_ABORTED;
	lmsg->ms_abort.cm_func(lmsg);
}

static void
netisr_init(void)
{
	int i;

	/*
	 * Create default per-cpu threads for generic protocol handling.
	 */
	for (i = 0; i < ncpus; ++i) {
		lwkt_create(netmsg_service_loop, NULL, NULL, &netisr_cpu[i],
		    0, i, "netisr_cpu %d", i);
		netisr_cpu[i].td_msgport.mp_putport = netmsg_put_port;
	}

	/*
	 * The netisr_afree_rport is a special reply port which automatically
	 * frees the replied message.  The netisr_adone_rport simply marks
	 * the message as being done.
	 */
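	/*
	 * (The auto-free reply port pairs with the malloc()'d messages in
	 * netisr_queue() below: the sender never waits for them, so their
	 * storage is reclaimed in netisr_autofree_reply() when the target
	 * replies.)
	 */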
	lwkt_initport(&netisr_afree_rport, NULL);
	netisr_afree_rport.mp_replyport = netisr_autofree_reply;
	lwkt_initport_null_rport(&netisr_adone_rport, NULL);

	/*
	 * The netisr_sync_port is a special port which executes the message
	 * synchronously and waits for it if EASYNC is returned.
	 */
	lwkt_initport(&netisr_sync_port, NULL);
	netisr_sync_port.mp_putport = netmsg_sync_putport;
	netisr_sync_port.mp_abortport = netmsg_sync_abortport;
}

SYSINIT(netisr, SI_SUB_PROTO_BEGIN, SI_ORDER_FIRST, netisr_init, NULL);

void
netmsg_service_loop(void *arg)
{
	struct netmsg *msg;

	while ((msg = lwkt_waitport(&curthread->td_msgport, NULL))) {
		msg->nm_lmsg.ms_cmd.cm_func(&msg->nm_lmsg);
	}
}

/*
 * Call the netisr directly.
 * Queueing may be done in the msg port layer at its discretion.
 */
void
netisr_dispatch(int num, struct mbuf *m)
{
	/* just queue it for now XXX JH */
	netisr_queue(num, m);
}

/*
 * Same as netisr_dispatch(), but always queue.
 * This is either used in places where we are not confident that
 * direct dispatch is possible, or where queueing is required.
 */
int
netisr_queue(int num, struct mbuf *m)
{
	struct netisr *ni;
	struct netmsg_packet *pmsg;
	lwkt_port_t port;

	KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
	    ("netisr_queue: bad isr %d", num));

	ni = &netisrs[num];
	if (ni->ni_handler == NULL) {
		printf("netisr_queue: unregistered isr %d\n", num);
		return (EIO);
	}

	if ((port = ni->ni_mport(&m)) == NULL)
		return (EIO);

	/* use better message allocation system with limits later XXX JH */
	pmsg = malloc(sizeof(struct netmsg_packet), M_LWKTMSG, M_WAITOK);

	lwkt_initmsg(&pmsg->nm_lmsg, &netisr_afree_rport, 0,
	    lwkt_cmd_func((void *)ni->ni_handler), lwkt_cmd_op_none);
	pmsg->nm_packet = m;
	pmsg->nm_lmsg.u.ms_result = num;
	lwkt_sendmsg(port, &pmsg->nm_lmsg);
	return (0);
}

void
netisr_register(int num, lwkt_portfn_t mportfn, netisr_fn_t handler)
{
	KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
	    ("netisr_register: bad isr %d", num));
	lwkt_initmsg(&netisrs[num].ni_netmsg.nm_lmsg, &netisr_adone_rport, 0,
	    lwkt_cmd_op_none, lwkt_cmd_op_none);
	netisrs[num].ni_mport = mportfn;
	netisrs[num].ni_handler = handler;
}

int
netisr_unregister(int num)
{
	KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
	    ("netisr_unregister: bad isr %d", num));

	/* XXX JH */
	return (0);
}

/*
 * Return message port for default handler thread on CPU 0.
 */
lwkt_port_t
cpu0_portfn(struct mbuf **mptr)
{
	return (&netisr_cpu[0].td_msgport);
}

/* ARGSUSED */
lwkt_port_t
cpu0_soport(struct socket *so __unused, struct sockaddr *nam __unused,
    int req __unused)
{
	return (&netisr_cpu[0].td_msgport);
}

lwkt_port_t
sync_soport(struct socket *so __unused, struct sockaddr *nam __unused,
    int req __unused)
{
	return (&netisr_sync_port);
}
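
/*
 * Usage sketch: a protocol registers its handler and port-mapping
 * function at init time and then hands received packets to the netisr
 * layer.  The handler name below is hypothetical.
 *
 *	netisr_register(NETISR_IP, cpu0_portfn, ip_netisr_handler);
 *	...
 *	netisr_dispatch(NETISR_IP, m);
 */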

/*
 * schednetisr() is used to call the netisr handler from the appropriate
 * netisr thread for polling and other purposes.
 *
 * This function may be called from a hard interrupt or IPI and must be
 * MP SAFE and non-blocking.  We use a fixed per-cpu message instead of
 * trying to allocate one.  We must get ourselves onto the target cpu
 * to safely check the MSGF_DONE bit on the message, but since the message
 * will be sent to that cpu anyway this does not add any extra work beyond
 * what lwkt_sendmsg() would have already had to do to schedule the target
 * thread.
 */
static void
schednetisr_remote(void *data)
{
	int num = (int)data;
	struct netisr *ni = &netisrs[num];
	lwkt_port_t port = &netisr_cpu[0].td_msgport;
	struct netmsg *pmsg;

	pmsg = &netisrs[num].ni_netmsg;
	crit_enter();
	if (pmsg->nm_lmsg.ms_flags & MSGF_DONE) {
		lwkt_initmsg(&pmsg->nm_lmsg, &netisr_adone_rport, 0,
		    lwkt_cmd_func((void *)ni->ni_handler), lwkt_cmd_op_none);
		pmsg->nm_lmsg.u.ms_result = num;
		lwkt_sendmsg(port, &pmsg->nm_lmsg);
	}
	crit_exit();
}

void
schednetisr(int num)
{
	KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
	    ("schednetisr: bad isr %d", num));
#ifdef SMP
	if (mycpu->gd_cpuid != 0)
		lwkt_send_ipiq(globaldata_find(0), schednetisr_remote, (void *)num);
	else
		schednetisr_remote((void *)num);
#else
	schednetisr_remote((void *)num);
#endif
}
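
/*
 * Example (illustrative only; NETISR_ARP is just a sample protocol
 * number): a protocol whose handler has been registered above can ask
 * for that handler to be run in the netisr thread without queueing a
 * packet:
 *
 *	schednetisr(NETISR_ARP);
 */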