/*
 * Copyright (c) 2003, 2004 Matthew Dillon. All rights reserved.
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
 * Copyright (c) 2003 Jonathan Lemon. All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jonathan Lemon, Jeffrey M. Hsu, and Matthew Dillon.
 *
 * Jonathan Lemon gave Jeffrey Hsu permission to combine his copyright
 * into this one around July 8 2004.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE 29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * $DragonFly: src/sys/net/netisr.c,v 1.49 2008/11/01 10:29:31 sephe Exp $ 39 */ 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/kernel.h> 44 #include <sys/malloc.h> 45 #include <sys/msgport.h> 46 #include <sys/proc.h> 47 #include <sys/interrupt.h> 48 #include <sys/socket.h> 49 #include <sys/sysctl.h> 50 #include <sys/socketvar.h> 51 #include <net/if.h> 52 #include <net/if_var.h> 53 #include <net/netisr.h> 54 #include <machine/cpufunc.h> 55 56 #include <sys/thread2.h> 57 #include <sys/msgport2.h> 58 #include <net/netmsg2.h> 59 #include <sys/mplock2.h> 60 61 static void netmsg_sync_func(netmsg_t msg); 62 static void netmsg_service_loop(void *arg); 63 static void cpu0_cpufn(struct mbuf **mp, int hoff); 64 65 struct netmsg_port_registration { 66 TAILQ_ENTRY(netmsg_port_registration) npr_entry; 67 lwkt_port_t npr_port; 68 }; 69 70 struct netmsg_rollup { 71 TAILQ_ENTRY(netmsg_rollup) ru_entry; 72 netisr_ru_t ru_func; 73 }; 74 75 static struct netisr netisrs[NETISR_MAX]; 76 static TAILQ_HEAD(,netmsg_port_registration) netreglist; 77 static TAILQ_HEAD(,netmsg_rollup) netrulist; 78 79 /* Per-CPU thread to handle any protocol. 
*/ 80 static struct thread netisr_cpu[MAXCPU]; 81 lwkt_port netisr_afree_rport; 82 lwkt_port netisr_adone_rport; 83 lwkt_port netisr_apanic_rport; 84 lwkt_port netisr_sync_port; 85 86 static int (*netmsg_fwd_port_fn)(lwkt_port_t, lwkt_msg_t); 87 88 SYSCTL_NODE(_net, OID_AUTO, netisr, CTLFLAG_RW, 0, "netisr"); 89 90 /* 91 * netisr_afree_rport replymsg function, only used to handle async 92 * messages which the sender has abandoned to their fate. 93 */ 94 static void 95 netisr_autofree_reply(lwkt_port_t port, lwkt_msg_t msg) 96 { 97 kfree(msg, M_LWKTMSG); 98 } 99 100 /* 101 * We need a custom putport function to handle the case where the 102 * message target is the current thread's message port. This case 103 * can occur when the TCP or UDP stack does a direct callback to NFS and NFS 104 * then turns around and executes a network operation synchronously. 105 * 106 * To prevent deadlocking, we must execute these self-referential messages 107 * synchronously, effectively turning the message into a glorified direct 108 * procedure call back into the protocol stack. The operation must be 109 * complete on return or we will deadlock, so panic if it isn't. 110 * 111 * However, the target function is under no obligation to immediately 112 * reply the message. It may forward it elsewhere. 113 */ 114 static int 115 netmsg_put_port(lwkt_port_t port, lwkt_msg_t lmsg) 116 { 117 netmsg_base_t nmsg = (void *)lmsg; 118 119 if ((lmsg->ms_flags & MSGF_SYNC) && port == &curthread->td_msgport) { 120 nmsg->nm_dispatch((netmsg_t)nmsg); 121 return(EASYNC); 122 } else { 123 return(netmsg_fwd_port_fn(port, lmsg)); 124 } 125 } 126 127 /* 128 * UNIX DOMAIN sockets still have to run their uipc functions synchronously, 129 * because they depend on the user proc context for a number of things 130 * (like creds) which we have not yet incorporated into the message structure. 131 * 132 * However, we maintain or message/port abstraction. 
Having a special 133 * synchronous port which runs the commands synchronously gives us the 134 * ability to serialize operations in one place later on when we start 135 * removing the BGL. 136 */ 137 static int 138 netmsg_sync_putport(lwkt_port_t port, lwkt_msg_t lmsg) 139 { 140 netmsg_base_t nmsg = (void *)lmsg; 141 142 KKASSERT((lmsg->ms_flags & MSGF_DONE) == 0); 143 144 lmsg->ms_target_port = port; /* required for abort */ 145 nmsg->nm_dispatch((netmsg_t)nmsg); 146 return(EASYNC); 147 } 148 149 static void 150 netisr_init(void) 151 { 152 int i; 153 154 TAILQ_INIT(&netreglist); 155 TAILQ_INIT(&netrulist); 156 157 /* 158 * Create default per-cpu threads for generic protocol handling. 159 */ 160 for (i = 0; i < ncpus; ++i) { 161 lwkt_create(netmsg_service_loop, NULL, NULL, 162 &netisr_cpu[i], TDF_STOPREQ, i, 163 "netisr_cpu %d", i); 164 netmsg_service_port_init(&netisr_cpu[i].td_msgport); 165 lwkt_schedule(&netisr_cpu[i]); 166 } 167 168 /* 169 * The netisr_afree_rport is a special reply port which automatically 170 * frees the replied message. The netisr_adone_rport simply marks 171 * the message as being done. The netisr_apanic_rport panics if 172 * the message is replied to. 173 */ 174 lwkt_initport_replyonly(&netisr_afree_rport, netisr_autofree_reply); 175 lwkt_initport_replyonly_null(&netisr_adone_rport); 176 lwkt_initport_panic(&netisr_apanic_rport); 177 178 /* 179 * The netisr_syncport is a special port which executes the message 180 * synchronously and waits for it if EASYNC is returned. 181 */ 182 lwkt_initport_putonly(&netisr_sync_port, netmsg_sync_putport); 183 } 184 185 SYSINIT(netisr, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, netisr_init, NULL); 186 187 /* 188 * Finish initializing the message port for a netmsg service. This also 189 * registers the port for synchronous cleanup operations such as when an 190 * ifnet is being destroyed. There is no deregistration API yet. 
191 */ 192 void 193 netmsg_service_port_init(lwkt_port_t port) 194 { 195 struct netmsg_port_registration *reg; 196 197 /* 198 * Override the putport function. Our custom function checks for 199 * self-references and executes such commands synchronously. 200 */ 201 if (netmsg_fwd_port_fn == NULL) 202 netmsg_fwd_port_fn = port->mp_putport; 203 KKASSERT(netmsg_fwd_port_fn == port->mp_putport); 204 port->mp_putport = netmsg_put_port; 205 206 /* 207 * Keep track of ports using the netmsg API so we can synchronize 208 * certain operations (such as freeing an ifnet structure) across all 209 * consumers. 210 */ 211 reg = kmalloc(sizeof(*reg), M_TEMP, M_WAITOK|M_ZERO); 212 reg->npr_port = port; 213 TAILQ_INSERT_TAIL(&netreglist, reg, npr_entry); 214 } 215 216 /* 217 * This function synchronizes the caller with all netmsg services. For 218 * example, if an interface is being removed we must make sure that all 219 * packets related to that interface complete processing before the structure 220 * can actually be freed. This sort of synchronization is an alternative to 221 * ref-counting the netif, removing the ref counting overhead in favor of 222 * placing additional overhead in the netif freeing sequence (where it is 223 * inconsequential). 224 */ 225 void 226 netmsg_service_sync(void) 227 { 228 struct netmsg_port_registration *reg; 229 struct netmsg_base smsg; 230 231 netmsg_init(&smsg, NULL, &curthread->td_msgport, 0, netmsg_sync_func); 232 233 TAILQ_FOREACH(reg, &netreglist, npr_entry) { 234 lwkt_domsg(reg->npr_port, &smsg.lmsg, 0); 235 } 236 } 237 238 /* 239 * The netmsg function simply replies the message. API semantics require 240 * EASYNC to be returned if the netmsg function disposes of the message. 241 */ 242 static void 243 netmsg_sync_func(netmsg_t msg) 244 { 245 lwkt_replymsg(&msg->lmsg, 0); 246 } 247 248 /* 249 * Generic netmsg service loop. Some protocols may roll their own but all 250 * must do the basic command dispatch function call done here. 
251 */ 252 static void 253 netmsg_service_loop(void *arg) 254 { 255 struct netmsg_rollup *ru; 256 netmsg_base_t msg; 257 thread_t td = curthread;; 258 int limit; 259 260 while ((msg = lwkt_waitport(&td->td_msgport, 0))) { 261 /* 262 * Run up to 512 pending netmsgs. 263 */ 264 limit = 512; 265 do { 266 KASSERT(msg->nm_dispatch != NULL, 267 ("netmsg_service isr %d badmsg\n", 268 msg->lmsg.u.ms_result)); 269 if (msg->nm_so && 270 msg->nm_so->so_port != &td->td_msgport) { 271 /* 272 * Sockets undergoing connect or disconnect 273 * ops can change ports on us. Chase the 274 * port. 275 */ 276 kprintf("netmsg_service_loop: Warning, " 277 "port changed so=%p\n", msg->nm_so); 278 lwkt_forwardmsg(msg->nm_so->so_port, 279 &msg->lmsg); 280 } else { 281 /* 282 * We are on the correct port, dispatch it. 283 */ 284 msg->nm_dispatch((netmsg_t)msg); 285 } 286 if (--limit == 0) 287 break; 288 } while ((msg = lwkt_getport(&td->td_msgport)) != NULL); 289 290 /* 291 * Run all registered rollup functions for this cpu 292 * (e.g. tcp_willblock()). 293 */ 294 TAILQ_FOREACH(ru, &netrulist, ru_entry) 295 ru->ru_func(); 296 } 297 } 298 299 /* 300 * Forward a packet to a netisr service function. 301 * 302 * If the packet has not been assigned to a protocol thread we call 303 * the port characterization function to assign it. The caller must 304 * clear M_HASH (or not have set it in the first place) if the caller 305 * wishes the packet to be recharacterized. 306 */ 307 int 308 netisr_queue(int num, struct mbuf *m) 309 { 310 struct netisr *ni; 311 struct netmsg_packet *pmsg; 312 lwkt_port_t port; 313 314 KASSERT((num > 0 && num <= (sizeof(netisrs)/sizeof(netisrs[0]))), 315 ("Bad isr %d", num)); 316 317 ni = &netisrs[num]; 318 if (ni->ni_handler == NULL) { 319 kprintf("Unregistered isr %d\n", num); 320 m_freem(m); 321 return (EIO); 322 } 323 324 /* 325 * Figure out which protocol thread to send to. 
This does not 326 * have to be perfect but performance will be really good if it 327 * is correct. Major protocol inputs such as ip_input() will 328 * re-characterize the packet as necessary. 329 */ 330 if ((m->m_flags & M_HASH) == 0) { 331 ni->ni_cpufn(&m, 0); 332 if (m == NULL) { 333 m_freem(m); 334 return (EIO); 335 } 336 if ((m->m_flags & M_HASH) == 0) { 337 kprintf("netisr_queue(%d): packet hash failed\n", num); 338 m_freem(m); 339 return (EIO); 340 } 341 } 342 343 /* 344 * Get the protocol port based on the packet hash, initialize 345 * the netmsg, and send it off. 346 */ 347 port = cpu_portfn(m->m_pkthdr.hash); 348 pmsg = &m->m_hdr.mh_netmsg; 349 netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport, 350 0, ni->ni_handler); 351 pmsg->nm_packet = m; 352 pmsg->base.lmsg.u.ms_result = num; 353 lwkt_sendmsg(port, &pmsg->base.lmsg); 354 355 return (0); 356 } 357 358 /* 359 * Pre-characterization of a deeper portion of the packet for the 360 * requested isr. 361 * 362 * The base of the ISR type (e.g. IP) that we want to characterize is 363 * at (hoff) relative to the beginning of the mbuf. This allows 364 * e.g. ether_input_chain() to not have to adjust the m_data/m_len. 365 */ 366 void 367 netisr_characterize(int num, struct mbuf **mp, int hoff) 368 { 369 struct netisr *ni; 370 struct mbuf *m; 371 372 /* 373 * Validation 374 */ 375 m = *mp; 376 KKASSERT(m != NULL); 377 378 if (num < 0 || num >= NETISR_MAX) { 379 if (num == NETISR_MAX) { 380 m->m_flags |= M_HASH; 381 m->m_pkthdr.hash = 0; 382 return; 383 } 384 panic("Bad isr %d", num); 385 } 386 387 /* 388 * Valid netisr? 
389 */ 390 ni = &netisrs[num]; 391 if (ni->ni_handler == NULL) { 392 kprintf("Unregistered isr %d\n", num); 393 m_freem(m); 394 *mp = NULL; 395 } 396 397 /* 398 * Characterize the packet 399 */ 400 if ((m->m_flags & M_HASH) == 0) { 401 ni->ni_cpufn(mp, hoff); 402 m = *mp; 403 if (m && (m->m_flags & M_HASH) == 0) 404 kprintf("netisr_queue(%d): packet hash failed\n", num); 405 } 406 } 407 408 void 409 netisr_register(int num, netisr_fn_t handler, netisr_cpufn_t cpufn) 410 { 411 struct netisr *ni; 412 413 KASSERT((num > 0 && num <= (sizeof(netisrs)/sizeof(netisrs[0]))), 414 ("netisr_register: bad isr %d", num)); 415 KKASSERT(handler != NULL); 416 417 if (cpufn == NULL) 418 cpufn = cpu0_cpufn; 419 420 ni = &netisrs[num]; 421 422 ni->ni_handler = handler; 423 ni->ni_cpufn = cpufn; 424 netmsg_init(&ni->ni_netmsg, NULL, &netisr_adone_rport, 0, NULL); 425 } 426 427 void 428 netisr_register_rollup(netisr_ru_t ru_func) 429 { 430 struct netmsg_rollup *ru; 431 432 ru = kmalloc(sizeof(*ru), M_TEMP, M_WAITOK|M_ZERO); 433 ru->ru_func = ru_func; 434 TAILQ_INSERT_TAIL(&netrulist, ru, ru_entry); 435 } 436 437 /* 438 * Return the message port for the general protocol message servicing 439 * thread for a particular cpu. 440 */ 441 lwkt_port_t 442 cpu_portfn(int cpu) 443 { 444 KKASSERT(cpu >= 0 && cpu < ncpus); 445 return (&netisr_cpu[cpu].td_msgport); 446 } 447 448 /* 449 * Return the current cpu's network protocol thread. 450 */ 451 lwkt_port_t 452 cur_netport(void) 453 { 454 return(cpu_portfn(mycpu->gd_cpuid)); 455 } 456 457 /* 458 * Return a default protocol control message processing thread port 459 */ 460 lwkt_port_t 461 cpu0_ctlport(int cmd __unused, struct sockaddr *sa __unused, 462 void *extra __unused) 463 { 464 return (&netisr_cpu[0].td_msgport); 465 } 466 467 /* 468 * This is a default netisr packet characterization function which 469 * sets M_HASH. If a netisr is registered with a NULL cpufn function 470 * this one is assigned. 
471 * 472 * This function makes no attempt to validate the packet. 473 */ 474 static void 475 cpu0_cpufn(struct mbuf **mp, int hoff __unused) 476 { 477 struct mbuf *m = *mp; 478 479 m->m_flags |= M_HASH; 480 m->m_pkthdr.hash = 0; 481 } 482 483 /* 484 * schednetisr() is used to call the netisr handler from the appropriate 485 * netisr thread for polling and other purposes. 486 * 487 * This function may be called from a hard interrupt or IPI and must be 488 * MP SAFE and non-blocking. We use a fixed per-cpu message instead of 489 * trying to allocate one. We must get ourselves onto the target cpu 490 * to safely check the MSGF_DONE bit on the message but since the message 491 * will be sent to that cpu anyway this does not add any extra work beyond 492 * what lwkt_sendmsg() would have already had to do to schedule the target 493 * thread. 494 */ 495 static void 496 schednetisr_remote(void *data) 497 { 498 int num = (int)(intptr_t)data; 499 struct netisr *ni = &netisrs[num]; 500 lwkt_port_t port = &netisr_cpu[0].td_msgport; 501 netmsg_base_t pmsg; 502 503 pmsg = &netisrs[num].ni_netmsg; 504 if (pmsg->lmsg.ms_flags & MSGF_DONE) { 505 netmsg_init(pmsg, NULL, &netisr_adone_rport, 0, ni->ni_handler); 506 pmsg->lmsg.u.ms_result = num; 507 lwkt_sendmsg(port, &pmsg->lmsg); 508 } 509 } 510 511 void 512 schednetisr(int num) 513 { 514 KASSERT((num > 0 && num <= (sizeof(netisrs)/sizeof(netisrs[0]))), 515 ("schednetisr: bad isr %d", num)); 516 KKASSERT(netisrs[num].ni_handler != NULL); 517 #ifdef SMP 518 if (mycpu->gd_cpuid != 0) { 519 lwkt_send_ipiq(globaldata_find(0), 520 schednetisr_remote, (void *)(intptr_t)num); 521 } else { 522 crit_enter(); 523 schednetisr_remote((void *)(intptr_t)num); 524 crit_exit(); 525 } 526 #else 527 crit_enter(); 528 schednetisr_remote((void *)(intptr_t)num); 529 crit_exit(); 530 #endif 531 } 532