/*
 * Copyright (c) 2003, 2004 Matthew Dillon.  All rights reserved.
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003 Jonathan Lemon.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jonathan Lemon, Jeffrey M. Hsu, and Matthew Dillon.
 *
 * Jonathan Lemon gave Jeffrey Hsu permission to combine his copyright
 * into this one around July 8 2004.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/msgport.h>
#include <sys/proc.h>
#include <sys/interrupt.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/socketvar.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr2.h>
#include <machine/cpufunc.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <net/netmsg2.h>
#include <sys/mplock2.h>

static void netmsg_service_port_init(lwkt_port_t);
static void netmsg_service_loop(void *arg);
static void netisr_hashfn0(struct mbuf **mp, int hoff);
static void netisr_nohashck(struct mbuf *, const struct pktinfo *);

struct netmsg_port_registration {
        TAILQ_ENTRY(netmsg_port_registration) npr_entry;
        lwkt_port_t     npr_port;
};

struct netmsg_rollup {
        TAILQ_ENTRY(netmsg_rollup) ru_entry;
        netisr_ru_t     ru_func;
        int             ru_prio;
};

struct netmsg_barrier {
        struct netmsg_base      base;
        volatile cpumask_t      *br_cpumask;
        volatile uint32_t       br_done;
};

#define NETISR_BR_NOTDONE       0x1
#define NETISR_BR_WAITDONE      0x80000000

struct netisr_barrier {
        struct netmsg_barrier   *br_msgs[MAXCPU];
        int                     br_isset;
};

void            *netlastfunc[MAXCPU];
static struct netisr netisrs[NETISR_MAX];
static TAILQ_HEAD(,netmsg_port_registration) netreglist;
static TAILQ_HEAD(,netmsg_rollup) netrulist;

/* Per-CPU thread to handle any protocol. */
struct thread *netisr_threads[MAXCPU];

lwkt_port netisr_afree_rport;
lwkt_port netisr_afree_free_so_rport;
lwkt_port netisr_adone_rport;
lwkt_port netisr_apanic_rport;
lwkt_port netisr_sync_port;

static int (*netmsg_fwd_port_fn)(lwkt_port_t, lwkt_msg_t);

SYSCTL_NODE(_net, OID_AUTO, netisr, CTLFLAG_RW, 0, "netisr");

static int netisr_rollup_limit = 32;
SYSCTL_INT(_net_netisr, OID_AUTO, rollup_limit, CTLFLAG_RW,
    &netisr_rollup_limit, 0, "Messages to process before rollup");

int netisr_ncpus;
SYSCTL_INT(_net_netisr, OID_AUTO, ncpus, CTLFLAG_RD,
    &netisr_ncpus, 0, "# of CPUs to handle network messages");

/*
 * netisr_afree_rport replymsg function, only used to handle async
 * messages which the sender has abandoned to their fate.
 */
static void
netisr_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
        kfree(msg, M_LWKTMSG);
}

static void
netisr_autofree_free_so_reply(lwkt_port_t port, lwkt_msg_t msg)
{
        sofree(((netmsg_t)msg)->base.nm_so);
        kfree(msg, M_LWKTMSG);
}
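
/*
 * Illustrative sketch (not part of the original file): a caller that
 * fires an asynchronous netmsg and abandons it would allocate the
 * message from M_LWKTMSG and aim the reply at netisr_afree_rport:
 *
 *      nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
 *      netmsg_init(nmsg, NULL, &netisr_afree_rport, 0, my_handler);
 *      lwkt_sendmsg(netisr_cpuport(0), &nmsg->lmsg);
 *
 * "my_handler" is a hypothetical dispatch function.  When it replies
 * the message, netisr_autofree_reply() above kfree()s it on the
 * sender's behalf, so the sender never waits for or touches it again.
 */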
/*
 * We need a custom putport function to handle the case where the
 * message target is the current thread's message port.  This case
 * can occur when the TCP or UDP stack does a direct callback to NFS and NFS
 * then turns around and executes a network operation synchronously.
 *
 * To prevent deadlocking, we must execute these self-referential messages
 * synchronously, effectively turning the message into a glorified direct
 * procedure call back into the protocol stack.  The operation must be
 * complete on return or we will deadlock, so panic if it isn't.
 *
 * However, the target function is under no obligation to immediately
 * reply the message.  It may forward it elsewhere.
 */
static int
netmsg_put_port(lwkt_port_t port, lwkt_msg_t lmsg)
{
        netmsg_base_t nmsg = (void *)lmsg;

        if ((lmsg->ms_flags & MSGF_SYNC) && port == &curthread->td_msgport) {
                nmsg->nm_dispatch((netmsg_t)nmsg);
                return(EASYNC);
        } else {
                return(netmsg_fwd_port_fn(port, lmsg));
        }
}

/*
 * UNIX DOMAIN sockets still have to run their uipc functions synchronously,
 * because they depend on the user proc context for a number of things
 * (like creds) which we have not yet incorporated into the message structure.
 *
 * However, we maintain our message/port abstraction.  Having a special
 * synchronous port which runs the commands synchronously gives us the
 * ability to serialize operations in one place later on when we start
 * removing the BGL.
 */
static int
netmsg_sync_putport(lwkt_port_t port, lwkt_msg_t lmsg)
{
        netmsg_base_t nmsg = (void *)lmsg;

        KKASSERT((lmsg->ms_flags & MSGF_DONE) == 0);

        lmsg->ms_target_port = port;    /* required for abort */
        nmsg->nm_dispatch((netmsg_t)nmsg);
        return(EASYNC);
}

static void
netisr_init(void)
{
        int i;

        netisr_ncpus = ncpus2;

        TAILQ_INIT(&netreglist);
        TAILQ_INIT(&netrulist);

        /*
         * Create default per-cpu threads for generic protocol handling.
         */
        for (i = 0; i < ncpus; ++i) {
                lwkt_create(netmsg_service_loop, NULL, &netisr_threads[i],
                    NULL, TDF_NOSTART|TDF_FORCE_SPINPORT|TDF_FIXEDCPU,
                    i, "netisr %d", i);
                netmsg_service_port_init(&netisr_threads[i]->td_msgport);
                lwkt_schedule(netisr_threads[i]);
        }

        /*
         * The netisr_afree_rport is a special reply port which automatically
         * frees the replied message.  The netisr_adone_rport simply marks
         * the message as being done.  The netisr_apanic_rport panics if
         * the message is replied to.
         */
        lwkt_initport_replyonly(&netisr_afree_rport, netisr_autofree_reply);
        lwkt_initport_replyonly(&netisr_afree_free_so_rport,
            netisr_autofree_free_so_reply);
        lwkt_initport_replyonly_null(&netisr_adone_rport);
        lwkt_initport_panic(&netisr_apanic_rport);

        /*
         * The netisr_sync_port is a special port which executes the message
         * synchronously and waits for it if EASYNC is returned.
         */
        lwkt_initport_putonly(&netisr_sync_port, netmsg_sync_putport);
}
SYSINIT(netisr, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, netisr_init, NULL);

/*
 * Finish initializing the message port for a netmsg service.  This also
 * registers the port for synchronous cleanup operations such as when an
 * ifnet is being destroyed.  There is no deregistration API yet.
 */
static void
netmsg_service_port_init(lwkt_port_t port)
{
        struct netmsg_port_registration *reg;

        /*
         * Override the putport function.  Our custom function checks for
         * self-references and executes such commands synchronously.
         */
        if (netmsg_fwd_port_fn == NULL)
                netmsg_fwd_port_fn = port->mp_putport;
        KKASSERT(netmsg_fwd_port_fn == port->mp_putport);
        port->mp_putport = netmsg_put_port;

        /*
         * Keep track of ports using the netmsg API so we can synchronize
         * certain operations (such as freeing an ifnet structure) across all
         * consumers.
         */
        reg = kmalloc(sizeof(*reg), M_TEMP, M_WAITOK|M_ZERO);
        reg->npr_port = port;
        TAILQ_INSERT_TAIL(&netreglist, reg, npr_entry);
}
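
/*
 * Illustrative sketch (hypothetical handler name): because of the
 * putport override installed above, code already running inside a
 * netisr thread may send a synchronous message to its own port without
 * deadlocking; the message degenerates into a direct procedure call:
 *
 *      netmsg_init(&msg, NULL, &curthread->td_msgport, 0, my_subop);
 *      lwkt_domsg(netisr_cpuport(mycpuid), &msg.lmsg, 0);
 *
 * When netisr_cpuport(mycpuid) is the current thread's own port, the
 * MSGF_SYNC path in netmsg_put_port() simply calls my_subop() inline
 * instead of queueing the message back to ourselves.
 */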
/*
 * This function synchronizes the caller with all netmsg services.  For
 * example, if an interface is being removed we must make sure that all
 * packets related to that interface complete processing before the structure
 * can actually be freed.  This sort of synchronization is an alternative to
 * ref-counting the netif, removing the ref counting overhead in favor of
 * placing additional overhead in the netif freeing sequence (where it is
 * inconsequential).
 */
void
netmsg_service_sync(void)
{
        struct netmsg_port_registration *reg;
        struct netmsg_base smsg;

        netmsg_init(&smsg, NULL, &curthread->td_msgport, 0,
            netmsg_sync_handler);

        TAILQ_FOREACH(reg, &netreglist, npr_entry) {
                lwkt_domsg(reg->npr_port, &smsg.lmsg, 0);
        }
}

/*
 * The netmsg function simply replies the message.  API semantics require
 * EASYNC to be returned if the netmsg function disposes of the message.
 */
void
netmsg_sync_handler(netmsg_t msg)
{
        lwkt_replymsg(&msg->lmsg, 0);
}

/*
 * Generic netmsg service loop.  Some protocols may roll their own but all
 * must do the basic command dispatch function call done here.
 */
static void
netmsg_service_loop(void *arg)
{
        struct netmsg_rollup *ru;
        netmsg_base_t msg;
        thread_t td = curthread;
        int limit;

        td->td_type = TD_TYPE_NETISR;

        while ((msg = lwkt_waitport(&td->td_msgport, 0))) {
                /*
                 * Run up to netisr_rollup_limit pending netmsgs.
                 */
                limit = netisr_rollup_limit;
                do {
                        KASSERT(msg->nm_dispatch != NULL,
                            ("netmsg_service isr %d badmsg",
                             msg->lmsg.u.ms_result));
                        /*
                         * Don't match so_port, if the msg explicitly
                         * asks us to ignore its so_port.
                         */
                        if ((msg->lmsg.ms_flags & MSGF_IGNSOPORT) == 0 &&
                            msg->nm_so &&
                            msg->nm_so->so_port != &td->td_msgport) {
                                /*
                                 * Sockets undergoing connect or disconnect
                                 * ops can change ports on us.  Chase the
                                 * port.
                                 */
#ifdef foo
                                /*
                                 * This could be quite common for protocols
                                 * which support asynchronous pru_connect,
                                 * e.g. TCP, so kprintf socket port chasing
                                 * could be too verbose for the console.
                                 */
                                kprintf("%s: Warning, port changed so=%p\n",
                                    __func__, msg->nm_so);
#endif
                                lwkt_forwardmsg(msg->nm_so->so_port,
                                    &msg->lmsg);
                        } else {
                                /*
                                 * We are on the correct port, dispatch it.
                                 */
                                netlastfunc[mycpuid] = msg->nm_dispatch;
                                msg->nm_dispatch((netmsg_t)msg);
                        }
                        if (--limit == 0)
                                break;
                } while ((msg = lwkt_getport(&td->td_msgport)) != NULL);

                /*
                 * Run all registered rollup functions for this cpu
                 * (e.g. tcp_willblock()).
                 */
                TAILQ_FOREACH(ru, &netrulist, ru_entry)
                        ru->ru_func();
        }
}
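
/*
 * Illustrative sketch (hypothetical names): the port-chasing logic
 * above matters for socket-bound messages.  A sender targeting a TCP
 * socket might do:
 *
 *      netmsg_init(&msg.base, so, &curthread->td_msgport, 0, my_req);
 *      lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
 *
 * If a concurrent connect/disconnect migrates the socket, so_port may
 * point at a different netisr thread by the time the message is
 * dequeued; the loop above then lwkt_forwardmsg()s the message rather
 * than running my_req() on the wrong cpu.
 */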
/*
 * Forward a packet to a netisr service function.
 *
 * If the packet has not been assigned to a protocol thread we call
 * the port characterization function to assign it.  The caller must
 * clear M_HASH (or not have set it in the first place) if the caller
 * wishes the packet to be recharacterized.
 */
int
netisr_queue(int num, struct mbuf *m)
{
        struct netisr *ni;
        struct netmsg_packet *pmsg;
        lwkt_port_t port;

        KASSERT((num > 0 && num <= NELEM(netisrs)),
            ("Bad isr %d", num));

        ni = &netisrs[num];
        if (ni->ni_handler == NULL) {
                kprintf("%s: Unregistered isr %d\n", __func__, num);
                m_freem(m);
                return (EIO);
        }

        /*
         * Figure out which protocol thread to send to.  This does not
         * have to be perfect but performance will be really good if it
         * is correct.  Major protocol inputs such as ip_input() will
         * re-characterize the packet as necessary.
         */
        if ((m->m_flags & M_HASH) == 0) {
                ni->ni_hashfn(&m, 0);
                if (m == NULL)
                        return (EIO);
                if ((m->m_flags & M_HASH) == 0) {
                        kprintf("%s(%d): packet hash failed\n",
                            __func__, num);
                        m_freem(m);
                        return (EIO);
                }
        }

        /*
         * Get the protocol port based on the packet hash, initialize
         * the netmsg, and send it off.
         */
        port = netisr_hashport(m->m_pkthdr.hash);
        pmsg = &m->m_hdr.mh_netmsg;
        netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport,
            0, ni->ni_handler);
        pmsg->nm_packet = m;
        pmsg->base.lmsg.u.ms_result = num;
        lwkt_sendmsg(port, &pmsg->base.lmsg);

        return (0);
}

/*
 * Run a netisr service function on the packet.
 *
 * The packet must have been correctly characterized!
 */
int
netisr_handle(int num, struct mbuf *m)
{
        struct netisr *ni;
        struct netmsg_packet *pmsg;
        lwkt_port_t port;

        /*
         * Get the protocol port based on the packet hash
         */
        KASSERT((m->m_flags & M_HASH), ("packet not characterized"));
        port = netisr_hashport(m->m_pkthdr.hash);
        KASSERT(&curthread->td_msgport == port, ("wrong msgport"));

        KASSERT((num > 0 && num <= NELEM(netisrs)), ("bad isr %d", num));
        ni = &netisrs[num];
        if (ni->ni_handler == NULL) {
                kprintf("%s: unregistered isr %d\n", __func__, num);
                m_freem(m);
                return EIO;
        }

        /*
         * Initialize the netmsg, and run the handler directly.
         */
        pmsg = &m->m_hdr.mh_netmsg;
        netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport,
            0, ni->ni_handler);
        pmsg->nm_packet = m;
        pmsg->base.lmsg.u.ms_result = num;
        ni->ni_handler((netmsg_t)&pmsg->base);

        return 0;
}
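
/*
 * Illustrative sketch (not from the original file): an input path that
 * holds an mbuf but is not yet in a protocol thread hands it off with:
 *
 *      m->m_flags &= ~M_HASH;          force recharacterization
 *      netisr_queue(NETISR_IP, m);     NETISR_IP used only as an example
 *
 * whereas code that already runs in the correct protocol thread, for
 * instance after netisr_characterize() below has assigned the hash,
 * can dispatch inline via netisr_handle(NETISR_IP, m).
 */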
/*
 * Pre-characterization of a deeper portion of the packet for the
 * requested isr.
 *
 * The base of the ISR type (e.g. IP) that we want to characterize is
 * at (hoff) relative to the beginning of the mbuf.  This allows
 * e.g. ether_characterize() to not have to adjust the m_data/m_len.
 */
void
netisr_characterize(int num, struct mbuf **mp, int hoff)
{
        struct netisr *ni;
        struct mbuf *m;

        /*
         * Validation
         */
        m = *mp;
        KKASSERT(m != NULL);

        if (num < 0 || num >= NETISR_MAX) {
                if (num == NETISR_MAX) {
                        m_sethash(m, 0);
                        return;
                }
                panic("Bad isr %d", num);
        }

        /*
         * Valid netisr?
         */
        ni = &netisrs[num];
        if (ni->ni_handler == NULL) {
                kprintf("%s: Unregistered isr %d\n", __func__, num);
                m_freem(m);
                *mp = NULL;
                return;         /* do not touch the freed mbuf below */
        }

        /*
         * Characterize the packet
         */
        if ((m->m_flags & M_HASH) == 0) {
                ni->ni_hashfn(mp, hoff);
                m = *mp;
                if (m && (m->m_flags & M_HASH) == 0) {
                        kprintf("%s(%d): packet hash failed\n",
                            __func__, num);
                }
        }
}

void
netisr_register(int num, netisr_fn_t handler, netisr_hashfn_t hashfn)
{
        struct netisr *ni;

        KASSERT((num > 0 && num <= NELEM(netisrs)),
            ("netisr_register: bad isr %d", num));
        KKASSERT(handler != NULL);

        if (hashfn == NULL)
                hashfn = netisr_hashfn0;

        ni = &netisrs[num];

        ni->ni_handler = handler;
        ni->ni_hashck = netisr_nohashck;
        ni->ni_hashfn = hashfn;
        netmsg_init(&ni->ni_netmsg, NULL, &netisr_adone_rport, 0, NULL);
}

void
netisr_register_hashcheck(int num, netisr_hashck_t hashck)
{
        struct netisr *ni;

        KASSERT((num > 0 && num <= NELEM(netisrs)),
            ("netisr_register_hashcheck: bad isr %d", num));

        ni = &netisrs[num];
        ni->ni_hashck = hashck;
}

void
netisr_register_rollup(netisr_ru_t ru_func, int prio)
{
        struct netmsg_rollup *new_ru, *ru;

        new_ru = kmalloc(sizeof(*new_ru), M_TEMP, M_WAITOK|M_ZERO);
        new_ru->ru_func = ru_func;
        new_ru->ru_prio = prio;

        /*
         * Higher priority "rollup" appears first
         */
        TAILQ_FOREACH(ru, &netrulist, ru_entry) {
                if (ru->ru_prio < new_ru->ru_prio) {
                        TAILQ_INSERT_BEFORE(ru, new_ru, ru_entry);
                        return;
                }
        }
        TAILQ_INSERT_TAIL(&netrulist, new_ru, ru_entry);
}

/*
 * Return a default protocol control message processing thread port
 */
lwkt_port_t
cpu0_ctlport(int cmd __unused, struct sockaddr *sa __unused,
    void *extra __unused, int *cpuid)
{
        *cpuid = 0;
        return netisr_cpuport(*cpuid);
}

/*
 * This is a default netisr packet characterization function which
 * sets M_HASH.  If a netisr is registered with a NULL hashfn function
 * this one is assigned.
 *
 * This function makes no attempt to validate the packet.
 */
static void
netisr_hashfn0(struct mbuf **mp, int hoff __unused)
{

        m_sethash(*mp, 0);
}
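
/*
 * Illustrative sketch (hypothetical protocol and names): a protocol
 * registers its handler at init time, optionally with its own hash
 * function and a rollup callback:
 *
 *      netisr_register(NETISR_MYPROTO, my_input_msg, my_hashfn);
 *      netisr_register_rollup(my_willblock, 10);
 *
 * NETISR_MYPROTO and the my_* functions are made-up names.  The rollup
 * priority (10 here, arbitrary) only orders the netrulist: per the
 * insertion loop above, higher priorities run earlier each time the
 * service loop drains its message batch.
 */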
/*
 * schednetisr() is used to call the netisr handler from the appropriate
 * netisr thread for polling and other purposes.
 *
 * This function may be called from a hard interrupt or IPI and must be
 * MP SAFE and non-blocking.  We use a fixed per-cpu message instead of
 * trying to allocate one.  We must get ourselves onto the target cpu
 * to safely check the MSGF_DONE bit on the message but since the message
 * will be sent to that cpu anyway this does not add any extra work beyond
 * what lwkt_sendmsg() would have already had to do to schedule the target
 * thread.
 */
static void
schednetisr_remote(void *data)
{
        int num = (int)(intptr_t)data;
        struct netisr *ni = &netisrs[num];
        lwkt_port_t port = &netisr_threads[0]->td_msgport;
        netmsg_base_t pmsg;

        pmsg = &netisrs[num].ni_netmsg;
        if (pmsg->lmsg.ms_flags & MSGF_DONE) {
                netmsg_init(pmsg, NULL, &netisr_adone_rport, 0,
                    ni->ni_handler);
                pmsg->lmsg.u.ms_result = num;
                lwkt_sendmsg(port, &pmsg->lmsg);
        }
}

void
schednetisr(int num)
{
        KASSERT((num > 0 && num <= NELEM(netisrs)),
            ("schednetisr: bad isr %d", num));
        KKASSERT(netisrs[num].ni_handler != NULL);
        if (mycpu->gd_cpuid != 0) {
                lwkt_send_ipiq(globaldata_find(0),
                    schednetisr_remote, (void *)(intptr_t)num);
        } else {
                crit_enter();
                schednetisr_remote((void *)(intptr_t)num);
                crit_exit();
        }
}

static void
netisr_barrier_dispatch(netmsg_t nmsg)
{
        struct netmsg_barrier *msg = (struct netmsg_barrier *)nmsg;

        ATOMIC_CPUMASK_NANDBIT(*msg->br_cpumask, mycpu->gd_cpuid);
        if (CPUMASK_TESTZERO(*msg->br_cpumask))
                wakeup(msg->br_cpumask);

        for (;;) {
                uint32_t done = msg->br_done;

                cpu_ccfence();
                if ((done & NETISR_BR_NOTDONE) == 0)
                        break;

                tsleep_interlock(&msg->br_done, 0);
                if (atomic_cmpset_int(&msg->br_done,
                    done, done | NETISR_BR_WAITDONE))
                        tsleep(&msg->br_done, PINTERLOCKED, "nbrdsp", 0);
        }

        lwkt_replymsg(&nmsg->lmsg, 0);
}

struct netisr_barrier *
netisr_barrier_create(void)
{
        struct netisr_barrier *br;

        br = kmalloc(sizeof(*br), M_LWKTMSG, M_WAITOK | M_ZERO);
        return br;
}
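
/*
 * Illustrative sketch (not from the original file): a caller running in
 * netisr0 quiesces all other netisr threads around a critical update by
 * bracketing it with the barrier pair defined here:
 *
 *      br = netisr_barrier_create();
 *      netisr_barrier_set(br);         other netisr cpus now block
 *      ...mutate state shared with the netisr threads...
 *      netisr_barrier_rem(br);         release them
 *
 * netisr_barrier_set()/rem() assert ASSERT_IN_NETISR(0), so this
 * sequence is only valid from the cpu0 netisr thread.
 */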
void
netisr_barrier_set(struct netisr_barrier *br)
{
        volatile cpumask_t other_cpumask;
        int i, cur_cpuid;

        ASSERT_IN_NETISR(0);
        KKASSERT(!br->br_isset);

        other_cpumask = mycpu->gd_other_cpus;
        CPUMASK_ANDMASK(other_cpumask, smp_active_mask);
        cur_cpuid = mycpuid;

        for (i = 0; i < ncpus; ++i) {
                struct netmsg_barrier *msg;

                if (i == cur_cpuid)
                        continue;

                msg = kmalloc(sizeof(struct netmsg_barrier),
                    M_LWKTMSG, M_WAITOK);

                /*
                 * Don't use priority message here; mainly to keep
                 * it ordered with the previous data packets sent by
                 * the caller.
                 */
                netmsg_init(&msg->base, NULL, &netisr_afree_rport, 0,
                    netisr_barrier_dispatch);
                msg->br_cpumask = &other_cpumask;
                msg->br_done = NETISR_BR_NOTDONE;

                KKASSERT(br->br_msgs[i] == NULL);
                br->br_msgs[i] = msg;
        }

        for (i = 0; i < ncpus; ++i) {
                if (i == cur_cpuid)
                        continue;
                lwkt_sendmsg(netisr_cpuport(i), &br->br_msgs[i]->base.lmsg);
        }

        while (CPUMASK_TESTNZERO(other_cpumask)) {
                tsleep_interlock(&other_cpumask, 0);
                if (CPUMASK_TESTNZERO(other_cpumask))
                        tsleep(&other_cpumask, PINTERLOCKED, "nbrset", 0);
        }
        br->br_isset = 1;
}

void
netisr_barrier_rem(struct netisr_barrier *br)
{
        int i, cur_cpuid;

        ASSERT_IN_NETISR(0);
        KKASSERT(br->br_isset);

        cur_cpuid = mycpuid;
        for (i = 0; i < ncpus; ++i) {
                struct netmsg_barrier *msg = br->br_msgs[i];
                uint32_t done;

                br->br_msgs[i] = NULL;

                if (i == cur_cpuid)
                        continue;

                done = atomic_swap_int(&msg->br_done, 0);
                if (done & NETISR_BR_WAITDONE)
                        wakeup(&msg->br_done);
        }
        br->br_isset = 0;
}

static void
netisr_nohashck(struct mbuf *m, const struct pktinfo *pi __unused)
{
        m->m_flags &= ~M_HASH;
}

void
netisr_hashcheck(int num, struct mbuf *m, const struct pktinfo *pi)
{
        struct netisr *ni;

        if (num < 0 || num >= NETISR_MAX)
                panic("Bad isr %d", num);

        /*
         * Valid netisr?
         */
        ni = &netisrs[num];
        if (ni->ni_handler == NULL)
                panic("Unregistered isr %d", num);

        ni->ni_hashck(m, pi);
}
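
/*
 * Illustrative sketch (hypothetical protocol and names): a protocol
 * that can re-verify a packet's hash against decoded header info
 * registers a checker at init time:
 *
 *      netisr_register_hashcheck(NETISR_MYPROTO, my_hashck);
 *
 * and a forwarding path validates an inherited hash with:
 *
 *      netisr_hashcheck(NETISR_MYPROTO, m, &pi);
 *
 * Protocols without a checker fall back to netisr_nohashck() above,
 * which simply clears M_HASH so the packet is recharacterized.
 */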