1 /* 2 * Copyright (c) 1980, 1986, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)if.c 8.3 (Berkeley) 1/4/94 30 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $ 31 */ 32 33 #include "opt_inet6.h" 34 #include "opt_inet.h" 35 #include "opt_ifpoll.h" 36 37 #include <sys/param.h> 38 #include <sys/malloc.h> 39 #include <sys/mbuf.h> 40 #include <sys/systm.h> 41 #include <sys/proc.h> 42 #include <sys/priv.h> 43 #include <sys/protosw.h> 44 #include <sys/socket.h> 45 #include <sys/socketvar.h> 46 #include <sys/socketops.h> 47 #include <sys/kernel.h> 48 #include <sys/ktr.h> 49 #include <sys/mutex.h> 50 #include <sys/sockio.h> 51 #include <sys/syslog.h> 52 #include <sys/sysctl.h> 53 #include <sys/domain.h> 54 #include <sys/thread.h> 55 #include <sys/serialize.h> 56 #include <sys/bus.h> 57 58 #include <sys/thread2.h> 59 #include <sys/msgport2.h> 60 #include <sys/mutex2.h> 61 62 #include <net/if.h> 63 #include <net/if_arp.h> 64 #include <net/if_dl.h> 65 #include <net/if_types.h> 66 #include <net/if_var.h> 67 #include <net/if_ringmap.h> 68 #include <net/ifq_var.h> 69 #include <net/radix.h> 70 #include <net/route.h> 71 #include <net/if_clone.h> 72 #include <net/netisr2.h> 73 #include <net/netmsg2.h> 74 75 #include <machine/atomic.h> 76 #include <machine/stdarg.h> 77 #include <machine/smp.h> 78 79 #if defined(INET) || defined(INET6) 80 /*XXX*/ 81 #include <netinet/in.h> 82 #include <netinet/in_var.h> 83 #include <netinet/if_ether.h> 84 #ifdef INET6 85 #include <netinet6/in6_var.h> 86 #include <netinet6/in6_ifattach.h> 87 #endif 88 #endif 89 90 struct netmsg_ifaddr { 91 struct netmsg_base base; 92 struct ifaddr *ifa; 93 struct ifnet *ifp; 94 int tail; 95 }; 96 97 struct ifsubq_stage_head { 98 TAILQ_HEAD(, ifsubq_stage) stg_head; 99 } __cachealign; 100 101 struct if_ringmap { 102 int rm_cnt; 103 int rm_grid; 104 int rm_cpumap[]; 105 }; 106 107 /* 108 * System initialization 109 */ 110 static void if_attachdomain(void *); 111 static void if_attachdomain1(struct 
ifnet *); 112 static int ifconf(u_long, caddr_t, struct ucred *); 113 static void ifinit(void *); 114 static void ifnetinit(void *); 115 static void if_slowtimo(void *); 116 static void link_rtrequest(int, struct rtentry *); 117 static int if_rtdel(struct radix_node *, void *); 118 static void if_slowtimo_dispatch(netmsg_t); 119 120 /* Helper functions */ 121 static void ifsq_watchdog_reset(struct ifsubq_watchdog *); 122 static int if_delmulti_serialized(struct ifnet *, struct sockaddr *); 123 static struct ifnet_array *ifnet_array_alloc(int); 124 static void ifnet_array_free(struct ifnet_array *); 125 static struct ifnet_array *ifnet_array_add(struct ifnet *, 126 const struct ifnet_array *); 127 static struct ifnet_array *ifnet_array_del(struct ifnet *, 128 const struct ifnet_array *); 129 130 #ifdef INET6 131 /* 132 * XXX: declare here to avoid to include many inet6 related files.. 133 * should be more generalized? 134 */ 135 extern void nd6_setmtu(struct ifnet *); 136 #endif 137 138 SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers"); 139 SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management"); 140 SYSCTL_NODE(_net_link, OID_AUTO, ringmap, CTLFLAG_RW, 0, "link ringmap"); 141 142 static int ifsq_stage_cntmax = 4; 143 TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax); 144 SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW, 145 &ifsq_stage_cntmax, 0, "ifq staging packet count max"); 146 147 static int if_stats_compat = 0; 148 SYSCTL_INT(_net_link, OID_AUTO, stats_compat, CTLFLAG_RW, 149 &if_stats_compat, 0, "Compat the old ifnet stats"); 150 151 static int if_ringmap_dumprdr = 0; 152 SYSCTL_INT(_net_link_ringmap, OID_AUTO, dump_rdr, CTLFLAG_RW, 153 &if_ringmap_dumprdr, 0, "dump redirect table"); 154 155 SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL); 156 SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, ifnetinit, NULL); 157 158 static if_com_alloc_t *if_com_alloc[256]; 159 static if_com_free_t *if_com_free[256]; 160 161 MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); 162 MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); 163 MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure"); 164 165 int ifqmaxlen = IFQ_MAXLEN; 166 struct ifnethead ifnet = TAILQ_HEAD_INITIALIZER(ifnet); 167 168 static struct ifnet_array ifnet_array0; 169 static struct ifnet_array *ifnet_array = &ifnet_array0; 170 171 static struct callout if_slowtimo_timer; 172 static struct netmsg_base if_slowtimo_netmsg; 173 174 int if_index = 0; 175 struct ifnet **ifindex2ifnet = NULL; 176 static struct mtx ifnet_mtx = MTX_INITIALIZER("ifnet"); 177 178 static struct ifsubq_stage_head ifsubq_stage_heads[MAXCPU]; 179 180 #ifdef notyet 181 #define IFQ_KTR_STRING "ifq=%p" 182 #define IFQ_KTR_ARGS struct ifaltq *ifq 183 #ifndef KTR_IFQ 184 #define KTR_IFQ KTR_ALL 185 #endif 186 KTR_INFO_MASTER(ifq); 187 KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS); 188 KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS); 189 #define logifq(name, arg) KTR_LOG(ifq_ ## name, arg) 190 191 #define IF_START_KTR_STRING "ifp=%p" 192 #define IF_START_KTR_ARGS struct ifnet *ifp 193 #ifndef KTR_IF_START 194 #define KTR_IF_START KTR_ALL 195 #endif 196 KTR_INFO_MASTER(if_start); 197 KTR_INFO(KTR_IF_START, if_start, run, 0, 198 IF_START_KTR_STRING, IF_START_KTR_ARGS); 199 KTR_INFO(KTR_IF_START, if_start, sched, 1, 200 IF_START_KTR_STRING, IF_START_KTR_ARGS); 201 KTR_INFO(KTR_IF_START, if_start, avoid, 2, 202 IF_START_KTR_STRING, 
IF_START_KTR_ARGS); 203 KTR_INFO(KTR_IF_START, if_start, contend_sched, 3, 204 IF_START_KTR_STRING, IF_START_KTR_ARGS); 205 KTR_INFO(KTR_IF_START, if_start, chase_sched, 4, 206 IF_START_KTR_STRING, IF_START_KTR_ARGS); 207 #define logifstart(name, arg) KTR_LOG(if_start_ ## name, arg) 208 #endif 209 210 TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head); 211 212 /* 213 * Network interface utility routines. 214 * 215 * Routines with ifa_ifwith* names take sockaddr *'s as 216 * parameters. 217 */ 218 /* ARGSUSED*/ 219 static void 220 ifinit(void *dummy) 221 { 222 struct ifnet *ifp; 223 224 callout_init_mp(&if_slowtimo_timer); 225 netmsg_init(&if_slowtimo_netmsg, NULL, &netisr_adone_rport, 226 MSGF_PRIORITY, if_slowtimo_dispatch); 227 228 /* XXX is this necessary? */ 229 ifnet_lock(); 230 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 231 if (ifp->if_snd.altq_maxlen == 0) { 232 if_printf(ifp, "XXX: driver didn't set altq_maxlen\n"); 233 ifq_set_maxlen(&ifp->if_snd, ifqmaxlen); 234 } 235 } 236 ifnet_unlock(); 237 238 /* Start if_slowtimo */ 239 lwkt_sendmsg(netisr_cpuport(0), &if_slowtimo_netmsg.lmsg); 240 } 241 242 static void 243 ifsq_ifstart_ipifunc(void *arg) 244 { 245 struct ifaltq_subque *ifsq = arg; 246 struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid); 247 248 crit_enter(); 249 if (lmsg->ms_flags & MSGF_DONE) 250 lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), lmsg); 251 crit_exit(); 252 } 253 254 static __inline void 255 ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage) 256 { 257 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED); 258 TAILQ_REMOVE(&head->stg_head, stage, stg_link); 259 stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED); 260 stage->stg_cnt = 0; 261 stage->stg_len = 0; 262 } 263 264 static __inline void 265 ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage) 266 { 267 KKASSERT((stage->stg_flags & 268 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 269 stage->stg_flags |= IFSQ_STAGE_FLAG_QUED; 270 TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link); 271 } 272 273 /* 274 * Schedule ifnet.if_start on the subqueue owner CPU 275 */ 276 static void 277 ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force) 278 { 279 int cpu; 280 281 if (!force && curthread->td_type == TD_TYPE_NETISR && 282 ifsq_stage_cntmax > 0) { 283 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid); 284 285 stage->stg_cnt = 0; 286 stage->stg_len = 0; 287 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0) 288 ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage); 289 stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED; 290 return; 291 } 292 293 cpu = ifsq_get_cpuid(ifsq); 294 if (cpu != mycpuid) 295 lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq); 296 else 297 ifsq_ifstart_ipifunc(ifsq); 298 } 299 300 /* 301 * NOTE: 302 * This function will release ifnet.if_start subqueue interlock, 303 * if ifnet.if_start for the subqueue does not need to be scheduled 304 */ 305 static __inline int 306 ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running) 307 { 308 if (!running || ifsq_is_empty(ifsq) 309 #ifdef ALTQ 310 || ifsq->ifsq_altq->altq_tbr != NULL 311 #endif 312 ) { 313 ALTQ_SQ_LOCK(ifsq); 314 /* 315 * ifnet.if_start subqueue interlock is released, if: 316 * 1) Hardware can not take any packets, due to 317 * o interface is marked down 318 * o hardware queue is full (ifsq_is_oactive) 319 * Under the second situation, hardware interrupt 320 * or polling(4) will call/schedule 
ifnet.if_start 321 * on the subqueue when hardware queue is ready 322 * 2) There is no packet in the subqueue. 323 * Further ifq_dispatch or ifq_handoff will call/ 324 * schedule ifnet.if_start on the subqueue. 325 * 3) TBR is used and it does not allow further 326 * dequeueing. 327 * TBR callout will call ifnet.if_start on the 328 * subqueue. 329 */ 330 if (!running || !ifsq_data_ready(ifsq)) { 331 ifsq_clr_started(ifsq); 332 ALTQ_SQ_UNLOCK(ifsq); 333 return 0; 334 } 335 ALTQ_SQ_UNLOCK(ifsq); 336 } 337 return 1; 338 } 339 340 static void 341 ifsq_ifstart_dispatch(netmsg_t msg) 342 { 343 struct lwkt_msg *lmsg = &msg->base.lmsg; 344 struct ifaltq_subque *ifsq = lmsg->u.ms_resultp; 345 struct ifnet *ifp = ifsq_get_ifp(ifsq); 346 struct globaldata *gd = mycpu; 347 int running = 0, need_sched; 348 349 crit_enter_gd(gd); 350 351 lwkt_replymsg(lmsg, 0); /* reply ASAP */ 352 353 if (gd->gd_cpuid != ifsq_get_cpuid(ifsq)) { 354 /* 355 * We need to chase the subqueue owner CPU change. 356 */ 357 ifsq_ifstart_schedule(ifsq, 1); 358 crit_exit_gd(gd); 359 return; 360 } 361 362 ifsq_serialize_hw(ifsq); 363 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) { 364 ifp->if_start(ifp, ifsq); 365 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 366 running = 1; 367 } 368 need_sched = ifsq_ifstart_need_schedule(ifsq, running); 369 ifsq_deserialize_hw(ifsq); 370 371 if (need_sched) { 372 /* 373 * More data need to be transmitted, ifnet.if_start is 374 * scheduled on the subqueue owner CPU, and we keep going. 375 * NOTE: ifnet.if_start subqueue interlock is not released. 376 */ 377 ifsq_ifstart_schedule(ifsq, 0); 378 } 379 380 crit_exit_gd(gd); 381 } 382 383 /* Device driver ifnet.if_start helper function */ 384 void 385 ifsq_devstart(struct ifaltq_subque *ifsq) 386 { 387 struct ifnet *ifp = ifsq_get_ifp(ifsq); 388 int running = 0; 389 390 ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq); 391 392 ALTQ_SQ_LOCK(ifsq); 393 if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) { 394 ALTQ_SQ_UNLOCK(ifsq); 395 return; 396 } 397 ifsq_set_started(ifsq); 398 ALTQ_SQ_UNLOCK(ifsq); 399 400 ifp->if_start(ifp, ifsq); 401 402 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 403 running = 1; 404 405 if (ifsq_ifstart_need_schedule(ifsq, running)) { 406 /* 407 * More data need to be transmitted, ifnet.if_start is 408 * scheduled on ifnet's CPU, and we keep going. 409 * NOTE: ifnet.if_start interlock is not released. 
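		 * It is released later by ifsq_ifstart_need_schedule() when
		 * the if_start dispatch scheduled on the subqueue owner CPU
		 * finds nothing more to transmit.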
410 */ 411 ifsq_ifstart_schedule(ifsq, 0); 412 } 413 } 414 415 void 416 if_devstart(struct ifnet *ifp) 417 { 418 ifsq_devstart(ifq_get_subq_default(&ifp->if_snd)); 419 } 420 421 /* Device driver ifnet.if_start schedule helper function */ 422 void 423 ifsq_devstart_sched(struct ifaltq_subque *ifsq) 424 { 425 ifsq_ifstart_schedule(ifsq, 1); 426 } 427 428 void 429 if_devstart_sched(struct ifnet *ifp) 430 { 431 ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd)); 432 } 433 434 static void 435 if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 436 { 437 lwkt_serialize_enter(ifp->if_serializer); 438 } 439 440 static void 441 if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 442 { 443 lwkt_serialize_exit(ifp->if_serializer); 444 } 445 446 static int 447 if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 448 { 449 return lwkt_serialize_try(ifp->if_serializer); 450 } 451 452 #ifdef INVARIANTS 453 static void 454 if_default_serialize_assert(struct ifnet *ifp, 455 enum ifnet_serialize slz __unused, 456 boolean_t serialized) 457 { 458 if (serialized) 459 ASSERT_SERIALIZED(ifp->if_serializer); 460 else 461 ASSERT_NOT_SERIALIZED(ifp->if_serializer); 462 } 463 #endif 464 465 /* 466 * Attach an interface to the list of "active" interfaces. 467 * 468 * The serializer is optional. 469 */ 470 void 471 if_attach(struct ifnet *ifp, lwkt_serialize_t serializer) 472 { 473 unsigned socksize; 474 int namelen, masklen; 475 struct sockaddr_dl *sdl, *sdl_addr; 476 struct ifaddr *ifa; 477 struct ifaltq *ifq; 478 struct ifnet **old_ifindex2ifnet = NULL; 479 struct ifnet_array *old_ifnet_array; 480 int i, q; 481 482 static int if_indexlim = 8; 483 484 if (ifp->if_serialize != NULL) { 485 KASSERT(ifp->if_deserialize != NULL && 486 ifp->if_tryserialize != NULL && 487 ifp->if_serialize_assert != NULL, 488 ("serialize functions are partially setup")); 489 490 /* 491 * If the device supplies serialize functions, 492 * then clear if_serializer to catch any invalid 493 * usage of this field. 494 */ 495 KASSERT(serializer == NULL, 496 ("both serialize functions and default serializer " 497 "are supplied")); 498 ifp->if_serializer = NULL; 499 } else { 500 KASSERT(ifp->if_deserialize == NULL && 501 ifp->if_tryserialize == NULL && 502 ifp->if_serialize_assert == NULL, 503 ("serialize functions are partially setup")); 504 ifp->if_serialize = if_default_serialize; 505 ifp->if_deserialize = if_default_deserialize; 506 ifp->if_tryserialize = if_default_tryserialize; 507 #ifdef INVARIANTS 508 ifp->if_serialize_assert = if_default_serialize_assert; 509 #endif 510 511 /* 512 * The serializer can be passed in from the device, 513 * allowing the same serializer to be used for both 514 * the interrupt interlock and the device queue. 515 * If not specified, the netif structure will use an 516 * embedded serializer. 517 */ 518 if (serializer == NULL) { 519 serializer = &ifp->if_default_serializer; 520 lwkt_serialize_init(serializer); 521 } 522 ifp->if_serializer = serializer; 523 } 524 525 /* 526 * XXX - 527 * The old code would work if the interface passed a pre-existing 528 * chain of ifaddrs to this code. We don't trust our callers to 529 * properly initialize the tailq, however, so we no longer allow 530 * this unlikely case. 
531 */ 532 ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead), 533 M_IFADDR, M_WAITOK | M_ZERO); 534 for (i = 0; i < ncpus; ++i) 535 TAILQ_INIT(&ifp->if_addrheads[i]); 536 537 TAILQ_INIT(&ifp->if_multiaddrs); 538 TAILQ_INIT(&ifp->if_groups); 539 getmicrotime(&ifp->if_lastchange); 540 541 /* 542 * create a Link Level name for this device 543 */ 544 namelen = strlen(ifp->if_xname); 545 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen; 546 socksize = masklen + ifp->if_addrlen; 547 if (socksize < sizeof(*sdl)) 548 socksize = sizeof(*sdl); 549 socksize = RT_ROUNDUP(socksize); 550 ifa = ifa_create(sizeof(struct ifaddr) + 2 * socksize); 551 sdl = sdl_addr = (struct sockaddr_dl *)(ifa + 1); 552 sdl->sdl_len = socksize; 553 sdl->sdl_family = AF_LINK; 554 bcopy(ifp->if_xname, sdl->sdl_data, namelen); 555 sdl->sdl_nlen = namelen; 556 sdl->sdl_type = ifp->if_type; 557 ifp->if_lladdr = ifa; 558 ifa->ifa_ifp = ifp; 559 ifa->ifa_rtrequest = link_rtrequest; 560 ifa->ifa_addr = (struct sockaddr *)sdl; 561 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); 562 ifa->ifa_netmask = (struct sockaddr *)sdl; 563 sdl->sdl_len = masklen; 564 while (namelen != 0) 565 sdl->sdl_data[--namelen] = 0xff; 566 ifa_iflink(ifa, ifp, 0 /* Insert head */); 567 568 ifp->if_data_pcpu = kmalloc_cachealign( 569 ncpus * sizeof(struct ifdata_pcpu), M_DEVBUF, M_WAITOK | M_ZERO); 570 571 if (ifp->if_mapsubq == NULL) 572 ifp->if_mapsubq = ifq_mapsubq_default; 573 574 ifq = &ifp->if_snd; 575 ifq->altq_type = 0; 576 ifq->altq_disc = NULL; 577 ifq->altq_flags &= ALTQF_CANTCHANGE; 578 ifq->altq_tbr = NULL; 579 ifq->altq_ifp = ifp; 580 581 if (ifq->altq_subq_cnt <= 0) 582 ifq->altq_subq_cnt = 1; 583 ifq->altq_subq = kmalloc_cachealign( 584 ifq->altq_subq_cnt * sizeof(struct ifaltq_subque), 585 M_DEVBUF, M_WAITOK | M_ZERO); 586 587 if (ifq->altq_maxlen == 0) { 588 if_printf(ifp, "driver didn't set altq_maxlen\n"); 589 ifq_set_maxlen(ifq, ifqmaxlen); 590 } 591 592 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 593 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 594 595 ALTQ_SQ_LOCK_INIT(ifsq); 596 ifsq->ifsq_index = q; 597 598 ifsq->ifsq_altq = ifq; 599 ifsq->ifsq_ifp = ifp; 600 601 ifsq->ifsq_maxlen = ifq->altq_maxlen; 602 ifsq->ifsq_maxbcnt = ifsq->ifsq_maxlen * MCLBYTES; 603 ifsq->ifsq_prepended = NULL; 604 ifsq->ifsq_started = 0; 605 ifsq->ifsq_hw_oactive = 0; 606 ifsq_set_cpuid(ifsq, 0); 607 if (ifp->if_serializer != NULL) 608 ifsq_set_hw_serialize(ifsq, ifp->if_serializer); 609 610 ifsq->ifsq_stage = 611 kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage), 612 M_DEVBUF, M_WAITOK | M_ZERO); 613 for (i = 0; i < ncpus; ++i) 614 ifsq->ifsq_stage[i].stg_subq = ifsq; 615 616 ifsq->ifsq_ifstart_nmsg = 617 kmalloc(ncpus * sizeof(struct netmsg_base), 618 M_LWKTMSG, M_WAITOK); 619 for (i = 0; i < ncpus; ++i) { 620 netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL, 621 &netisr_adone_rport, 0, ifsq_ifstart_dispatch); 622 ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq; 623 } 624 } 625 ifq_set_classic(ifq); 626 627 /* 628 * Increase mbuf cluster/jcluster limits for the mbufs that 629 * could sit on the device queues for quite some time. 630 */ 631 if (ifp->if_nmbclusters > 0) 632 mcl_inclimit(ifp->if_nmbclusters); 633 if (ifp->if_nmbjclusters > 0) 634 mjcl_inclimit(ifp->if_nmbjclusters); 635 636 /* 637 * Install this ifp into ifindex2inet, ifnet queue and ifnet 638 * array after it is setup. 
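	 *
	 * Netisr threads read the ifnet array locklessly through
	 * ifnet_array_get(), which is why the old array and the old
	 * ifindex2ifnet table are only freed after the
	 * netmsg_service_sync() further below.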
639 * 640 * Protect ifindex2ifnet, ifnet queue and ifnet array changes 641 * by ifnet lock, so that non-netisr threads could get a 642 * consistent view. 643 */ 644 ifnet_lock(); 645 646 /* Don't update if_index until ifindex2ifnet is setup */ 647 ifp->if_index = if_index + 1; 648 sdl_addr->sdl_index = ifp->if_index; 649 650 /* 651 * Install this ifp into ifindex2ifnet 652 */ 653 if (ifindex2ifnet == NULL || ifp->if_index >= if_indexlim) { 654 unsigned int n; 655 struct ifnet **q; 656 657 /* 658 * Grow ifindex2ifnet 659 */ 660 if_indexlim <<= 1; 661 n = if_indexlim * sizeof(*q); 662 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO); 663 if (ifindex2ifnet != NULL) { 664 bcopy(ifindex2ifnet, q, n/2); 665 /* Free old ifindex2ifnet after sync all netisrs */ 666 old_ifindex2ifnet = ifindex2ifnet; 667 } 668 ifindex2ifnet = q; 669 } 670 ifindex2ifnet[ifp->if_index] = ifp; 671 /* 672 * Update if_index after this ifp is installed into ifindex2ifnet, 673 * so that netisrs could get a consistent view of ifindex2ifnet. 674 */ 675 cpu_sfence(); 676 if_index = ifp->if_index; 677 678 /* 679 * Install this ifp into ifnet array. 680 */ 681 /* Free old ifnet array after sync all netisrs */ 682 old_ifnet_array = ifnet_array; 683 ifnet_array = ifnet_array_add(ifp, old_ifnet_array); 684 685 /* 686 * Install this ifp into ifnet queue. 687 */ 688 TAILQ_INSERT_TAIL(&ifnetlist, ifp, if_link); 689 690 ifnet_unlock(); 691 692 /* 693 * Sync all netisrs so that the old ifindex2ifnet and ifnet array 694 * are no longer accessed and we can free them safely later on. 695 */ 696 netmsg_service_sync(); 697 if (old_ifindex2ifnet != NULL) 698 kfree(old_ifindex2ifnet, M_IFADDR); 699 ifnet_array_free(old_ifnet_array); 700 701 if (!SLIST_EMPTY(&domains)) 702 if_attachdomain1(ifp); 703 704 /* Announce the interface. */ 705 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 706 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); 707 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 708 } 709 710 static void 711 if_attachdomain(void *dummy) 712 { 713 struct ifnet *ifp; 714 715 ifnet_lock(); 716 TAILQ_FOREACH(ifp, &ifnetlist, if_list) 717 if_attachdomain1(ifp); 718 ifnet_unlock(); 719 } 720 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST, 721 if_attachdomain, NULL); 722 723 static void 724 if_attachdomain1(struct ifnet *ifp) 725 { 726 struct domain *dp; 727 728 crit_enter(); 729 730 /* address family dependent data region */ 731 bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); 732 SLIST_FOREACH(dp, &domains, dom_next) 733 if (dp->dom_ifattach) 734 ifp->if_afdata[dp->dom_family] = 735 (*dp->dom_ifattach)(ifp); 736 crit_exit(); 737 } 738 739 /* 740 * Purge all addresses whose type is _not_ AF_LINK 741 */ 742 static void 743 if_purgeaddrs_nolink_dispatch(netmsg_t nmsg) 744 { 745 struct lwkt_msg *lmsg = &nmsg->lmsg; 746 struct ifnet *ifp = lmsg->u.ms_resultp; 747 struct ifaddr_container *ifac, *next; 748 749 ASSERT_IN_NETISR(0); 750 751 /* 752 * The ifaddr processing in the following loop will block, 753 * however, this function is called in netisr0, in which 754 * ifaddr list changes happen, so we don't care about the 755 * blockness of the ifaddr processing here. 756 */ 757 TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid], 758 ifa_link, next) { 759 struct ifaddr *ifa = ifac->ifa; 760 761 /* Ignore marker */ 762 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 763 continue; 764 765 /* Leave link ifaddr as it is */ 766 if (ifa->ifa_addr->sa_family == AF_LINK) 767 continue; 768 #ifdef INET 769 /* XXX: Ugly!! 
ad hoc just for INET */ 770 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) { 771 struct ifaliasreq ifr; 772 #ifdef IFADDR_DEBUG_VERBOSE 773 int i; 774 775 kprintf("purge in4 addr %p: ", ifa); 776 for (i = 0; i < ncpus; ++i) 777 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt); 778 kprintf("\n"); 779 #endif 780 781 bzero(&ifr, sizeof ifr); 782 ifr.ifra_addr = *ifa->ifa_addr; 783 if (ifa->ifa_dstaddr) 784 ifr.ifra_broadaddr = *ifa->ifa_dstaddr; 785 if (in_control(SIOCDIFADDR, (caddr_t)&ifr, ifp, 786 NULL) == 0) 787 continue; 788 } 789 #endif /* INET */ 790 #ifdef INET6 791 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) { 792 #ifdef IFADDR_DEBUG_VERBOSE 793 int i; 794 795 kprintf("purge in6 addr %p: ", ifa); 796 for (i = 0; i < ncpus; ++i) 797 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt); 798 kprintf("\n"); 799 #endif 800 801 in6_purgeaddr(ifa); 802 /* ifp_addrhead is already updated */ 803 continue; 804 } 805 #endif /* INET6 */ 806 ifa_ifunlink(ifa, ifp); 807 ifa_destroy(ifa); 808 } 809 810 lwkt_replymsg(lmsg, 0); 811 } 812 813 void 814 if_purgeaddrs_nolink(struct ifnet *ifp) 815 { 816 struct netmsg_base nmsg; 817 struct lwkt_msg *lmsg = &nmsg.lmsg; 818 819 ASSERT_CANDOMSG_NETISR0(curthread); 820 821 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0, 822 if_purgeaddrs_nolink_dispatch); 823 lmsg->u.ms_resultp = ifp; 824 lwkt_domsg(netisr_cpuport(0), lmsg, 0); 825 } 826 827 static void 828 ifq_stage_detach_handler(netmsg_t nmsg) 829 { 830 struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp; 831 int q; 832 833 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 834 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 835 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid); 836 837 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) 838 ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage); 839 } 840 lwkt_replymsg(&nmsg->lmsg, 0); 841 } 842 843 static void 844 ifq_stage_detach(struct ifaltq *ifq) 845 { 846 struct netmsg_base base; 847 int cpu; 848 849 netmsg_init(&base, NULL, &curthread->td_msgport, 0, 850 ifq_stage_detach_handler); 851 base.lmsg.u.ms_resultp = ifq; 852 853 for (cpu = 0; cpu < ncpus; ++cpu) 854 lwkt_domsg(netisr_cpuport(cpu), &base.lmsg, 0); 855 } 856 857 struct netmsg_if_rtdel { 858 struct netmsg_base base; 859 struct ifnet *ifp; 860 }; 861 862 static void 863 if_rtdel_dispatch(netmsg_t msg) 864 { 865 struct netmsg_if_rtdel *rmsg = (void *)msg; 866 int i, nextcpu, cpu; 867 868 cpu = mycpuid; 869 for (i = 1; i <= AF_MAX; i++) { 870 struct radix_node_head *rnh; 871 872 if ((rnh = rt_tables[cpu][i]) == NULL) 873 continue; 874 rnh->rnh_walktree(rnh, if_rtdel, rmsg->ifp); 875 } 876 877 nextcpu = cpu + 1; 878 if (nextcpu < ncpus) 879 lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg); 880 else 881 lwkt_replymsg(&rmsg->base.lmsg, 0); 882 } 883 884 /* 885 * Detach an interface, removing it from the 886 * list of "active" interfaces. 887 */ 888 void 889 if_detach(struct ifnet *ifp) 890 { 891 struct ifnet_array *old_ifnet_array; 892 struct netmsg_if_rtdel msg; 893 struct domain *dp; 894 int q; 895 896 /* Announce that the interface is gone. */ 897 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp); 898 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 899 devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL); 900 901 /* 902 * Remove this ifp from ifindex2inet, ifnet queue and ifnet 903 * array before it is whacked. 904 * 905 * Protect ifindex2ifnet, ifnet queue and ifnet array changes 906 * by ifnet lock, so that non-netisr threads could get a 907 * consistent view. 
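	 *
	 * The removal order mirrors if_attach(); the old ifnet array is
	 * only reclaimed after the netmsg_service_sync() below.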
908 */ 909 ifnet_lock(); 910 911 /* 912 * Remove this ifp from ifindex2ifnet and maybe decrement if_index. 913 */ 914 ifindex2ifnet[ifp->if_index] = NULL; 915 while (if_index > 0 && ifindex2ifnet[if_index] == NULL) 916 if_index--; 917 918 /* 919 * Remove this ifp from ifnet queue. 920 */ 921 TAILQ_REMOVE(&ifnetlist, ifp, if_link); 922 923 /* 924 * Remove this ifp from ifnet array. 925 */ 926 /* Free old ifnet array after sync all netisrs */ 927 old_ifnet_array = ifnet_array; 928 ifnet_array = ifnet_array_del(ifp, old_ifnet_array); 929 930 ifnet_unlock(); 931 932 /* 933 * Sync all netisrs so that the old ifnet array is no longer 934 * accessed and we can free it safely later on. 935 */ 936 netmsg_service_sync(); 937 ifnet_array_free(old_ifnet_array); 938 939 /* 940 * Remove routes and flush queues. 941 */ 942 crit_enter(); 943 #ifdef IFPOLL_ENABLE 944 if (ifp->if_flags & IFF_NPOLLING) 945 ifpoll_deregister(ifp); 946 #endif 947 if_down(ifp); 948 949 /* Decrease the mbuf clusters/jclusters limits increased by us */ 950 if (ifp->if_nmbclusters > 0) 951 mcl_inclimit(-ifp->if_nmbclusters); 952 if (ifp->if_nmbjclusters > 0) 953 mjcl_inclimit(-ifp->if_nmbjclusters); 954 955 #ifdef ALTQ 956 if (ifq_is_enabled(&ifp->if_snd)) 957 altq_disable(&ifp->if_snd); 958 if (ifq_is_attached(&ifp->if_snd)) 959 altq_detach(&ifp->if_snd); 960 #endif 961 962 /* 963 * Clean up all addresses. 964 */ 965 ifp->if_lladdr = NULL; 966 967 if_purgeaddrs_nolink(ifp); 968 if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) { 969 struct ifaddr *ifa; 970 971 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa; 972 KASSERT(ifa->ifa_addr->sa_family == AF_LINK, 973 ("non-link ifaddr is left on if_addrheads")); 974 975 ifa_ifunlink(ifa, ifp); 976 ifa_destroy(ifa); 977 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]), 978 ("there are still ifaddrs left on if_addrheads")); 979 } 980 981 #ifdef INET 982 /* 983 * Remove all IPv4 kernel structures related to ifp. 984 */ 985 in_ifdetach(ifp); 986 #endif 987 988 #ifdef INET6 989 /* 990 * Remove all IPv6 kernel structs related to ifp. This should be done 991 * before removing routing entries below, since IPv6 interface direct 992 * routes are expected to be removed by the IPv6-specific kernel API. 993 * Otherwise, the kernel will detect some inconsistency and bark it. 
994 */ 995 in6_ifdetach(ifp); 996 #endif 997 998 /* 999 * Delete all remaining routes using this interface 1000 */ 1001 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, 1002 if_rtdel_dispatch); 1003 msg.ifp = ifp; 1004 rt_domsg_global(&msg.base); 1005 1006 SLIST_FOREACH(dp, &domains, dom_next) 1007 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) 1008 (*dp->dom_ifdetach)(ifp, 1009 ifp->if_afdata[dp->dom_family]); 1010 1011 kfree(ifp->if_addrheads, M_IFADDR); 1012 1013 lwkt_synchronize_ipiqs("if_detach"); 1014 ifq_stage_detach(&ifp->if_snd); 1015 1016 for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) { 1017 struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q]; 1018 1019 kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG); 1020 kfree(ifsq->ifsq_stage, M_DEVBUF); 1021 } 1022 kfree(ifp->if_snd.altq_subq, M_DEVBUF); 1023 1024 kfree(ifp->if_data_pcpu, M_DEVBUF); 1025 1026 crit_exit(); 1027 } 1028 1029 /* 1030 * Create interface group without members 1031 */ 1032 struct ifg_group * 1033 if_creategroup(const char *groupname) 1034 { 1035 struct ifg_group *ifg = NULL; 1036 1037 if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group), 1038 M_TEMP, M_NOWAIT)) == NULL) 1039 return (NULL); 1040 1041 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); 1042 ifg->ifg_refcnt = 0; 1043 ifg->ifg_carp_demoted = 0; 1044 TAILQ_INIT(&ifg->ifg_members); 1045 #if NPF > 0 1046 pfi_attach_ifgroup(ifg); 1047 #endif 1048 TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next); 1049 1050 return (ifg); 1051 } 1052 1053 /* 1054 * Add a group to an interface 1055 */ 1056 int 1057 if_addgroup(struct ifnet *ifp, const char *groupname) 1058 { 1059 struct ifg_list *ifgl; 1060 struct ifg_group *ifg = NULL; 1061 struct ifg_member *ifgm; 1062 1063 if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' && 1064 groupname[strlen(groupname) - 1] <= '9') 1065 return (EINVAL); 1066 1067 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1068 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1069 return (EEXIST); 1070 1071 if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL) 1072 return (ENOMEM); 1073 1074 if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) { 1075 kfree(ifgl, M_TEMP); 1076 return (ENOMEM); 1077 } 1078 1079 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) 1080 if (!strcmp(ifg->ifg_group, groupname)) 1081 break; 1082 1083 if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) { 1084 kfree(ifgl, M_TEMP); 1085 kfree(ifgm, M_TEMP); 1086 return (ENOMEM); 1087 } 1088 1089 ifg->ifg_refcnt++; 1090 ifgl->ifgl_group = ifg; 1091 ifgm->ifgm_ifp = ifp; 1092 1093 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); 1094 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); 1095 1096 #if NPF > 0 1097 pfi_group_change(groupname); 1098 #endif 1099 1100 return (0); 1101 } 1102 1103 /* 1104 * Remove a group from an interface 1105 */ 1106 int 1107 if_delgroup(struct ifnet *ifp, const char *groupname) 1108 { 1109 struct ifg_list *ifgl; 1110 struct ifg_member *ifgm; 1111 1112 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1113 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1114 break; 1115 if (ifgl == NULL) 1116 return (ENOENT); 1117 1118 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); 1119 1120 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) 1121 if (ifgm->ifgm_ifp == ifp) 1122 break; 1123 1124 if (ifgm != NULL) { 1125 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); 1126 kfree(ifgm, M_TEMP); 1127 } 1128 1129 if (--ifgl->ifgl_group->ifg_refcnt == 0) { 1130 
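		/* Last reference went away; unlink the group and free it. */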
TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next); 1131 #if NPF > 0 1132 pfi_detach_ifgroup(ifgl->ifgl_group); 1133 #endif 1134 kfree(ifgl->ifgl_group, M_TEMP); 1135 } 1136 1137 kfree(ifgl, M_TEMP); 1138 1139 #if NPF > 0 1140 pfi_group_change(groupname); 1141 #endif 1142 1143 return (0); 1144 } 1145 1146 /* 1147 * Stores all groups from an interface in memory pointed 1148 * to by data 1149 */ 1150 int 1151 if_getgroup(caddr_t data, struct ifnet *ifp) 1152 { 1153 int len, error; 1154 struct ifg_list *ifgl; 1155 struct ifg_req ifgrq, *ifgp; 1156 struct ifgroupreq *ifgr = (struct ifgroupreq *)data; 1157 1158 if (ifgr->ifgr_len == 0) { 1159 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1160 ifgr->ifgr_len += sizeof(struct ifg_req); 1161 return (0); 1162 } 1163 1164 len = ifgr->ifgr_len; 1165 ifgp = ifgr->ifgr_groups; 1166 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { 1167 if (len < sizeof(ifgrq)) 1168 return (EINVAL); 1169 bzero(&ifgrq, sizeof ifgrq); 1170 strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group, 1171 sizeof(ifgrq.ifgrq_group)); 1172 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, 1173 sizeof(struct ifg_req)))) 1174 return (error); 1175 len -= sizeof(ifgrq); 1176 ifgp++; 1177 } 1178 1179 return (0); 1180 } 1181 1182 /* 1183 * Stores all members of a group in memory pointed to by data 1184 */ 1185 int 1186 if_getgroupmembers(caddr_t data) 1187 { 1188 struct ifgroupreq *ifgr = (struct ifgroupreq *)data; 1189 struct ifg_group *ifg; 1190 struct ifg_member *ifgm; 1191 struct ifg_req ifgrq, *ifgp; 1192 int len, error; 1193 1194 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) 1195 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) 1196 break; 1197 if (ifg == NULL) 1198 return (ENOENT); 1199 1200 if (ifgr->ifgr_len == 0) { 1201 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) 1202 ifgr->ifgr_len += sizeof(ifgrq); 1203 return (0); 1204 } 1205 1206 len = ifgr->ifgr_len; 1207 ifgp = ifgr->ifgr_groups; 1208 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) { 1209 if (len < sizeof(ifgrq)) 1210 return (EINVAL); 1211 bzero(&ifgrq, sizeof ifgrq); 1212 strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname, 1213 sizeof(ifgrq.ifgrq_member)); 1214 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, 1215 sizeof(struct ifg_req)))) 1216 return (error); 1217 len -= sizeof(ifgrq); 1218 ifgp++; 1219 } 1220 1221 return (0); 1222 } 1223 1224 /* 1225 * Delete Routes for a Network Interface 1226 * 1227 * Called for each routing entry via the rnh->rnh_walktree() call above 1228 * to delete all route entries referencing a detaching network interface. 
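 * For example, if_rtdel_dispatch() above walks each per-cpu routing
 * table roughly as
 *
 *	rnh->rnh_walktree(rnh, if_rtdel, ifp);
 *
 * so the 'arg' parameter is the detaching ifnet.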
1229 * 1230 * Arguments: 1231 * rn pointer to node in the routing table 1232 * arg argument passed to rnh->rnh_walktree() - detaching interface 1233 * 1234 * Returns: 1235 * 0 successful 1236 * errno failed - reason indicated 1237 * 1238 */ 1239 static int 1240 if_rtdel(struct radix_node *rn, void *arg) 1241 { 1242 struct rtentry *rt = (struct rtentry *)rn; 1243 struct ifnet *ifp = arg; 1244 int err; 1245 1246 if (rt->rt_ifp == ifp) { 1247 1248 /* 1249 * Protect (sorta) against walktree recursion problems 1250 * with cloned routes 1251 */ 1252 if (!(rt->rt_flags & RTF_UP)) 1253 return (0); 1254 1255 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, 1256 rt_mask(rt), rt->rt_flags, 1257 NULL); 1258 if (err) { 1259 log(LOG_WARNING, "if_rtdel: error %d\n", err); 1260 } 1261 } 1262 1263 return (0); 1264 } 1265 1266 static __inline boolean_t 1267 ifa_prefer(const struct ifaddr *cur_ifa, const struct ifaddr *old_ifa) 1268 { 1269 if (old_ifa == NULL) 1270 return TRUE; 1271 1272 if ((old_ifa->ifa_ifp->if_flags & IFF_UP) == 0 && 1273 (cur_ifa->ifa_ifp->if_flags & IFF_UP)) 1274 return TRUE; 1275 if ((old_ifa->ifa_flags & IFA_ROUTE) == 0 && 1276 (cur_ifa->ifa_flags & IFA_ROUTE)) 1277 return TRUE; 1278 return FALSE; 1279 } 1280 1281 /* 1282 * Locate an interface based on a complete address. 1283 */ 1284 struct ifaddr * 1285 ifa_ifwithaddr(struct sockaddr *addr) 1286 { 1287 const struct ifnet_array *arr; 1288 int i; 1289 1290 arr = ifnet_array_get(); 1291 for (i = 0; i < arr->ifnet_count; ++i) { 1292 struct ifnet *ifp = arr->ifnet_arr[i]; 1293 struct ifaddr_container *ifac; 1294 1295 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1296 struct ifaddr *ifa = ifac->ifa; 1297 1298 if (ifa->ifa_addr->sa_family != addr->sa_family) 1299 continue; 1300 if (sa_equal(addr, ifa->ifa_addr)) 1301 return (ifa); 1302 if ((ifp->if_flags & IFF_BROADCAST) && 1303 ifa->ifa_broadaddr && 1304 /* IPv6 doesn't have broadcast */ 1305 ifa->ifa_broadaddr->sa_len != 0 && 1306 sa_equal(ifa->ifa_broadaddr, addr)) 1307 return (ifa); 1308 } 1309 } 1310 return (NULL); 1311 } 1312 1313 /* 1314 * Locate the point to point interface with a given destination address. 1315 */ 1316 struct ifaddr * 1317 ifa_ifwithdstaddr(struct sockaddr *addr) 1318 { 1319 const struct ifnet_array *arr; 1320 int i; 1321 1322 arr = ifnet_array_get(); 1323 for (i = 0; i < arr->ifnet_count; ++i) { 1324 struct ifnet *ifp = arr->ifnet_arr[i]; 1325 struct ifaddr_container *ifac; 1326 1327 if (!(ifp->if_flags & IFF_POINTOPOINT)) 1328 continue; 1329 1330 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1331 struct ifaddr *ifa = ifac->ifa; 1332 1333 if (ifa->ifa_addr->sa_family != addr->sa_family) 1334 continue; 1335 if (ifa->ifa_dstaddr && 1336 sa_equal(addr, ifa->ifa_dstaddr)) 1337 return (ifa); 1338 } 1339 } 1340 return (NULL); 1341 } 1342 1343 /* 1344 * Find an interface on a specific network. If many, choice 1345 * is most specific found. 1346 */ 1347 struct ifaddr * 1348 ifa_ifwithnet(struct sockaddr *addr) 1349 { 1350 struct ifaddr *ifa_maybe = NULL; 1351 u_int af = addr->sa_family; 1352 char *addr_data = addr->sa_data, *cplim; 1353 const struct ifnet_array *arr; 1354 int i; 1355 1356 /* 1357 * AF_LINK addresses can be looked up directly by their index number, 1358 * so do that if we can. 
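	 * (sdl_index is assigned by if_attach(), so a filled-in sockaddr_dl
	 * maps straight to ifindex2ifnet[] without scanning.)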
 */
	if (af == AF_LINK) {
		struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;

		if (sdl->sdl_index && sdl->sdl_index <= if_index)
			return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
	}

	/*
	 * Scan through each interface, looking for ones that have
	 * addresses in this address family.
	 */
	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;
			char *cp, *cp2, *cp3;

			if (ifa->ifa_addr->sa_family != af)
next:				continue;
			if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
				/*
				 * This is a bit broken as it doesn't
				 * take into account that the remote end may
				 * be a single node in the network we are
				 * looking for.
				 * The trouble is that we don't know the
				 * netmask for the remote end.
				 */
				if (ifa->ifa_dstaddr != NULL &&
				    sa_equal(addr, ifa->ifa_dstaddr))
					return (ifa);
			} else {
				/*
				 * If we have a special address handler,
				 * then use it instead of the generic one.
				 */
				if (ifa->ifa_claim_addr) {
					if ((*ifa->ifa_claim_addr)(ifa, addr)) {
						return (ifa);
					} else {
						continue;
					}
				}

				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit disagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
				 * (A byte at a time)
				 */
				if (ifa->ifa_netmask == NULL)
					continue;
				cp = addr_data;
				cp2 = ifa->ifa_addr->sa_data;
				cp3 = ifa->ifa_netmask->sa_data;
				cplim = ifa->ifa_netmask->sa_len +
					(char *)ifa->ifa_netmask;
				while (cp3 < cplim)
					if ((*cp++ ^ *cp2++) & *cp3++)
						goto next; /* next address! */
				/*
				 * If the netmask of what we just found
				 * is more specific than what we had before
				 * (if we had one), then remember the new one
				 * before continuing to search for an even
				 * better one.  If the netmasks are equal,
				 * we prefer this ifa based on the result
				 * of ifa_prefer().
				 */
				if (ifa_maybe == NULL ||
				    rn_refines((char *)ifa->ifa_netmask,
					(char *)ifa_maybe->ifa_netmask) ||
				    (sa_equal(ifa_maybe->ifa_netmask,
					ifa->ifa_netmask) &&
				     ifa_prefer(ifa, ifa_maybe)))
					ifa_maybe = ifa;
			}
		}
	}
	return (ifa_maybe);
}

/*
 * Find an interface address specific to an interface best matching
 * a given address.
1449 */ 1450 struct ifaddr * 1451 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp) 1452 { 1453 struct ifaddr_container *ifac; 1454 char *cp, *cp2, *cp3; 1455 char *cplim; 1456 struct ifaddr *ifa_maybe = NULL; 1457 u_int af = addr->sa_family; 1458 1459 if (af >= AF_MAX) 1460 return (0); 1461 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1462 struct ifaddr *ifa = ifac->ifa; 1463 1464 if (ifa->ifa_addr->sa_family != af) 1465 continue; 1466 if (ifa_maybe == NULL) 1467 ifa_maybe = ifa; 1468 if (ifa->ifa_netmask == NULL) { 1469 if (sa_equal(addr, ifa->ifa_addr) || 1470 (ifa->ifa_dstaddr != NULL && 1471 sa_equal(addr, ifa->ifa_dstaddr))) 1472 return (ifa); 1473 continue; 1474 } 1475 if (ifp->if_flags & IFF_POINTOPOINT) { 1476 if (sa_equal(addr, ifa->ifa_dstaddr)) 1477 return (ifa); 1478 } else { 1479 cp = addr->sa_data; 1480 cp2 = ifa->ifa_addr->sa_data; 1481 cp3 = ifa->ifa_netmask->sa_data; 1482 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; 1483 for (; cp3 < cplim; cp3++) 1484 if ((*cp++ ^ *cp2++) & *cp3) 1485 break; 1486 if (cp3 == cplim) 1487 return (ifa); 1488 } 1489 } 1490 return (ifa_maybe); 1491 } 1492 1493 /* 1494 * Default action when installing a route with a Link Level gateway. 1495 * Lookup an appropriate real ifa to point to. 1496 * This should be moved to /sys/net/link.c eventually. 1497 */ 1498 static void 1499 link_rtrequest(int cmd, struct rtentry *rt) 1500 { 1501 struct ifaddr *ifa; 1502 struct sockaddr *dst; 1503 struct ifnet *ifp; 1504 1505 if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL || 1506 (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL) 1507 return; 1508 ifa = ifaof_ifpforaddr(dst, ifp); 1509 if (ifa != NULL) { 1510 IFAFREE(rt->rt_ifa); 1511 IFAREF(ifa); 1512 rt->rt_ifa = ifa; 1513 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) 1514 ifa->ifa_rtrequest(cmd, rt); 1515 } 1516 } 1517 1518 struct netmsg_ifroute { 1519 struct netmsg_base base; 1520 struct ifnet *ifp; 1521 int flag; 1522 int fam; 1523 }; 1524 1525 /* 1526 * Mark an interface down and notify protocols of the transition. 1527 */ 1528 static void 1529 if_unroute_dispatch(netmsg_t nmsg) 1530 { 1531 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1532 struct ifnet *ifp = msg->ifp; 1533 int flag = msg->flag, fam = msg->fam; 1534 struct ifaddr_container *ifac; 1535 1536 ifp->if_flags &= ~flag; 1537 getmicrotime(&ifp->if_lastchange); 1538 /* 1539 * The ifaddr processing in the following loop will block, 1540 * however, this function is called in netisr0, in which 1541 * ifaddr list changes happen, so we don't care about the 1542 * blockness of the ifaddr processing here. 1543 */ 1544 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1545 struct ifaddr *ifa = ifac->ifa; 1546 1547 /* Ignore marker */ 1548 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1549 continue; 1550 1551 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1552 kpfctlinput(PRC_IFDOWN, ifa->ifa_addr); 1553 } 1554 ifq_purge_all(&ifp->if_snd); 1555 rt_ifmsg(ifp); 1556 1557 lwkt_replymsg(&nmsg->lmsg, 0); 1558 } 1559 1560 void 1561 if_unroute(struct ifnet *ifp, int flag, int fam) 1562 { 1563 struct netmsg_ifroute msg; 1564 1565 ASSERT_CANDOMSG_NETISR0(curthread); 1566 1567 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1568 if_unroute_dispatch); 1569 msg.ifp = ifp; 1570 msg.flag = flag; 1571 msg.fam = fam; 1572 lwkt_domsg(netisr_cpuport(0), &msg.base.lmsg, 0); 1573 } 1574 1575 /* 1576 * Mark an interface up and notify protocols of the transition. 
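 * This dispatch runs in netisr0; other contexts use if_up()/if_route(),
 * which forward a netmsg here.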
 */
static void
if_route_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg;
	struct ifnet *ifp = msg->ifp;
	int flag = msg->flag, fam = msg->fam;
	struct ifaddr_container *ifac;

	ifq_purge_all(&ifp->if_snd);
	ifp->if_flags |= flag;
	getmicrotime(&ifp->if_lastchange);
	/*
	 * The ifaddr processing in the following loop will block,
	 * however, this function is called in netisr0, in which
	 * ifaddr list changes happen, so we don't care that the
	 * ifaddr processing here blocks.
	 */
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		/* Ignore marker */
		if (ifa->ifa_addr->sa_family == AF_UNSPEC)
			continue;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFUP, ifa->ifa_addr);
	}
	rt_ifmsg(ifp);
#ifdef INET6
	in6_if_up(ifp);
#endif

	lwkt_replymsg(&nmsg->lmsg, 0);
}

void
if_route(struct ifnet *ifp, int flag, int fam)
{
	struct netmsg_ifroute msg;

	ASSERT_CANDOMSG_NETISR0(curthread);

	netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0,
	    if_route_dispatch);
	msg.ifp = ifp;
	msg.flag = flag;
	msg.fam = fam;
	lwkt_domsg(netisr_cpuport(0), &msg.base.lmsg, 0);
}

/*
 * Mark an interface down and notify protocols of the transition.  An
 * interface going down is also considered to be a synchronizing event.
 * We must ensure that all packet processing related to the interface
 * has completed before we return so e.g. the caller can free the ifnet
 * structure that the mbufs may be referencing.
 *
 * NOTE: must be called at splnet or equivalent.
 */
void
if_down(struct ifnet *ifp)
{
	if_unroute(ifp, IFF_UP, AF_UNSPEC);
	netmsg_service_sync();
}

/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_up(struct ifnet *ifp)
{
	if_route(ifp, IFF_UP, AF_UNSPEC);
}

/*
 * Process a link state change.
 * NOTE: must be called at splsoftnet or equivalent.
 */
void
if_link_state_change(struct ifnet *ifp)
{
	int link_state = ifp->if_link_state;

	rt_ifmsg(ifp);
	devctl_notify("IFNET", ifp->if_xname,
	    (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
}

/*
 * Handle interface watchdog timer routines.  Called
 * from softclock, we decrement timers (if set) and
 * call the appropriate interface routine on expiration.
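 *
 * A driver that wants watchdog service points if_watchdog at its handler
 * and arms if_timer from its transmit path.  A hypothetical xx(4) driver
 * (illustration only, not part of this file) would do roughly:
 *
 *	ifp->if_watchdog = xx_watchdog;
 *	ifp->if_timer = 5;	(counts down once per if_slowtimo pass)
 *	...
 *	ifp->if_timer = 0;	(disarm once all transmissions complete)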
1673 */ 1674 static void 1675 if_slowtimo_dispatch(netmsg_t nmsg) 1676 { 1677 struct globaldata *gd = mycpu; 1678 const struct ifnet_array *arr; 1679 int i; 1680 1681 ASSERT_IN_NETISR(0); 1682 1683 crit_enter_gd(gd); 1684 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */ 1685 crit_exit_gd(gd); 1686 1687 arr = ifnet_array_get(); 1688 for (i = 0; i < arr->ifnet_count; ++i) { 1689 struct ifnet *ifp = arr->ifnet_arr[i]; 1690 1691 crit_enter_gd(gd); 1692 1693 if (if_stats_compat) { 1694 IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets); 1695 IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors); 1696 IFNET_STAT_GET(ifp, opackets, ifp->if_opackets); 1697 IFNET_STAT_GET(ifp, oerrors, ifp->if_oerrors); 1698 IFNET_STAT_GET(ifp, collisions, ifp->if_collisions); 1699 IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes); 1700 IFNET_STAT_GET(ifp, obytes, ifp->if_obytes); 1701 IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts); 1702 IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts); 1703 IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops); 1704 IFNET_STAT_GET(ifp, noproto, ifp->if_noproto); 1705 IFNET_STAT_GET(ifp, oqdrops, ifp->if_oqdrops); 1706 } 1707 1708 if (ifp->if_timer == 0 || --ifp->if_timer) { 1709 crit_exit_gd(gd); 1710 continue; 1711 } 1712 if (ifp->if_watchdog) { 1713 if (ifnet_tryserialize_all(ifp)) { 1714 (*ifp->if_watchdog)(ifp); 1715 ifnet_deserialize_all(ifp); 1716 } else { 1717 /* try again next timeout */ 1718 ++ifp->if_timer; 1719 } 1720 } 1721 1722 crit_exit_gd(gd); 1723 } 1724 1725 callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL); 1726 } 1727 1728 static void 1729 if_slowtimo(void *arg __unused) 1730 { 1731 struct lwkt_msg *lmsg = &if_slowtimo_netmsg.lmsg; 1732 1733 KASSERT(mycpuid == 0, ("not on cpu0")); 1734 crit_enter(); 1735 if (lmsg->ms_flags & MSGF_DONE) 1736 lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg); 1737 crit_exit(); 1738 } 1739 1740 /* 1741 * Map interface name to 1742 * interface structure pointer. 1743 */ 1744 struct ifnet * 1745 ifunit(const char *name) 1746 { 1747 struct ifnet *ifp; 1748 1749 /* 1750 * Search all the interfaces for this name/number 1751 */ 1752 KASSERT(mtx_owned(&ifnet_mtx), ("ifnet is not locked")); 1753 1754 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 1755 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1756 break; 1757 } 1758 return (ifp); 1759 } 1760 1761 struct ifnet * 1762 ifunit_netisr(const char *name) 1763 { 1764 const struct ifnet_array *arr; 1765 int i; 1766 1767 /* 1768 * Search all the interfaces for this name/number 1769 */ 1770 1771 arr = ifnet_array_get(); 1772 for (i = 0; i < arr->ifnet_count; ++i) { 1773 struct ifnet *ifp = arr->ifnet_arr[i]; 1774 1775 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1776 return ifp; 1777 } 1778 return NULL; 1779 } 1780 1781 /* 1782 * Interface ioctls. 
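 *
 * These back the ioctl(2) requests that ifconfig(8) and friends issue on
 * sockets.  A userland sketch (the interface name is only an example):
 *
 *	struct ifreq ifr;
 *
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(s, SIOCGIFMTU, &ifr);	(returns the MTU in ifr.ifr_mtu)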
1783 */ 1784 int 1785 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred) 1786 { 1787 struct ifnet *ifp; 1788 struct ifreq *ifr; 1789 struct ifstat *ifs; 1790 int error, do_ifup = 0; 1791 short oif_flags; 1792 int new_flags; 1793 size_t namelen, onamelen; 1794 char new_name[IFNAMSIZ]; 1795 struct ifaddr *ifa; 1796 struct sockaddr_dl *sdl; 1797 1798 switch (cmd) { 1799 case SIOCGIFCONF: 1800 case OSIOCGIFCONF: 1801 return (ifconf(cmd, data, cred)); 1802 default: 1803 break; 1804 } 1805 1806 ifr = (struct ifreq *)data; 1807 1808 switch (cmd) { 1809 case SIOCIFCREATE: 1810 case SIOCIFCREATE2: 1811 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1812 return (error); 1813 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), 1814 cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL)); 1815 case SIOCIFDESTROY: 1816 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1817 return (error); 1818 return (if_clone_destroy(ifr->ifr_name)); 1819 case SIOCIFGCLONERS: 1820 return (if_clone_list((struct if_clonereq *)data)); 1821 default: 1822 break; 1823 } 1824 1825 /* 1826 * Nominal ioctl through interface, lookup the ifp and obtain a 1827 * lock to serialize the ifconfig ioctl operation. 1828 */ 1829 ifnet_lock(); 1830 1831 ifp = ifunit(ifr->ifr_name); 1832 if (ifp == NULL) { 1833 ifnet_unlock(); 1834 return (ENXIO); 1835 } 1836 error = 0; 1837 1838 switch (cmd) { 1839 case SIOCGIFINDEX: 1840 ifr->ifr_index = ifp->if_index; 1841 break; 1842 1843 case SIOCGIFFLAGS: 1844 ifr->ifr_flags = ifp->if_flags; 1845 ifr->ifr_flagshigh = ifp->if_flags >> 16; 1846 break; 1847 1848 case SIOCGIFCAP: 1849 ifr->ifr_reqcap = ifp->if_capabilities; 1850 ifr->ifr_curcap = ifp->if_capenable; 1851 break; 1852 1853 case SIOCGIFMETRIC: 1854 ifr->ifr_metric = ifp->if_metric; 1855 break; 1856 1857 case SIOCGIFMTU: 1858 ifr->ifr_mtu = ifp->if_mtu; 1859 break; 1860 1861 case SIOCGIFTSOLEN: 1862 ifr->ifr_tsolen = ifp->if_tsolen; 1863 break; 1864 1865 case SIOCGIFDATA: 1866 error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data, 1867 sizeof(ifp->if_data)); 1868 break; 1869 1870 case SIOCGIFPHYS: 1871 ifr->ifr_phys = ifp->if_physical; 1872 break; 1873 1874 case SIOCGIFPOLLCPU: 1875 ifr->ifr_pollcpu = -1; 1876 break; 1877 1878 case SIOCSIFPOLLCPU: 1879 break; 1880 1881 case SIOCSIFFLAGS: 1882 error = priv_check_cred(cred, PRIV_ROOT, 0); 1883 if (error) 1884 break; 1885 new_flags = (ifr->ifr_flags & 0xffff) | 1886 (ifr->ifr_flagshigh << 16); 1887 if (ifp->if_flags & IFF_SMART) { 1888 /* Smart drivers twiddle their own routes */ 1889 } else if (ifp->if_flags & IFF_UP && 1890 (new_flags & IFF_UP) == 0) { 1891 if_down(ifp); 1892 } else if (new_flags & IFF_UP && 1893 (ifp->if_flags & IFF_UP) == 0) { 1894 do_ifup = 1; 1895 } 1896 1897 #ifdef IFPOLL_ENABLE 1898 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) { 1899 if (new_flags & IFF_NPOLLING) 1900 ifpoll_register(ifp); 1901 else 1902 ifpoll_deregister(ifp); 1903 } 1904 #endif 1905 1906 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | 1907 (new_flags &~ IFF_CANTCHANGE); 1908 if (new_flags & IFF_PPROMISC) { 1909 /* Permanently promiscuous mode requested */ 1910 ifp->if_flags |= IFF_PROMISC; 1911 } else if (ifp->if_pcount == 0) { 1912 ifp->if_flags &= ~IFF_PROMISC; 1913 } 1914 if (ifp->if_ioctl) { 1915 ifnet_serialize_all(ifp); 1916 ifp->if_ioctl(ifp, cmd, data, cred); 1917 ifnet_deserialize_all(ifp); 1918 } 1919 if (do_ifup) 1920 if_up(ifp); 1921 getmicrotime(&ifp->if_lastchange); 1922 break; 1923 1924 case SIOCSIFCAP: 1925 error = priv_check_cred(cred, 
PRIV_ROOT, 0); 1926 if (error) 1927 break; 1928 if (ifr->ifr_reqcap & ~ifp->if_capabilities) { 1929 error = EINVAL; 1930 break; 1931 } 1932 ifnet_serialize_all(ifp); 1933 ifp->if_ioctl(ifp, cmd, data, cred); 1934 ifnet_deserialize_all(ifp); 1935 break; 1936 1937 case SIOCSIFNAME: 1938 error = priv_check_cred(cred, PRIV_ROOT, 0); 1939 if (error) 1940 break; 1941 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL); 1942 if (error) 1943 break; 1944 if (new_name[0] == '\0') { 1945 error = EINVAL; 1946 break; 1947 } 1948 if (ifunit(new_name) != NULL) { 1949 error = EEXIST; 1950 break; 1951 } 1952 1953 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp); 1954 1955 /* Announce the departure of the interface. */ 1956 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 1957 1958 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname)); 1959 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa; 1960 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 1961 namelen = strlen(new_name); 1962 onamelen = sdl->sdl_nlen; 1963 /* 1964 * Move the address if needed. This is safe because we 1965 * allocate space for a name of length IFNAMSIZ when we 1966 * create this in if_attach(). 1967 */ 1968 if (namelen != onamelen) { 1969 bcopy(sdl->sdl_data + onamelen, 1970 sdl->sdl_data + namelen, sdl->sdl_alen); 1971 } 1972 bcopy(new_name, sdl->sdl_data, namelen); 1973 sdl->sdl_nlen = namelen; 1974 sdl = (struct sockaddr_dl *)ifa->ifa_netmask; 1975 bzero(sdl->sdl_data, onamelen); 1976 while (namelen != 0) 1977 sdl->sdl_data[--namelen] = 0xff; 1978 1979 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 1980 1981 /* Announce the return of the interface. */ 1982 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 1983 break; 1984 1985 case SIOCSIFMETRIC: 1986 error = priv_check_cred(cred, PRIV_ROOT, 0); 1987 if (error) 1988 break; 1989 ifp->if_metric = ifr->ifr_metric; 1990 getmicrotime(&ifp->if_lastchange); 1991 break; 1992 1993 case SIOCSIFPHYS: 1994 error = priv_check_cred(cred, PRIV_ROOT, 0); 1995 if (error) 1996 break; 1997 if (ifp->if_ioctl == NULL) { 1998 error = EOPNOTSUPP; 1999 break; 2000 } 2001 ifnet_serialize_all(ifp); 2002 error = ifp->if_ioctl(ifp, cmd, data, cred); 2003 ifnet_deserialize_all(ifp); 2004 if (error == 0) 2005 getmicrotime(&ifp->if_lastchange); 2006 break; 2007 2008 case SIOCSIFMTU: 2009 { 2010 u_long oldmtu = ifp->if_mtu; 2011 2012 error = priv_check_cred(cred, PRIV_ROOT, 0); 2013 if (error) 2014 break; 2015 if (ifp->if_ioctl == NULL) { 2016 error = EOPNOTSUPP; 2017 break; 2018 } 2019 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) { 2020 error = EINVAL; 2021 break; 2022 } 2023 ifnet_serialize_all(ifp); 2024 error = ifp->if_ioctl(ifp, cmd, data, cred); 2025 ifnet_deserialize_all(ifp); 2026 if (error == 0) { 2027 getmicrotime(&ifp->if_lastchange); 2028 rt_ifmsg(ifp); 2029 } 2030 /* 2031 * If the link MTU changed, do network layer specific procedure. 2032 */ 2033 if (ifp->if_mtu != oldmtu) { 2034 #ifdef INET6 2035 nd6_setmtu(ifp); 2036 #endif 2037 } 2038 break; 2039 } 2040 2041 case SIOCSIFTSOLEN: 2042 error = priv_check_cred(cred, PRIV_ROOT, 0); 2043 if (error) 2044 break; 2045 2046 /* XXX need driver supplied upper limit */ 2047 if (ifr->ifr_tsolen <= 0) { 2048 error = EINVAL; 2049 break; 2050 } 2051 ifp->if_tsolen = ifr->ifr_tsolen; 2052 break; 2053 2054 case SIOCADDMULTI: 2055 case SIOCDELMULTI: 2056 error = priv_check_cred(cred, PRIV_ROOT, 0); 2057 if (error) 2058 break; 2059 2060 /* Don't allow group membership on non-multicast interfaces. 
*/ 2061 if ((ifp->if_flags & IFF_MULTICAST) == 0) { 2062 error = EOPNOTSUPP; 2063 break; 2064 } 2065 2066 /* Don't let users screw up protocols' entries. */ 2067 if (ifr->ifr_addr.sa_family != AF_LINK) { 2068 error = EINVAL; 2069 break; 2070 } 2071 2072 if (cmd == SIOCADDMULTI) { 2073 struct ifmultiaddr *ifma; 2074 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); 2075 } else { 2076 error = if_delmulti(ifp, &ifr->ifr_addr); 2077 } 2078 if (error == 0) 2079 getmicrotime(&ifp->if_lastchange); 2080 break; 2081 2082 case SIOCSIFPHYADDR: 2083 case SIOCDIFPHYADDR: 2084 #ifdef INET6 2085 case SIOCSIFPHYADDR_IN6: 2086 #endif 2087 case SIOCSLIFPHYADDR: 2088 case SIOCSIFMEDIA: 2089 case SIOCSIFGENERIC: 2090 error = priv_check_cred(cred, PRIV_ROOT, 0); 2091 if (error) 2092 break; 2093 if (ifp->if_ioctl == 0) { 2094 error = EOPNOTSUPP; 2095 break; 2096 } 2097 ifnet_serialize_all(ifp); 2098 error = ifp->if_ioctl(ifp, cmd, data, cred); 2099 ifnet_deserialize_all(ifp); 2100 if (error == 0) 2101 getmicrotime(&ifp->if_lastchange); 2102 break; 2103 2104 case SIOCGIFSTATUS: 2105 ifs = (struct ifstat *)data; 2106 ifs->ascii[0] = '\0'; 2107 /* fall through */ 2108 case SIOCGIFPSRCADDR: 2109 case SIOCGIFPDSTADDR: 2110 case SIOCGLIFPHYADDR: 2111 case SIOCGIFMEDIA: 2112 case SIOCGIFGENERIC: 2113 if (ifp->if_ioctl == NULL) { 2114 error = EOPNOTSUPP; 2115 break; 2116 } 2117 ifnet_serialize_all(ifp); 2118 error = ifp->if_ioctl(ifp, cmd, data, cred); 2119 ifnet_deserialize_all(ifp); 2120 break; 2121 2122 case SIOCSIFLLADDR: 2123 error = priv_check_cred(cred, PRIV_ROOT, 0); 2124 if (error) 2125 break; 2126 error = if_setlladdr(ifp, ifr->ifr_addr.sa_data, 2127 ifr->ifr_addr.sa_len); 2128 EVENTHANDLER_INVOKE(iflladdr_event, ifp); 2129 break; 2130 2131 default: 2132 oif_flags = ifp->if_flags; 2133 if (so->so_proto == 0) { 2134 error = EOPNOTSUPP; 2135 break; 2136 } 2137 error = so_pru_control_direct(so, cmd, data, ifp); 2138 2139 if ((oif_flags ^ ifp->if_flags) & IFF_UP) { 2140 #ifdef INET6 2141 DELAY(100);/* XXX: temporary workaround for fxp issue*/ 2142 if (ifp->if_flags & IFF_UP) { 2143 crit_enter(); 2144 in6_if_up(ifp); 2145 crit_exit(); 2146 } 2147 #endif 2148 } 2149 break; 2150 } 2151 2152 ifnet_unlock(); 2153 return (error); 2154 } 2155 2156 /* 2157 * Set/clear promiscuous mode on interface ifp based on the truth value 2158 * of pswitch. The calls are reference counted so that only the first 2159 * "on" request actually has an effect, as does the final "off" request. 2160 * Results are undefined if the "off" and "on" requests are not matched. 2161 */ 2162 int 2163 ifpromisc(struct ifnet *ifp, int pswitch) 2164 { 2165 struct ifreq ifr; 2166 int error; 2167 int oldflags; 2168 2169 oldflags = ifp->if_flags; 2170 if (ifp->if_flags & IFF_PPROMISC) { 2171 /* Do nothing if device is in permanently promiscuous mode */ 2172 ifp->if_pcount += pswitch ? 1 : -1; 2173 return (0); 2174 } 2175 if (pswitch) { 2176 /* 2177 * If the device is not configured up, we cannot put it in 2178 * promiscuous mode. 
2179 */ 2180 if ((ifp->if_flags & IFF_UP) == 0) 2181 return (ENETDOWN); 2182 if (ifp->if_pcount++ != 0) 2183 return (0); 2184 ifp->if_flags |= IFF_PROMISC; 2185 log(LOG_INFO, "%s: promiscuous mode enabled\n", 2186 ifp->if_xname); 2187 } else { 2188 if (--ifp->if_pcount > 0) 2189 return (0); 2190 ifp->if_flags &= ~IFF_PROMISC; 2191 log(LOG_INFO, "%s: promiscuous mode disabled\n", 2192 ifp->if_xname); 2193 } 2194 ifr.ifr_flags = ifp->if_flags; 2195 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2196 ifnet_serialize_all(ifp); 2197 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL); 2198 ifnet_deserialize_all(ifp); 2199 if (error == 0) 2200 rt_ifmsg(ifp); 2201 else 2202 ifp->if_flags = oldflags; 2203 return error; 2204 } 2205 2206 /* 2207 * Return interface configuration 2208 * of system. List may be used 2209 * in later ioctl's (above) to get 2210 * other information. 2211 */ 2212 static int 2213 ifconf(u_long cmd, caddr_t data, struct ucred *cred) 2214 { 2215 struct ifconf *ifc = (struct ifconf *)data; 2216 struct ifnet *ifp; 2217 struct sockaddr *sa; 2218 struct ifreq ifr, *ifrp; 2219 int space = ifc->ifc_len, error = 0; 2220 2221 ifrp = ifc->ifc_req; 2222 2223 ifnet_lock(); 2224 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2225 struct ifaddr_container *ifac, *ifac_mark; 2226 struct ifaddr_marker mark; 2227 struct ifaddrhead *head; 2228 int addrs; 2229 2230 if (space <= sizeof ifr) 2231 break; 2232 2233 /* 2234 * Zero the stack declared structure first to prevent 2235 * memory disclosure. 2236 */ 2237 bzero(&ifr, sizeof(ifr)); 2238 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) 2239 >= sizeof(ifr.ifr_name)) { 2240 error = ENAMETOOLONG; 2241 break; 2242 } 2243 2244 /* 2245 * Add a marker, since copyout() could block and during that 2246 * period the list could be changed. Inserting the marker to 2247 * the header of the list will not cause trouble for the code 2248 * assuming that the first element of the list is AF_LINK; the 2249 * marker will be moved to the next position w/o blocking. 2250 */ 2251 ifa_marker_init(&mark, ifp); 2252 ifac_mark = &mark.ifac; 2253 head = &ifp->if_addrheads[mycpuid]; 2254 2255 addrs = 0; 2256 TAILQ_INSERT_HEAD(head, ifac_mark, ifa_link); 2257 while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) { 2258 struct ifaddr *ifa = ifac->ifa; 2259 2260 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2261 TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link); 2262 2263 /* Ignore marker */ 2264 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 2265 continue; 2266 2267 if (space <= sizeof ifr) 2268 break; 2269 sa = ifa->ifa_addr; 2270 if (cred->cr_prison && 2271 prison_if(cred, sa)) 2272 continue; 2273 addrs++; 2274 /* 2275 * Keep a reference on this ifaddr, so that it will 2276 * not be destroyed when its address is copied to 2277 * the userland, which could block. 
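* The reference is dropped again with IFAFREE() once the copyout (or the early bail-out on overflow) has completed.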
2278 */ 2279 IFAREF(ifa); 2280 if (sa->sa_len <= sizeof(*sa)) { 2281 ifr.ifr_addr = *sa; 2282 error = copyout(&ifr, ifrp, sizeof ifr); 2283 ifrp++; 2284 } else { 2285 if (space < (sizeof ifr) + sa->sa_len - 2286 sizeof(*sa)) { 2287 IFAFREE(ifa); 2288 break; 2289 } 2290 space -= sa->sa_len - sizeof(*sa); 2291 error = copyout(&ifr, ifrp, 2292 sizeof ifr.ifr_name); 2293 if (error == 0) 2294 error = copyout(sa, &ifrp->ifr_addr, 2295 sa->sa_len); 2296 ifrp = (struct ifreq *) 2297 (sa->sa_len + (caddr_t)&ifrp->ifr_addr); 2298 } 2299 IFAFREE(ifa); 2300 if (error) 2301 break; 2302 space -= sizeof ifr; 2303 } 2304 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2305 if (error) 2306 break; 2307 if (!addrs) { 2308 bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr); 2309 error = copyout(&ifr, ifrp, sizeof ifr); 2310 if (error) 2311 break; 2312 space -= sizeof ifr; 2313 ifrp++; 2314 } 2315 } 2316 ifnet_unlock(); 2317 2318 ifc->ifc_len -= space; 2319 return (error); 2320 } 2321
2322 /* 2323 * Just like ifpromisc(), but for all-multicast-reception mode. 2324 */ 2325 int 2326 if_allmulti(struct ifnet *ifp, int onswitch) 2327 { 2328 int error = 0; 2329 struct ifreq ifr; 2330 2331 crit_enter(); 2332 2333 if (onswitch) { 2334 if (ifp->if_amcount++ == 0) { 2335 ifp->if_flags |= IFF_ALLMULTI; 2336 ifr.ifr_flags = ifp->if_flags; 2337 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2338 ifnet_serialize_all(ifp); 2339 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2340 NULL); 2341 ifnet_deserialize_all(ifp); 2342 } 2343 } else { 2344 if (ifp->if_amcount > 1) { 2345 ifp->if_amcount--; 2346 } else { 2347 ifp->if_amcount = 0; 2348 ifp->if_flags &= ~IFF_ALLMULTI; 2349 ifr.ifr_flags = ifp->if_flags; 2350 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2351 ifnet_serialize_all(ifp); 2352 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2353 NULL); 2354 ifnet_deserialize_all(ifp); 2355 } 2356 } 2357 2358 crit_exit(); 2359 2360 if (error == 0) 2361 rt_ifmsg(ifp); 2362 return error; 2363 } 2364
2365 /* 2366 * Add a multicast listenership to the interface in question.  The link 2367 * layer provides a routine which converts the address to link-layer form. 2368 */ 2369 int 2370 if_addmulti_serialized(struct ifnet *ifp, struct sockaddr *sa, 2371 struct ifmultiaddr **retifma) 2372 { 2373 struct sockaddr *llsa, *dupsa; 2374 int error; 2375 struct ifmultiaddr *ifma; 2376 2377 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2378 2379 /* 2380 * If the matching multicast address already exists 2381 * then don't add a new one, just add a reference 2382 */ 2383 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2384 if (sa_equal(sa, ifma->ifma_addr)) { 2385 ifma->ifma_refcount++; 2386 if (retifma) 2387 *retifma = ifma; 2388 return 0; 2389 } 2390 } 2391 2392 /* 2393 * Give the link layer a chance to accept/reject it, and also 2394 * find out which AF_LINK address this maps to, if it isn't one 2395 * already.
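* If the interface has no if_resolvemulti method, the address is used as is and no link-layer companion entry is created.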
*/ 2397 if (ifp->if_resolvemulti) { 2398 error = ifp->if_resolvemulti(ifp, &llsa, sa); 2399 if (error) 2400 return error; 2401 } else { 2402 llsa = NULL; 2403 } 2404 2405 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2406 dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_INTWAIT); 2407 bcopy(sa, dupsa, sa->sa_len); 2408 2409 ifma->ifma_addr = dupsa; 2410 ifma->ifma_lladdr = llsa; 2411 ifma->ifma_ifp = ifp; 2412 ifma->ifma_refcount = 1; 2413 ifma->ifma_protospec = NULL; 2414 rt_newmaddrmsg(RTM_NEWMADDR, ifma); 2415 2416 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2417 if (retifma) 2418 *retifma = ifma; 2419 2420 if (llsa != NULL) { 2421 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2422 if (sa_equal(ifma->ifma_addr, llsa)) 2423 break; 2424 } 2425 if (ifma) { 2426 ifma->ifma_refcount++; 2427 } else { 2428 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2429 dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_INTWAIT); 2430 bcopy(llsa, dupsa, llsa->sa_len); 2431 ifma->ifma_addr = dupsa; 2432 ifma->ifma_ifp = ifp; 2433 ifma->ifma_refcount = 1; 2434 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2435 } 2436 } 2437 /* 2438 * We are certain we have added something, so call down to the 2439 * interface to let it know about it. 2440 */ 2441 if (ifp->if_ioctl) 2442 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL); 2443 2444 return 0; 2445 } 2446
2447 int 2448 if_addmulti(struct ifnet *ifp, struct sockaddr *sa, 2449 struct ifmultiaddr **retifma) 2450 { 2451 int error; 2452 2453 ifnet_serialize_all(ifp); 2454 error = if_addmulti_serialized(ifp, sa, retifma); 2455 ifnet_deserialize_all(ifp); 2456 2457 return error; 2458 } 2459
2460 /* 2461 * Remove a reference to a multicast address on this interface. Yell 2462 * if the request does not match an existing membership. 2463 */ 2464 static int 2465 if_delmulti_serialized(struct ifnet *ifp, struct sockaddr *sa) 2466 { 2467 struct ifmultiaddr *ifma; 2468 2469 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2470 2471 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2472 if (sa_equal(sa, ifma->ifma_addr)) 2473 break; 2474 if (ifma == NULL) 2475 return ENOENT; 2476 2477 if (ifma->ifma_refcount > 1) { 2478 ifma->ifma_refcount--; 2479 return 0; 2480 } 2481 2482 rt_newmaddrmsg(RTM_DELMADDR, ifma); 2483 sa = ifma->ifma_lladdr; 2484 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2485 /* 2486 * Make sure the interface driver is notified 2487 * in the case of a link layer mcast group being left. 2488 */ 2489 if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) 2490 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2491 kfree(ifma->ifma_addr, M_IFMADDR); 2492 kfree(ifma, M_IFMADDR); 2493 if (sa == NULL) 2494 return 0; 2495 2496 /* 2497 * Now look for the link-layer address which corresponds to 2498 * this network address. It had been squirreled away in 2499 * ifma->ifma_lladdr for this purpose (so we don't have 2500 * to call ifp->if_resolvemulti() again), and we saved that 2501 * value in sa above. If some nasty deleted the 2502 * link-layer address out from underneath us, we can deal because 2503 * the address we stored is not the same as the one which was 2504 * in the record for the link-layer address. (So we don't complain 2505 * in that case.)
2506 */ 2507 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2508 if (sa_equal(sa, ifma->ifma_addr)) 2509 break; 2510 if (ifma == NULL) 2511 return 0; 2512 2513 if (ifma->ifma_refcount > 1) { 2514 ifma->ifma_refcount--; 2515 return 0; 2516 } 2517 2518 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2519 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2520 kfree(ifma->ifma_addr, M_IFMADDR); 2521 kfree(sa, M_IFMADDR); 2522 kfree(ifma, M_IFMADDR); 2523 2524 return 0; 2525 } 2526 2527 int 2528 if_delmulti(struct ifnet *ifp, struct sockaddr *sa) 2529 { 2530 int error; 2531 2532 ifnet_serialize_all(ifp); 2533 error = if_delmulti_serialized(ifp, sa); 2534 ifnet_deserialize_all(ifp); 2535 2536 return error; 2537 } 2538 2539 /* 2540 * Delete all multicast group membership for an interface. 2541 * Should be used to quickly flush all multicast filters. 2542 */ 2543 void 2544 if_delallmulti_serialized(struct ifnet *ifp) 2545 { 2546 struct ifmultiaddr *ifma, mark; 2547 struct sockaddr sa; 2548 2549 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2550 2551 bzero(&sa, sizeof(sa)); 2552 sa.sa_family = AF_UNSPEC; 2553 sa.sa_len = sizeof(sa); 2554 2555 bzero(&mark, sizeof(mark)); 2556 mark.ifma_addr = &sa; 2557 2558 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, &mark, ifma_link); 2559 while ((ifma = TAILQ_NEXT(&mark, ifma_link)) != NULL) { 2560 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2561 TAILQ_INSERT_AFTER(&ifp->if_multiaddrs, ifma, &mark, 2562 ifma_link); 2563 2564 if (ifma->ifma_addr->sa_family == AF_UNSPEC) 2565 continue; 2566 2567 if_delmulti_serialized(ifp, ifma->ifma_addr); 2568 } 2569 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2570 } 2571 2572 2573 /* 2574 * Set the link layer address on an interface. 2575 * 2576 * At this time we only support certain types of interfaces, 2577 * and we don't allow the length of the address to change. 2578 */ 2579 int 2580 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) 2581 { 2582 struct sockaddr_dl *sdl; 2583 struct ifreq ifr; 2584 2585 sdl = IF_LLSOCKADDR(ifp); 2586 if (sdl == NULL) 2587 return (EINVAL); 2588 if (len != sdl->sdl_alen) /* don't allow length to change */ 2589 return (EINVAL); 2590 switch (ifp->if_type) { 2591 case IFT_ETHER: /* these types use struct arpcom */ 2592 case IFT_XETHER: 2593 case IFT_L2VLAN: 2594 case IFT_IEEE8023ADLAG: 2595 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len); 2596 bcopy(lladdr, LLADDR(sdl), len); 2597 break; 2598 default: 2599 return (ENODEV); 2600 } 2601 /* 2602 * If the interface is already up, we need 2603 * to re-init it in order to reprogram its 2604 * address filter. 2605 */ 2606 ifnet_serialize_all(ifp); 2607 if ((ifp->if_flags & IFF_UP) != 0) { 2608 #ifdef INET 2609 struct ifaddr_container *ifac; 2610 #endif 2611 2612 ifp->if_flags &= ~IFF_UP; 2613 ifr.ifr_flags = ifp->if_flags; 2614 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2615 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2616 NULL); 2617 ifp->if_flags |= IFF_UP; 2618 ifr.ifr_flags = ifp->if_flags; 2619 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2620 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2621 NULL); 2622 #ifdef INET 2623 /* 2624 * Also send gratuitous ARPs to notify other nodes about 2625 * the address change. 
2626 */ 2627 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 2628 struct ifaddr *ifa = ifac->ifa; 2629 2630 if (ifa->ifa_addr != NULL && 2631 ifa->ifa_addr->sa_family == AF_INET) 2632 arp_gratuitous(ifp, ifa); 2633 } 2634 #endif 2635 } 2636 ifnet_deserialize_all(ifp); 2637 return (0); 2638 } 2639 2640 struct ifmultiaddr * 2641 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp) 2642 { 2643 struct ifmultiaddr *ifma; 2644 2645 /* TODO: need ifnet_serialize_main */ 2646 ifnet_serialize_all(ifp); 2647 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2648 if (sa_equal(ifma->ifma_addr, sa)) 2649 break; 2650 ifnet_deserialize_all(ifp); 2651 2652 return ifma; 2653 } 2654 2655 /* 2656 * This function locates the first real ethernet MAC from a network 2657 * card and loads it into node, returning 0 on success or ENOENT if 2658 * no suitable interfaces were found. It is used by the uuid code to 2659 * generate a unique 6-byte number. 2660 */ 2661 int 2662 if_getanyethermac(uint16_t *node, int minlen) 2663 { 2664 struct ifnet *ifp; 2665 struct sockaddr_dl *sdl; 2666 2667 ifnet_lock(); 2668 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2669 if (ifp->if_type != IFT_ETHER) 2670 continue; 2671 sdl = IF_LLSOCKADDR(ifp); 2672 if (sdl->sdl_alen < minlen) 2673 continue; 2674 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node, 2675 minlen); 2676 ifnet_unlock(); 2677 return(0); 2678 } 2679 ifnet_unlock(); 2680 return (ENOENT); 2681 } 2682 2683 /* 2684 * The name argument must be a pointer to storage which will last as 2685 * long as the interface does. For physical devices, the result of 2686 * device_get_name(dev) is a good choice and for pseudo-devices a 2687 * static string works well. 2688 */ 2689 void 2690 if_initname(struct ifnet *ifp, const char *name, int unit) 2691 { 2692 ifp->if_dname = name; 2693 ifp->if_dunit = unit; 2694 if (unit != IF_DUNIT_NONE) 2695 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit); 2696 else 2697 strlcpy(ifp->if_xname, name, IFNAMSIZ); 2698 } 2699 2700 int 2701 if_printf(struct ifnet *ifp, const char *fmt, ...) 
2702 { 2703 __va_list ap; 2704 int retval; 2705 2706 retval = kprintf("%s: ", ifp->if_xname); 2707 __va_start(ap, fmt); 2708 retval += kvprintf(fmt, ap); 2709 __va_end(ap); 2710 return (retval); 2711 } 2712 2713 struct ifnet * 2714 if_alloc(uint8_t type) 2715 { 2716 struct ifnet *ifp; 2717 size_t size; 2718 2719 /* 2720 * XXX temporary hack until arpcom is setup in if_l2com 2721 */ 2722 if (type == IFT_ETHER) 2723 size = sizeof(struct arpcom); 2724 else 2725 size = sizeof(struct ifnet); 2726 2727 ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO); 2728 2729 ifp->if_type = type; 2730 2731 if (if_com_alloc[type] != NULL) { 2732 ifp->if_l2com = if_com_alloc[type](type, ifp); 2733 if (ifp->if_l2com == NULL) { 2734 kfree(ifp, M_IFNET); 2735 return (NULL); 2736 } 2737 } 2738 return (ifp); 2739 } 2740 2741 void 2742 if_free(struct ifnet *ifp) 2743 { 2744 kfree(ifp, M_IFNET); 2745 } 2746 2747 void 2748 ifq_set_classic(struct ifaltq *ifq) 2749 { 2750 ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq, 2751 ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request); 2752 } 2753 2754 void 2755 ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq, 2756 ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request) 2757 { 2758 int q; 2759 2760 KASSERT(mapsubq != NULL, ("mapsubq is not specified")); 2761 KASSERT(enqueue != NULL, ("enqueue is not specified")); 2762 KASSERT(dequeue != NULL, ("dequeue is not specified")); 2763 KASSERT(request != NULL, ("request is not specified")); 2764 2765 ifq->altq_mapsubq = mapsubq; 2766 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 2767 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 2768 2769 ifsq->ifsq_enqueue = enqueue; 2770 ifsq->ifsq_dequeue = dequeue; 2771 ifsq->ifsq_request = request; 2772 } 2773 } 2774 2775 static void 2776 ifsq_norm_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2777 { 2778 2779 classq_add(&ifsq->ifsq_norm, m); 2780 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2781 } 2782 2783 static void 2784 ifsq_prio_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2785 { 2786 2787 classq_add(&ifsq->ifsq_prio, m); 2788 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2789 ALTQ_SQ_PRIO_CNTR_INC(ifsq, m->m_pkthdr.len); 2790 } 2791 2792 static struct mbuf * 2793 ifsq_norm_dequeue(struct ifaltq_subque *ifsq) 2794 { 2795 struct mbuf *m; 2796 2797 m = classq_get(&ifsq->ifsq_norm); 2798 if (m != NULL) 2799 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2800 return (m); 2801 } 2802 2803 static struct mbuf * 2804 ifsq_prio_dequeue(struct ifaltq_subque *ifsq) 2805 { 2806 struct mbuf *m; 2807 2808 m = classq_get(&ifsq->ifsq_prio); 2809 if (m != NULL) { 2810 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2811 ALTQ_SQ_PRIO_CNTR_DEC(ifsq, m->m_pkthdr.len); 2812 } 2813 return (m); 2814 } 2815 2816 int 2817 ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m, 2818 struct altq_pktattr *pa __unused) 2819 { 2820 2821 M_ASSERTPKTHDR(m); 2822 again: 2823 if (ifsq->ifsq_len >= ifsq->ifsq_maxlen || 2824 ifsq->ifsq_bcnt >= ifsq->ifsq_maxbcnt) { 2825 struct mbuf *m_drop; 2826 2827 if (m->m_flags & M_PRIO) { 2828 m_drop = NULL; 2829 if (ifsq->ifsq_prio_len < (ifsq->ifsq_maxlen >> 1) && 2830 ifsq->ifsq_prio_bcnt < (ifsq->ifsq_maxbcnt >> 1)) { 2831 /* Try dropping some from normal queue. 
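* This way high priority packets that are already queued are not sacrificed for the incoming one.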
*/ 2832 m_drop = ifsq_norm_dequeue(ifsq); 2833 } 2834 if (m_drop == NULL) 2835 m_drop = ifsq_prio_dequeue(ifsq); 2836 } else { 2837 m_drop = ifsq_norm_dequeue(ifsq); 2838 } 2839 if (m_drop != NULL) { 2840 IFNET_STAT_INC(ifsq->ifsq_ifp, oqdrops, 1); 2841 m_freem(m_drop); 2842 goto again; 2843 } 2844 /* 2845 * No old packets could be dropped! 2846 * NOTE: Caller increases oqdrops. 2847 */ 2848 m_freem(m); 2849 return (ENOBUFS); 2850 } else { 2851 if (m->m_flags & M_PRIO) 2852 ifsq_prio_enqueue(ifsq, m); 2853 else 2854 ifsq_norm_enqueue(ifsq, m); 2855 return (0); 2856 } 2857 } 2858
2859 struct mbuf * 2860 ifsq_classic_dequeue(struct ifaltq_subque *ifsq, int op) 2861 { 2862 struct mbuf *m; 2863 2864 switch (op) { 2865 case ALTDQ_POLL: 2866 m = classq_head(&ifsq->ifsq_prio); 2867 if (m == NULL) 2868 m = classq_head(&ifsq->ifsq_norm); 2869 break; 2870 2871 case ALTDQ_REMOVE: 2872 m = ifsq_prio_dequeue(ifsq); 2873 if (m == NULL) 2874 m = ifsq_norm_dequeue(ifsq); 2875 break; 2876 2877 default: 2878 panic("unsupported ALTQ dequeue op: %d", op); 2879 } 2880 return m; 2881 } 2882
2883 int 2884 ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg) 2885 { 2886 switch (req) { 2887 case ALTRQ_PURGE: 2888 for (;;) { 2889 struct mbuf *m; 2890 2891 m = ifsq_classic_dequeue(ifsq, ALTDQ_REMOVE); 2892 if (m == NULL) 2893 break; 2894 m_freem(m); 2895 } 2896 break; 2897 2898 default: 2899 panic("unsupported ALTQ request: %d", req); 2900 } 2901 return 0; 2902 } 2903
2904 static void 2905 ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched) 2906 { 2907 struct ifnet *ifp = ifsq_get_ifp(ifsq); 2908 int running = 0, need_sched; 2909 2910 /* 2911 * Try to do direct ifnet.if_start on the subqueue first.  If there is 2912 * contention on the subqueue hardware serializer, ifnet.if_start on 2913 * the subqueue will be scheduled on the subqueue owner CPU. 2914 */ 2915 if (!ifsq_tryserialize_hw(ifsq)) { 2916 /* 2917 * Subqueue hardware serializer contention happened; 2918 * ifnet.if_start on the subqueue is scheduled on 2919 * the subqueue owner CPU, and we keep going. 2920 */ 2921 ifsq_ifstart_schedule(ifsq, 1); 2922 return; 2923 } 2924 2925 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) { 2926 ifp->if_start(ifp, ifsq); 2927 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 2928 running = 1; 2929 } 2930 need_sched = ifsq_ifstart_need_schedule(ifsq, running); 2931 2932 ifsq_deserialize_hw(ifsq); 2933 2934 if (need_sched) { 2935 /* 2936 * More data needs to be transmitted; ifnet.if_start on the 2937 * subqueue is scheduled on the subqueue owner CPU, and we 2938 * keep going. 2939 * NOTE: ifnet.if_start subqueue interlock is not released. 2940 */ 2941 ifsq_ifstart_schedule(ifsq, force_sched); 2942 } 2943 } 2944
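/*
 * Illustrative sketch, not part of this file: the classic subqueue methods
 * above (ifsq_classic_enqueue/dequeue/request) are what ifq_set_classic()
 * installs by default.  A consumer that wanted its own discipline could
 * register one through ifq_set_methods() in the same way.  The "mydrv"
 * names below are hypothetical and the FIFO shown is deliberately simpler
 * than the classic two-level (priority/normal) queue.
 */
#if 0
static int
mydrv_fifo_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pa __unused)
{
	/* Single drop-tail FIFO on the normal classq only. */
	if (ifsq->ifsq_len >= ifsq->ifsq_maxlen) {
		m_freem(m);
		return (ENOBUFS);
	}
	classq_add(&ifsq->ifsq_norm, m);
	ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len);
	return (0);
}

static struct mbuf *
mydrv_fifo_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct mbuf *m;

	if (op == ALTDQ_POLL)
		return (classq_head(&ifsq->ifsq_norm));
	m = classq_get(&ifsq->ifsq_norm);
	if (m != NULL)
		ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len);
	return (m);
}

static void
mydrv_set_fifo_methods(struct ifnet *ifp)
{
	/* Reuse the stock subqueue mapping and purge handling. */
	ifq_set_methods(&ifp->if_snd, ifq_mapsubq_default,
	    mydrv_fifo_enqueue, mydrv_fifo_dequeue, ifsq_classic_request);
}
#endif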
2945 /* 2946 * Subqueue packet staging mechanism: 2947 * 2948 * The packets enqueued into the subqueue are staged up to a certain amount 2949 * before ifnet.if_start on the subqueue is called.  In this way, the 2950 * driver can avoid writing to the hardware registers upon every packet; 2951 * instead, the hardware registers can be written once a certain number of 2952 * packets have been put onto the hardware TX ring.  Measurements on several 2953 * modern NICs (emx(4), igb(4), bnx(4), bge(4), jme(4)) show that this 2954 * hardware register write aggregation can save ~20% CPU time when 18-byte 2955 * UDP datagrams are transmitted at 1.48Mpps.  The performance improvement 2956 * from hardware register write aggregation is also mentioned in Luigi 2957 * Rizzo's netmap paper (http://info.iet.unipi.it/~luigi/netmap/). 2958 * 2959 * Subqueue packet staging is performed for two entry points into drivers' 2960 * transmission function: 2961 * - Direct ifnet.if_start calling on the subqueue, i.e. ifsq_ifstart_try() 2962 * - ifnet.if_start scheduling on the subqueue, i.e. ifsq_ifstart_schedule() 2963 * 2964 * Subqueue packet staging will be stopped upon any of the following 2965 * conditions: 2966 * - If the count of packets enqueued on the current CPU is greater than or 2967 * equal to ifsq_stage_cntmax. (XXX this should be per-interface) 2968 * - If the total length of packets enqueued on the current CPU is greater 2969 * than or equal to the hardware's MTU - max_protohdr.  max_protohdr is 2970 * cut from the hardware's MTU mainly because a full TCP segment's size 2971 * is usually less than the hardware's MTU. 2972 * - ifsq_ifstart_schedule() is not pending on the current CPU and the 2973 * ifnet.if_start subqueue interlock (ifaltq_subq.ifsq_started) is not 2974 * released. 2975 * - if_start_rollup(), which is registered as a low priority netisr 2976 * rollup function, is called, probably because no more work is pending 2977 * for the netisr. 2978 * 2979 * NOTE: 2980 * Currently subqueue packet staging is only performed in netisr threads. 2981 */
2982 int 2983 ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa) 2984 { 2985 struct ifaltq *ifq = &ifp->if_snd; 2986 struct ifaltq_subque *ifsq; 2987 int error, start = 0, len, mcast = 0, avoid_start = 0; 2988 struct ifsubq_stage_head *head = NULL; 2989 struct ifsubq_stage *stage = NULL; 2990 struct globaldata *gd = mycpu; 2991 struct thread *td = gd->gd_curthread; 2992 2993 crit_enter_quick(td); 2994 2995 ifsq = ifq_map_subq(ifq, gd->gd_cpuid); 2996 ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq); 2997 2998 len = m->m_pkthdr.len; 2999 if (m->m_flags & M_MCAST) 3000 mcast = 1; 3001 3002 if (td->td_type == TD_TYPE_NETISR) { 3003 head = &ifsubq_stage_heads[mycpuid]; 3004 stage = ifsq_get_stage(ifsq, mycpuid); 3005 3006 stage->stg_cnt++; 3007 stage->stg_len += len; 3008 if (stage->stg_cnt < ifsq_stage_cntmax && 3009 stage->stg_len < (ifp->if_mtu - max_protohdr)) 3010 avoid_start = 1; 3011 } 3012 3013 ALTQ_SQ_LOCK(ifsq); 3014 error = ifsq_enqueue_locked(ifsq, m, pa); 3015 if (error) { 3016 IFNET_STAT_INC(ifp, oqdrops, 1); 3017 if (!ifsq_data_ready(ifsq)) { 3018 ALTQ_SQ_UNLOCK(ifsq); 3019 crit_exit_quick(td); 3020 return error; 3021 } 3022 avoid_start = 0; 3023 } 3024 if (!ifsq_is_started(ifsq)) { 3025 if (avoid_start) { 3026 ALTQ_SQ_UNLOCK(ifsq); 3027 3028 KKASSERT(!error); 3029 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0) 3030 ifsq_stage_insert(head, stage); 3031 3032 IFNET_STAT_INC(ifp, obytes, len); 3033 if (mcast) 3034 IFNET_STAT_INC(ifp, omcasts, 1); 3035 crit_exit_quick(td); 3036 return error; 3037 } 3038 3039 /* 3040 * Hold the subqueue interlock of ifnet.if_start 3041 */ 3042 ifsq_set_started(ifsq); 3043 start = 1; 3044 } 3045 ALTQ_SQ_UNLOCK(ifsq); 3046 3047 if (!error) { 3048 IFNET_STAT_INC(ifp, obytes, len); 3049 if (mcast) 3050 IFNET_STAT_INC(ifp, omcasts, 1); 3051 } 3052 3053 if (stage != NULL) { 3054 if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) { 3055 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED); 3056 if (!avoid_start) { 3057 ifsq_stage_remove(head, stage); 3058 ifsq_ifstart_schedule(ifsq, 1); 3059 } 3060 crit_exit_quick(td); 3061 return error; 3062 } 3063
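/*
 * At this point the packet is no longer being staged on this CPU: either
 * the ifnet.if_start subqueue interlock was grabbed above (start == 1),
 * or the subqueue has already been started by another context.  Drop the
 * stage from the per-CPU staging list if it was queued, otherwise just
 * reset its counters.
 */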
3064 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) { 3065 ifsq_stage_remove(head, stage); 3066 } else { 3067 stage->stg_cnt = 0; 3068 stage->stg_len = 0; 3069 } 3070 } 3071 3072 if (!start) { 3073 crit_exit_quick(td); 3074 return error; 3075 } 3076 3077 ifsq_ifstart_try(ifsq, 0); 3078 3079 crit_exit_quick(td); 3080 return error; 3081 } 3082 3083 void * 3084 ifa_create(int size) 3085 { 3086 struct ifaddr *ifa; 3087 int i; 3088 3089 KASSERT(size >= sizeof(*ifa), ("ifaddr size too small")); 3090 3091 ifa = kmalloc(size, M_IFADDR, M_INTWAIT | M_ZERO); 3092 ifa->ifa_containers = 3093 kmalloc_cachealign(ncpus * sizeof(struct ifaddr_container), 3094 M_IFADDR, M_INTWAIT | M_ZERO); 3095 3096 ifa->ifa_ncnt = ncpus; 3097 for (i = 0; i < ncpus; ++i) { 3098 struct ifaddr_container *ifac = &ifa->ifa_containers[i]; 3099 3100 ifac->ifa_magic = IFA_CONTAINER_MAGIC; 3101 ifac->ifa = ifa; 3102 ifac->ifa_refcnt = 1; 3103 } 3104 #ifdef IFADDR_DEBUG 3105 kprintf("alloc ifa %p %d\n", ifa, size); 3106 #endif 3107 return ifa; 3108 } 3109 3110 void 3111 ifac_free(struct ifaddr_container *ifac, int cpu_id) 3112 { 3113 struct ifaddr *ifa = ifac->ifa; 3114 3115 KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC); 3116 KKASSERT(ifac->ifa_refcnt == 0); 3117 KASSERT(ifac->ifa_listmask == 0, 3118 ("ifa is still on %#x lists", ifac->ifa_listmask)); 3119 3120 ifac->ifa_magic = IFA_CONTAINER_DEAD; 3121 3122 #ifdef IFADDR_DEBUG_VERBOSE 3123 kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id); 3124 #endif 3125 3126 KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus, 3127 ("invalid # of ifac, %d", ifa->ifa_ncnt)); 3128 if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) { 3129 #ifdef IFADDR_DEBUG 3130 kprintf("free ifa %p\n", ifa); 3131 #endif 3132 kfree(ifa->ifa_containers, M_IFADDR); 3133 kfree(ifa, M_IFADDR); 3134 } 3135 } 3136 3137 static void 3138 ifa_iflink_dispatch(netmsg_t nmsg) 3139 { 3140 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3141 struct ifaddr *ifa = msg->ifa; 3142 struct ifnet *ifp = msg->ifp; 3143 int cpu = mycpuid; 3144 struct ifaddr_container *ifac; 3145 3146 crit_enter(); 3147 3148 ifac = &ifa->ifa_containers[cpu]; 3149 ASSERT_IFAC_VALID(ifac); 3150 KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0, 3151 ("ifaddr is on if_addrheads")); 3152 3153 ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD; 3154 if (msg->tail) 3155 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link); 3156 else 3157 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link); 3158 3159 crit_exit(); 3160 3161 netisr_forwardmsg(&nmsg->base, cpu + 1); 3162 } 3163 3164 void 3165 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail) 3166 { 3167 struct netmsg_ifaddr msg; 3168 3169 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3170 0, ifa_iflink_dispatch); 3171 msg.ifa = ifa; 3172 msg.ifp = ifp; 3173 msg.tail = tail; 3174 3175 netisr_domsg(&msg.base, 0); 3176 } 3177 3178 static void 3179 ifa_ifunlink_dispatch(netmsg_t nmsg) 3180 { 3181 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3182 struct ifaddr *ifa = msg->ifa; 3183 struct ifnet *ifp = msg->ifp; 3184 int cpu = mycpuid; 3185 struct ifaddr_container *ifac; 3186 3187 crit_enter(); 3188 3189 ifac = &ifa->ifa_containers[cpu]; 3190 ASSERT_IFAC_VALID(ifac); 3191 KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD, 3192 ("ifaddr is not on if_addrhead")); 3193 3194 TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link); 3195 ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD; 3196 3197 crit_exit(); 3198 3199 netisr_forwardmsg(&nmsg->base, cpu + 1); 3200 } 3201 3202 
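/*
 * Illustrative sketch, not part of this file: ifa_iflink(), ifa_ifunlink()
 * and ifa_destroy() above all use the same pattern to touch per-CPU state
 * on every netisr CPU in order.  The message is dispatched to CPU 0 and
 * each handler forwards it to the next CPU until the reply makes
 * netisr_domsg() return.  Everything named "mydrv" below is hypothetical.
 */
#if 0
struct netmsg_mydrv {
	struct netmsg_base	base;
	struct ifnet		*ifp;
};

static void
mydrv_percpu_dispatch(netmsg_t nmsg)
{
	struct netmsg_mydrv *msg = (struct netmsg_mydrv *)nmsg;

	/* Per-CPU work for msg->ifp goes here. */
	kprintf("%s: cpu%d addr list %p\n", msg->ifp->if_xname, mycpuid,
	    &msg->ifp->if_addrheads[mycpuid]);

	/* Pass the message on; the last netisr CPU replies instead. */
	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}

static void
mydrv_percpu_run(struct ifnet *ifp)
{
	struct netmsg_mydrv msg;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0,
	    mydrv_percpu_dispatch);
	msg.ifp = ifp;

	/* Start on netisr CPU 0 and wait for the reply. */
	netisr_domsg(&msg.base, 0);
}
#endif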
void 3203 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp) 3204 { 3205 struct netmsg_ifaddr msg; 3206 3207 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3208 0, ifa_ifunlink_dispatch); 3209 msg.ifa = ifa; 3210 msg.ifp = ifp; 3211 3212 netisr_domsg(&msg.base, 0); 3213 } 3214 3215 static void 3216 ifa_destroy_dispatch(netmsg_t nmsg) 3217 { 3218 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3219 3220 IFAFREE(msg->ifa); 3221 netisr_forwardmsg(&nmsg->base, mycpuid + 1); 3222 } 3223 3224 void 3225 ifa_destroy(struct ifaddr *ifa) 3226 { 3227 struct netmsg_ifaddr msg; 3228 3229 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3230 0, ifa_destroy_dispatch); 3231 msg.ifa = ifa; 3232 3233 netisr_domsg(&msg.base, 0); 3234 } 3235 3236 static void 3237 if_start_rollup(void) 3238 { 3239 struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid]; 3240 struct ifsubq_stage *stage; 3241 3242 crit_enter(); 3243 3244 while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) { 3245 struct ifaltq_subque *ifsq = stage->stg_subq; 3246 int is_sched = 0; 3247 3248 if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED) 3249 is_sched = 1; 3250 ifsq_stage_remove(head, stage); 3251 3252 if (is_sched) { 3253 ifsq_ifstart_schedule(ifsq, 1); 3254 } else { 3255 int start = 0; 3256 3257 ALTQ_SQ_LOCK(ifsq); 3258 if (!ifsq_is_started(ifsq)) { 3259 /* 3260 * Hold the subqueue interlock of 3261 * ifnet.if_start 3262 */ 3263 ifsq_set_started(ifsq); 3264 start = 1; 3265 } 3266 ALTQ_SQ_UNLOCK(ifsq); 3267 3268 if (start) 3269 ifsq_ifstart_try(ifsq, 1); 3270 } 3271 KKASSERT((stage->stg_flags & 3272 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 3273 } 3274 3275 crit_exit(); 3276 } 3277 3278 static void 3279 ifnetinit(void *dummy __unused) 3280 { 3281 int i; 3282 3283 for (i = 0; i < ncpus; ++i) 3284 TAILQ_INIT(&ifsubq_stage_heads[i].stg_head); 3285 netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART); 3286 } 3287 3288 void 3289 if_register_com_alloc(u_char type, 3290 if_com_alloc_t *a, if_com_free_t *f) 3291 { 3292 3293 KASSERT(if_com_alloc[type] == NULL, 3294 ("if_register_com_alloc: %d already registered", type)); 3295 KASSERT(if_com_free[type] == NULL, 3296 ("if_register_com_alloc: %d free already registered", type)); 3297 3298 if_com_alloc[type] = a; 3299 if_com_free[type] = f; 3300 } 3301 3302 void 3303 if_deregister_com_alloc(u_char type) 3304 { 3305 3306 KASSERT(if_com_alloc[type] != NULL, 3307 ("if_deregister_com_alloc: %d not registered", type)); 3308 KASSERT(if_com_free[type] != NULL, 3309 ("if_deregister_com_alloc: %d free not registered", type)); 3310 if_com_alloc[type] = NULL; 3311 if_com_free[type] = NULL; 3312 } 3313 3314 void 3315 ifq_set_maxlen(struct ifaltq *ifq, int len) 3316 { 3317 ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax); 3318 } 3319 3320 int 3321 ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused) 3322 { 3323 return ALTQ_SUBQ_INDEX_DEFAULT; 3324 } 3325 3326 int 3327 ifq_mapsubq_mask(struct ifaltq *ifq, int cpuid) 3328 { 3329 3330 return (cpuid & ifq->altq_subq_mappriv); 3331 } 3332 3333 int 3334 ifq_mapsubq_modulo(struct ifaltq *ifq, int cpuid) 3335 { 3336 3337 return (cpuid % ifq->altq_subq_mappriv); 3338 } 3339 3340 static void 3341 ifsq_watchdog(void *arg) 3342 { 3343 struct ifsubq_watchdog *wd = arg; 3344 struct ifnet *ifp; 3345 3346 if (__predict_true(wd->wd_timer == 0 || --wd->wd_timer)) 3347 goto done; 3348 3349 ifp = ifsq_get_ifp(wd->wd_subq); 3350 if (ifnet_tryserialize_all(ifp)) { 3351 wd->wd_watchdog(wd->wd_subq); 3352 
ifnet_deserialize_all(ifp); 3353 } else { 3354 /* try again next timeout */ 3355 wd->wd_timer = 1; 3356 } 3357 done: 3358 ifsq_watchdog_reset(wd); 3359 } 3360 3361 static void 3362 ifsq_watchdog_reset(struct ifsubq_watchdog *wd) 3363 { 3364 callout_reset_bycpu(&wd->wd_callout, hz, ifsq_watchdog, wd, 3365 ifsq_get_cpuid(wd->wd_subq)); 3366 } 3367 3368 void 3369 ifsq_watchdog_init(struct ifsubq_watchdog *wd, struct ifaltq_subque *ifsq, 3370 ifsq_watchdog_t watchdog) 3371 { 3372 callout_init_mp(&wd->wd_callout); 3373 wd->wd_timer = 0; 3374 wd->wd_subq = ifsq; 3375 wd->wd_watchdog = watchdog; 3376 } 3377 3378 void 3379 ifsq_watchdog_start(struct ifsubq_watchdog *wd) 3380 { 3381 wd->wd_timer = 0; 3382 ifsq_watchdog_reset(wd); 3383 } 3384 3385 void 3386 ifsq_watchdog_stop(struct ifsubq_watchdog *wd) 3387 { 3388 wd->wd_timer = 0; 3389 callout_stop(&wd->wd_callout); 3390 } 3391 3392 void 3393 ifnet_lock(void) 3394 { 3395 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3396 ("try holding ifnet lock in netisr")); 3397 mtx_lock(&ifnet_mtx); 3398 } 3399 3400 void 3401 ifnet_unlock(void) 3402 { 3403 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3404 ("try holding ifnet lock in netisr")); 3405 mtx_unlock(&ifnet_mtx); 3406 } 3407 3408 static struct ifnet_array * 3409 ifnet_array_alloc(int count) 3410 { 3411 struct ifnet_array *arr; 3412 3413 arr = kmalloc(__offsetof(struct ifnet_array, ifnet_arr[count]), 3414 M_IFNET, M_WAITOK); 3415 arr->ifnet_count = count; 3416 3417 return arr; 3418 } 3419 3420 static void 3421 ifnet_array_free(struct ifnet_array *arr) 3422 { 3423 if (arr == &ifnet_array0) 3424 return; 3425 kfree(arr, M_IFNET); 3426 } 3427 3428 static struct ifnet_array * 3429 ifnet_array_add(struct ifnet *ifp, const struct ifnet_array *old_arr) 3430 { 3431 struct ifnet_array *arr; 3432 int count, i; 3433 3434 KASSERT(old_arr->ifnet_count >= 0, 3435 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3436 count = old_arr->ifnet_count + 1; 3437 arr = ifnet_array_alloc(count); 3438 3439 /* 3440 * Save the old ifnet array and append this ifp to the end of 3441 * the new ifnet array. 3442 */ 3443 for (i = 0; i < old_arr->ifnet_count; ++i) { 3444 KASSERT(old_arr->ifnet_arr[i] != ifp, 3445 ("%s is already in ifnet array", ifp->if_xname)); 3446 arr->ifnet_arr[i] = old_arr->ifnet_arr[i]; 3447 } 3448 KASSERT(i == count - 1, 3449 ("add %s, ifnet array index mismatch, should be %d, but got %d", 3450 ifp->if_xname, count - 1, i)); 3451 arr->ifnet_arr[i] = ifp; 3452 3453 return arr; 3454 } 3455 3456 static struct ifnet_array * 3457 ifnet_array_del(struct ifnet *ifp, const struct ifnet_array *old_arr) 3458 { 3459 struct ifnet_array *arr; 3460 int count, i, idx, found = 0; 3461 3462 KASSERT(old_arr->ifnet_count > 0, 3463 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3464 count = old_arr->ifnet_count - 1; 3465 arr = ifnet_array_alloc(count); 3466 3467 /* 3468 * Save the old ifnet array, but skip this ifp. 
3469 */ 3470 idx = 0; 3471 for (i = 0; i < old_arr->ifnet_count; ++i) { 3472 if (old_arr->ifnet_arr[i] == ifp) { 3473 KASSERT(!found, 3474 ("dup %s is in ifnet array", ifp->if_xname)); 3475 found = 1; 3476 continue; 3477 } 3478 KASSERT(idx < count, 3479 ("invalid ifnet array index %d, count %d", idx, count)); 3480 arr->ifnet_arr[idx] = old_arr->ifnet_arr[i]; 3481 ++idx; 3482 } 3483 KASSERT(found, ("%s is not in ifnet array", ifp->if_xname)); 3484 KASSERT(idx == count, 3485 ("del %s, ifnet array count mismatch, should be %d, but got %d ", 3486 ifp->if_xname, count, idx)); 3487 3488 return arr; 3489 } 3490 3491 const struct ifnet_array * 3492 ifnet_array_get(void) 3493 { 3494 const struct ifnet_array *ret; 3495 3496 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3497 ret = ifnet_array; 3498 /* Make sure 'ret' is really used. */ 3499 cpu_ccfence(); 3500 return (ret); 3501 } 3502 3503 int 3504 ifnet_array_isempty(void) 3505 { 3506 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3507 if (ifnet_array->ifnet_count == 0) 3508 return 1; 3509 else 3510 return 0; 3511 } 3512 3513 void 3514 ifa_marker_init(struct ifaddr_marker *mark, struct ifnet *ifp) 3515 { 3516 struct ifaddr *ifa; 3517 3518 memset(mark, 0, sizeof(*mark)); 3519 ifa = &mark->ifa; 3520 3521 mark->ifac.ifa = ifa; 3522 3523 ifa->ifa_addr = &mark->addr; 3524 ifa->ifa_dstaddr = &mark->dstaddr; 3525 ifa->ifa_netmask = &mark->netmask; 3526 ifa->ifa_ifp = ifp; 3527 } 3528 3529 static int 3530 if_ringcnt_fixup(int ring_cnt, int ring_cntmax) 3531 { 3532 3533 KASSERT(ring_cntmax > 0, ("invalid ring count max %d", ring_cntmax)); 3534 if (ring_cnt == 1 || ring_cntmax == 1 || netisr_ncpus == 1) 3535 return (1); 3536 3537 if (ring_cnt <= 0 || ring_cnt > ring_cntmax) 3538 ring_cnt = ring_cntmax; 3539 if (ring_cnt > netisr_ncpus) 3540 ring_cnt = netisr_ncpus; 3541 return (ring_cnt); 3542 } 3543 3544 static void 3545 if_ringmap_set_grid(device_t dev, struct if_ringmap *rm, int grid) 3546 { 3547 int i, offset; 3548 3549 KASSERT(grid > 0, ("invalid if_ringmap grid %d", grid)); 3550 rm->rm_grid = grid; 3551 3552 offset = (rm->rm_grid * device_get_unit(dev)) % netisr_ncpus; 3553 for (i = 0; i < rm->rm_cnt; ++i) { 3554 rm->rm_cpumap[i] = offset + i; 3555 KASSERT(rm->rm_cpumap[i] < netisr_ncpus, 3556 ("invalid cpumap[%d] = %d, offset %d", i, 3557 rm->rm_cpumap[i], offset)); 3558 } 3559 } 3560 3561 struct if_ringmap * 3562 if_ringmap_alloc(device_t dev, int ring_cnt, int ring_cntmax) 3563 { 3564 struct if_ringmap *rm; 3565 int i, grid = 0; 3566 3567 ring_cnt = if_ringcnt_fixup(ring_cnt, ring_cntmax); 3568 rm = kmalloc(__offsetof(struct if_ringmap, rm_cpumap[ring_cnt]), 3569 M_DEVBUF, M_WAITOK | M_ZERO); 3570 3571 rm->rm_cnt = ring_cnt; 3572 for (i = 0; i < netisr_ncpus; ++i) { 3573 if (netisr_ncpus % (i + 1) != 0) 3574 continue; 3575 3576 if (rm->rm_cnt > netisr_ncpus / (i + 2)) { 3577 grid = netisr_ncpus / (i + 1); 3578 if (rm->rm_cnt > grid) 3579 rm->rm_cnt = grid; 3580 break; 3581 } 3582 } 3583 if_ringmap_set_grid(dev, rm, grid); 3584 3585 return (rm); 3586 } 3587 3588 void 3589 if_ringmap_free(struct if_ringmap *rm) 3590 { 3591 3592 kfree(rm, M_DEVBUF); 3593 } 3594 3595 /* 3596 * Align the two ringmaps. 3597 * 3598 * e.g. 8 netisrs, rm0 contains 4 rings, rm1 contains 2 rings. 
3599 * 3600 * Before: 3601 * 3602 * CPU 0 1 2 3 4 5 6 7 3603 * NIC_RX n0 n1 n2 n3 3604 * NIC_TX N0 N1 3605 * 3606 * After: 3607 * 3608 * CPU 0 1 2 3 4 5 6 7 3609 * NIC_RX n0 n1 n2 n3 3610 * NIC_TX N0 N1 3611 */ 3612 void 3613 if_ringmap_align(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1) 3614 { 3615 3616 if (rm0->rm_grid > rm1->rm_grid) 3617 if_ringmap_set_grid(dev, rm1, rm0->rm_grid); 3618 else if (rm0->rm_grid < rm1->rm_grid) 3619 if_ringmap_set_grid(dev, rm0, rm1->rm_grid); 3620 } 3621 3622 void 3623 if_ringmap_match(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1) 3624 { 3625 int subset_grid, cnt, divisor, mod, offset, i; 3626 struct if_ringmap *subset_rm, *rm; 3627 int old_rm0_grid, old_rm1_grid; 3628 3629 if (rm0->rm_grid == rm1->rm_grid) 3630 return; 3631 3632 /* Save grid for later use */ 3633 old_rm0_grid = rm0->rm_grid; 3634 old_rm1_grid = rm1->rm_grid; 3635 3636 if_ringmap_align(dev, rm0, rm1); 3637 3638 /* 3639 * Re-shuffle rings to get more even distribution. 3640 * 3641 * e.g. 12 netisrs, rm0 contains 4 rings, rm1 contains 2 rings. 3642 * 3643 * CPU 0 1 2 3 4 5 6 7 8 9 10 11 3644 * 3645 * NIC_RX a0 a1 a2 a3 b0 b1 b2 b3 c0 c1 c2 c3 3646 * NIC_TX A0 A1 B0 B1 C0 C1 3647 * 3648 * NIC_RX d0 d1 d2 d3 e0 e1 e2 e3 f0 f1 f2 f3 3649 * NIC_TX D0 D1 E0 E1 F0 F1 3650 */ 3651 3652 if (rm0->rm_cnt >= (2 * old_rm1_grid)) { 3653 cnt = rm0->rm_cnt; 3654 subset_grid = old_rm1_grid; 3655 subset_rm = rm1; 3656 rm = rm0; 3657 } else if (rm1->rm_cnt > (2 * old_rm0_grid)) { 3658 cnt = rm1->rm_cnt; 3659 subset_grid = old_rm0_grid; 3660 subset_rm = rm0; 3661 rm = rm1; 3662 } else { 3663 /* No space to shuffle. */ 3664 return; 3665 } 3666 3667 mod = cnt / subset_grid; 3668 KKASSERT(mod >= 2); 3669 divisor = netisr_ncpus / rm->rm_grid; 3670 offset = ((device_get_unit(dev) / divisor) % mod) * subset_grid; 3671 3672 for (i = 0; i < subset_rm->rm_cnt; ++i) { 3673 subset_rm->rm_cpumap[i] += offset; 3674 KASSERT(subset_rm->rm_cpumap[i] < netisr_ncpus, 3675 ("match: invalid cpumap[%d] = %d, offset %d", 3676 i, subset_rm->rm_cpumap[i], offset)); 3677 } 3678 #ifdef INVARIANTS 3679 for (i = 0; i < subset_rm->rm_cnt; ++i) { 3680 int j; 3681 3682 for (j = 0; j < rm->rm_cnt; ++j) { 3683 if (rm->rm_cpumap[j] == subset_rm->rm_cpumap[i]) 3684 break; 3685 } 3686 KASSERT(j < rm->rm_cnt, 3687 ("subset cpumap[%d] = %d not found in superset", 3688 i, subset_rm->rm_cpumap[i])); 3689 } 3690 #endif 3691 } 3692 3693 int 3694 if_ringmap_count(const struct if_ringmap *rm) 3695 { 3696 3697 return (rm->rm_cnt); 3698 } 3699 3700 int 3701 if_ringmap_cpumap(const struct if_ringmap *rm, int ring) 3702 { 3703 3704 KASSERT(ring >= 0 && ring < rm->rm_cnt, ("invalid ring %d", ring)); 3705 return (rm->rm_cpumap[ring]); 3706 } 3707 3708 void 3709 if_ringmap_rdrtable(const struct if_ringmap *rm, int table[], int table_nent) 3710 { 3711 int i, grid_idx, grid_cnt, patch_off, patch_cnt, ncopy; 3712 3713 KASSERT(table_nent > 0 && (table_nent & NETISR_CPUMASK) == 0, 3714 ("invalid redirect table entries %d", table_nent)); 3715 3716 grid_idx = 0; 3717 for (i = 0; i < NETISR_CPUMAX; ++i) { 3718 table[i] = grid_idx++ % rm->rm_cnt; 3719 3720 if (grid_idx == rm->rm_grid) 3721 grid_idx = 0; 3722 } 3723 3724 /* 3725 * Make the ring distributed more evenly for the remainder 3726 * of each grid. 3727 * 3728 * e.g. 12 netisrs, rm contains 8 rings. 3729 * 3730 * Redirect table before: 3731 * 3732 * 0 1 2 3 4 5 6 7 0 1 2 3 0 1 2 3 3733 * 4 5 6 7 0 1 2 3 0 1 2 3 4 5 6 7 3734 * 0 1 2 3 0 1 2 3 4 5 6 7 0 1 2 3 3735 * .... 
3736 * 3737 * Redirect table after being patched (pX, patched entries): 3738 * 3739 * 0 1 2 3 4 5 6 7 p0 p1 p2 p3 0 1 2 3 3740 * 4 5 6 7 p4 p5 p6 p7 0 1 2 3 4 5 6 7 3741 * p0 p1 p2 p3 0 1 2 3 4 5 6 7 p4 p5 p6 p7 3742 * .... 3743 */ 3744 patch_cnt = rm->rm_grid % rm->rm_cnt; 3745 if (patch_cnt == 0) 3746 goto done; 3747 patch_off = rm->rm_grid - (rm->rm_grid % rm->rm_cnt); 3748 3749 grid_cnt = roundup(NETISR_CPUMAX, rm->rm_grid) / rm->rm_grid; 3750 grid_idx = 0; 3751 for (i = 0; i < grid_cnt; ++i) { 3752 int j; 3753 3754 for (j = 0; j < patch_cnt; ++j) { 3755 int fix_idx; 3756 3757 fix_idx = (i * rm->rm_grid) + patch_off + j; 3758 if (fix_idx >= NETISR_CPUMAX) 3759 goto done; 3760 table[fix_idx] = grid_idx++ % rm->rm_cnt; 3761 } 3762 } 3763 done: 3764 /* 3765 * If the device supports larger redirect table, duplicate 3766 * the first NETISR_CPUMAX entries to the rest of the table, 3767 * so that it matches upper layer's expectation: 3768 * (hash & NETISR_CPUMASK) % netisr_ncpus 3769 */ 3770 ncopy = table_nent / NETISR_CPUMAX; 3771 for (i = 1; i < ncopy; ++i) { 3772 memcpy(&table[i * NETISR_CPUMAX], table, 3773 NETISR_CPUMAX * sizeof(table[0])); 3774 } 3775 if (if_ringmap_dumprdr) { 3776 for (i = 0; i < table_nent; ++i) { 3777 if (i != 0 && i % 16 == 0) 3778 kprintf("\n"); 3779 kprintf("%03d ", table[i]); 3780 } 3781 kprintf("\n"); 3782 } 3783 } 3784 3785 int 3786 if_ringmap_cpumap_sysctl(SYSCTL_HANDLER_ARGS) 3787 { 3788 struct if_ringmap *rm = arg1; 3789 int i, error = 0; 3790 3791 for (i = 0; i < rm->rm_cnt; ++i) { 3792 int cpu = rm->rm_cpumap[i]; 3793 3794 error = SYSCTL_OUT(req, &cpu, sizeof(cpu)); 3795 if (error) 3796 break; 3797 } 3798 return (error); 3799 } 3800 3801 int 3802 if_ring_count2(int ring_cnt, int ring_cntmax) 3803 { 3804 3805 ring_cnt = if_ringcnt_fixup(ring_cnt, ring_cntmax); 3806 return (1 << (fls(ring_cnt) - 1)); 3807 } 3808
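/*
 * Illustrative sketch, not part of this file: how a hypothetical multi-ring
 * NIC driver might consume the if_ringmap API above during attach.  The
 * "mydrv" name, the requested ring counts and the redirect table size
 * (NETISR_CPUMAX * 2 entries) are all made up for the example.
 */
#if 0
static void
mydrv_setup_rings(device_t dev)
{
	struct if_ringmap *rx_rmap, *tx_rmap;
	int rdr_table[NETISR_CPUMAX * 2];
	int i;

	/* Ask for up to 8 RX and 4 TX rings; clipped against netisr_ncpus. */
	rx_rmap = if_ringmap_alloc(dev, 8, 8);
	tx_rmap = if_ringmap_alloc(dev, 4, 4);

	/* Keep the RX and TX rings on a compatible CPU grid. */
	if_ringmap_match(dev, rx_rmap, tx_rmap);

	for (i = 0; i < if_ringmap_count(rx_rmap); ++i) {
		/* Bind RX ring i (e.g. its MSI-X vector) to this CPU. */
		device_printf(dev, "RX ring %d -> cpu%d\n", i,
		    if_ringmap_cpumap(rx_rmap, i));
	}

	/* Build the RSS redirect table to program into the hardware. */
	if_ringmap_rdrtable(rx_rmap, rdr_table, NETISR_CPUMAX * 2);

	/* A real driver would keep the maps in its softc until detach. */
	if_ringmap_free(rx_rmap);
	if_ringmap_free(tx_rmap);
}
#endif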