1 /* 2 * Copyright (c) 1980, 1986, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)if.c 8.3 (Berkeley) 1/4/94 30 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $ 31 */ 32 33 #include "opt_inet6.h" 34 #include "opt_inet.h" 35 #include "opt_ifpoll.h" 36 37 #include <sys/param.h> 38 #include <sys/malloc.h> 39 #include <sys/mbuf.h> 40 #include <sys/systm.h> 41 #include <sys/proc.h> 42 #include <sys/priv.h> 43 #include <sys/protosw.h> 44 #include <sys/socket.h> 45 #include <sys/socketvar.h> 46 #include <sys/socketops.h> 47 #include <sys/kernel.h> 48 #include <sys/ktr.h> 49 #include <sys/mutex.h> 50 #include <sys/sockio.h> 51 #include <sys/syslog.h> 52 #include <sys/sysctl.h> 53 #include <sys/domain.h> 54 #include <sys/thread.h> 55 #include <sys/serialize.h> 56 #include <sys/bus.h> 57 58 #include <sys/thread2.h> 59 #include <sys/msgport2.h> 60 #include <sys/mutex2.h> 61 62 #include <net/if.h> 63 #include <net/if_arp.h> 64 #include <net/if_dl.h> 65 #include <net/if_types.h> 66 #include <net/if_var.h> 67 #include <net/if_ringmap.h> 68 #include <net/ifq_var.h> 69 #include <net/radix.h> 70 #include <net/route.h> 71 #include <net/if_clone.h> 72 #include <net/netisr2.h> 73 #include <net/netmsg2.h> 74 75 #include <machine/atomic.h> 76 #include <machine/stdarg.h> 77 #include <machine/smp.h> 78 79 #if defined(INET) || defined(INET6) 80 /*XXX*/ 81 #include <netinet/in.h> 82 #include <netinet/in_var.h> 83 #include <netinet/if_ether.h> 84 #ifdef INET6 85 #include <netinet6/in6_var.h> 86 #include <netinet6/in6_ifattach.h> 87 #endif 88 #endif 89 90 struct netmsg_ifaddr { 91 struct netmsg_base base; 92 struct ifaddr *ifa; 93 struct ifnet *ifp; 94 int tail; 95 }; 96 97 struct ifsubq_stage_head { 98 TAILQ_HEAD(, ifsubq_stage) stg_head; 99 } __cachealign; 100 101 struct if_ringmap { 102 int rm_cnt; 103 int rm_grid; 104 int rm_cpumap[]; 105 }; 106 107 #define RINGMAP_FLAG_NONE 0x0 108 #define RINGMAP_FLAG_POWEROF2 0x1 109 110 /* 111 * System initialization 112 */ 113 
static void	if_attachdomain(void *);
static void	if_attachdomain1(struct ifnet *);
static int	ifconf(u_long, caddr_t, struct ucred *);
static void	ifinit(void *);
static void	ifnetinit(void *);
static void	if_slowtimo(void *);
static void	link_rtrequest(int, struct rtentry *);
static int	if_rtdel(struct radix_node *, void *);
static void	if_slowtimo_dispatch(netmsg_t);

/* Helper functions */
static void	ifsq_watchdog_reset(struct ifsubq_watchdog *);
static int	if_delmulti_serialized(struct ifnet *, struct sockaddr *);
static struct ifnet_array *ifnet_array_alloc(int);
static void	ifnet_array_free(struct ifnet_array *);
static struct ifnet_array *ifnet_array_add(struct ifnet *,
		    const struct ifnet_array *);
static struct ifnet_array *ifnet_array_del(struct ifnet *,
		    const struct ifnet_array *);

#ifdef INET6
/*
 * XXX: declared here to avoid including many inet6-related files;
 * should this be more generalized?
 */
extern void	nd6_setmtu(struct ifnet *);
#endif

SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
SYSCTL_NODE(_net_link, OID_AUTO, ringmap, CTLFLAG_RW, 0, "link ringmap");

static int ifsq_stage_cntmax = 16;
TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax);
SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW,
    &ifsq_stage_cntmax, 0, "ifq staging packet count max");

static int if_stats_compat = 0;
SYSCTL_INT(_net_link, OID_AUTO, stats_compat, CTLFLAG_RW,
    &if_stats_compat, 0, "Maintain the old ifnet stats for compatibility");

static int if_ringmap_dumprdr = 0;
SYSCTL_INT(_net_link_ringmap, OID_AUTO, dump_rdr, CTLFLAG_RW,
    &if_ringmap_dumprdr, 0, "dump redirect table");

SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL);
SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, ifnetinit, NULL);

static if_com_alloc_t *if_com_alloc[256];
static if_com_free_t *if_com_free[256];

MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");

int			ifqmaxlen = IFQ_MAXLEN;
struct ifnethead	ifnetlist = TAILQ_HEAD_INITIALIZER(ifnetlist);

static struct ifnet_array	ifnet_array0;
static struct ifnet_array	*ifnet_array = &ifnet_array0;

static struct callout		if_slowtimo_timer;
static struct netmsg_base	if_slowtimo_netmsg;

int			if_index = 0;
struct ifnet		**ifindex2ifnet = NULL;
static struct mtx	ifnet_mtx = MTX_INITIALIZER("ifnet");

static struct ifsubq_stage_head	ifsubq_stage_heads[MAXCPU];

#ifdef notyet
#define IFQ_KTR_STRING		"ifq=%p"
#define IFQ_KTR_ARGS		struct ifaltq *ifq
#ifndef KTR_IFQ
#define KTR_IFQ			KTR_ALL
#endif
KTR_INFO_MASTER(ifq);
KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS);
KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS);
#define logifq(name, arg)	KTR_LOG(ifq_ ## name, arg)

#define IF_START_KTR_STRING	"ifp=%p"
#define IF_START_KTR_ARGS	struct ifnet *ifp
#ifndef KTR_IF_START
#define KTR_IF_START		KTR_ALL
#endif
KTR_INFO_MASTER(if_start);
KTR_INFO(KTR_IF_START, if_start, run, 0,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, sched, 1,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, avoid, 2, 205 IF_START_KTR_STRING, IF_START_KTR_ARGS); 206 KTR_INFO(KTR_IF_START, if_start, contend_sched, 3, 207 IF_START_KTR_STRING, IF_START_KTR_ARGS); 208 KTR_INFO(KTR_IF_START, if_start, chase_sched, 4, 209 IF_START_KTR_STRING, IF_START_KTR_ARGS); 210 #define logifstart(name, arg) KTR_LOG(if_start_ ## name, arg) 211 #endif 212 213 TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head); 214 215 /* 216 * Network interface utility routines. 217 * 218 * Routines with ifa_ifwith* names take sockaddr *'s as 219 * parameters. 220 */ 221 /* ARGSUSED*/ 222 static void 223 ifinit(void *dummy) 224 { 225 226 callout_init_mp(&if_slowtimo_timer); 227 netmsg_init(&if_slowtimo_netmsg, NULL, &netisr_adone_rport, 228 MSGF_PRIORITY, if_slowtimo_dispatch); 229 230 /* Start if_slowtimo */ 231 lwkt_sendmsg(netisr_cpuport(0), &if_slowtimo_netmsg.lmsg); 232 } 233 234 static void 235 ifsq_ifstart_ipifunc(void *arg) 236 { 237 struct ifaltq_subque *ifsq = arg; 238 struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid); 239 240 crit_enter(); 241 if (lmsg->ms_flags & MSGF_DONE) 242 lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), lmsg); 243 crit_exit(); 244 } 245 246 static __inline void 247 ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage) 248 { 249 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED); 250 TAILQ_REMOVE(&head->stg_head, stage, stg_link); 251 stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED); 252 stage->stg_cnt = 0; 253 stage->stg_len = 0; 254 } 255 256 static __inline void 257 ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage) 258 { 259 KKASSERT((stage->stg_flags & 260 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 261 stage->stg_flags |= IFSQ_STAGE_FLAG_QUED; 262 TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link); 263 } 264 265 /* 266 * Schedule ifnet.if_start on the subqueue owner CPU 267 */ 268 static void 269 ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force) 270 { 271 int cpu; 272 273 if (!force && curthread->td_type == TD_TYPE_NETISR && 274 ifsq_stage_cntmax > 0) { 275 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid); 276 277 stage->stg_cnt = 0; 278 stage->stg_len = 0; 279 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0) 280 ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage); 281 stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED; 282 return; 283 } 284 285 cpu = ifsq_get_cpuid(ifsq); 286 if (cpu != mycpuid) 287 lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq); 288 else 289 ifsq_ifstart_ipifunc(ifsq); 290 } 291 292 /* 293 * NOTE: 294 * This function will release ifnet.if_start subqueue interlock, 295 * if ifnet.if_start for the subqueue does not need to be scheduled 296 */ 297 static __inline int 298 ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running) 299 { 300 if (!running || ifsq_is_empty(ifsq) 301 #ifdef ALTQ 302 || ifsq->ifsq_altq->altq_tbr != NULL 303 #endif 304 ) { 305 ALTQ_SQ_LOCK(ifsq); 306 /* 307 * ifnet.if_start subqueue interlock is released, if: 308 * 1) Hardware can not take any packets, due to 309 * o interface is marked down 310 * o hardware queue is full (ifsq_is_oactive) 311 * Under the second situation, hardware interrupt 312 * or polling(4) will call/schedule ifnet.if_start 313 * on the subqueue when hardware queue is ready 314 * 2) There is no packet in the subqueue. 315 * Further ifq_dispatch or ifq_handoff will call/ 316 * schedule ifnet.if_start on the subqueue. 
		 * 3) TBR is used and it does not allow further
		 *    dequeueing.
		 *    TBR callout will call ifnet.if_start on the
		 *    subqueue.
		 */
		if (!running || !ifsq_data_ready(ifsq)) {
			ifsq_clr_started(ifsq);
			ALTQ_SQ_UNLOCK(ifsq);
			return 0;
		}
		ALTQ_SQ_UNLOCK(ifsq);
	}
	return 1;
}

static void
ifsq_ifstart_dispatch(netmsg_t msg)
{
	struct lwkt_msg *lmsg = &msg->base.lmsg;
	struct ifaltq_subque *ifsq = lmsg->u.ms_resultp;
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct globaldata *gd = mycpu;
	int running = 0, need_sched;

	crit_enter_gd(gd);

	lwkt_replymsg(lmsg, 0);	/* reply ASAP */

	if (gd->gd_cpuid != ifsq_get_cpuid(ifsq)) {
		/*
		 * We need to chase the subqueue owner CPU change.
		 */
		ifsq_ifstart_schedule(ifsq, 1);
		crit_exit_gd(gd);
		return;
	}

	ifsq_serialize_hw(ifsq);
	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
		ifp->if_start(ifp, ifsq);
		if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
			running = 1;
	}
	need_sched = ifsq_ifstart_need_schedule(ifsq, running);
	ifsq_deserialize_hw(ifsq);

	if (need_sched) {
		/*
		 * More data needs to be transmitted, ifnet.if_start is
		 * scheduled on the subqueue owner CPU, and we keep going.
		 * NOTE: ifnet.if_start subqueue interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, 0);
	}

	crit_exit_gd(gd);
}

/* Device driver ifnet.if_start helper function */
void
ifsq_devstart(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	int running = 0;

	ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq);

	ALTQ_SQ_LOCK(ifsq);
	if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) {
		ALTQ_SQ_UNLOCK(ifsq);
		return;
	}
	ifsq_set_started(ifsq);
	ALTQ_SQ_UNLOCK(ifsq);

	ifp->if_start(ifp, ifsq);

	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
		running = 1;

	if (ifsq_ifstart_need_schedule(ifsq, running)) {
		/*
		 * More data needs to be transmitted, ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: ifnet.if_start interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, 0);
	}
}

void
if_devstart(struct ifnet *ifp)
{
	ifsq_devstart(ifq_get_subq_default(&ifp->if_snd));
}

/* Device driver ifnet.if_start schedule helper function */
void
ifsq_devstart_sched(struct ifaltq_subque *ifsq)
{
	ifsq_ifstart_schedule(ifsq, 1);
}

void
if_devstart_sched(struct ifnet *ifp)
{
	ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd));
}

static void
if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_enter(ifp->if_serializer);
}

static void
if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	return lwkt_serialize_try(ifp->if_serializer);
}

#ifdef INVARIANTS
static void
if_default_serialize_assert(struct ifnet *ifp,
			    enum ifnet_serialize slz __unused,
			    boolean_t serialized)
{
	if (serialized)
		ASSERT_SERIALIZED(ifp->if_serializer);
	else
		ASSERT_NOT_SERIALIZED(ifp->if_serializer);
}
#endif

/*
 * Attach an interface to the list of "active" interfaces.
459 * 460 * The serializer is optional. 461 */ 462 void 463 if_attach(struct ifnet *ifp, lwkt_serialize_t serializer) 464 { 465 unsigned socksize; 466 int namelen, masklen; 467 struct sockaddr_dl *sdl, *sdl_addr; 468 struct ifaddr *ifa; 469 struct ifaltq *ifq; 470 struct ifnet **old_ifindex2ifnet = NULL; 471 struct ifnet_array *old_ifnet_array; 472 int i, q, qlen; 473 char qlenname[64]; 474 475 static int if_indexlim = 8; 476 477 if (ifp->if_serialize != NULL) { 478 KASSERT(ifp->if_deserialize != NULL && 479 ifp->if_tryserialize != NULL && 480 ifp->if_serialize_assert != NULL, 481 ("serialize functions are partially setup")); 482 483 /* 484 * If the device supplies serialize functions, 485 * then clear if_serializer to catch any invalid 486 * usage of this field. 487 */ 488 KASSERT(serializer == NULL, 489 ("both serialize functions and default serializer " 490 "are supplied")); 491 ifp->if_serializer = NULL; 492 } else { 493 KASSERT(ifp->if_deserialize == NULL && 494 ifp->if_tryserialize == NULL && 495 ifp->if_serialize_assert == NULL, 496 ("serialize functions are partially setup")); 497 ifp->if_serialize = if_default_serialize; 498 ifp->if_deserialize = if_default_deserialize; 499 ifp->if_tryserialize = if_default_tryserialize; 500 #ifdef INVARIANTS 501 ifp->if_serialize_assert = if_default_serialize_assert; 502 #endif 503 504 /* 505 * The serializer can be passed in from the device, 506 * allowing the same serializer to be used for both 507 * the interrupt interlock and the device queue. 508 * If not specified, the netif structure will use an 509 * embedded serializer. 510 */ 511 if (serializer == NULL) { 512 serializer = &ifp->if_default_serializer; 513 lwkt_serialize_init(serializer); 514 } 515 ifp->if_serializer = serializer; 516 } 517 518 /* 519 * Make if_addrhead available on all CPUs, since they 520 * could be accessed by any threads. 521 */ 522 ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead), 523 M_IFADDR, M_WAITOK | M_ZERO); 524 for (i = 0; i < ncpus; ++i) 525 TAILQ_INIT(&ifp->if_addrheads[i]); 526 527 TAILQ_INIT(&ifp->if_multiaddrs); 528 TAILQ_INIT(&ifp->if_groups); 529 getmicrotime(&ifp->if_lastchange); 530 531 /* 532 * create a Link Level name for this device 533 */ 534 namelen = strlen(ifp->if_xname); 535 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen; 536 socksize = masklen + ifp->if_addrlen; 537 if (socksize < sizeof(*sdl)) 538 socksize = sizeof(*sdl); 539 socksize = RT_ROUNDUP(socksize); 540 ifa = ifa_create(sizeof(struct ifaddr) + 2 * socksize); 541 sdl = sdl_addr = (struct sockaddr_dl *)(ifa + 1); 542 sdl->sdl_len = socksize; 543 sdl->sdl_family = AF_LINK; 544 bcopy(ifp->if_xname, sdl->sdl_data, namelen); 545 sdl->sdl_nlen = namelen; 546 sdl->sdl_type = ifp->if_type; 547 ifp->if_lladdr = ifa; 548 ifa->ifa_ifp = ifp; 549 ifa->ifa_rtrequest = link_rtrequest; 550 ifa->ifa_addr = (struct sockaddr *)sdl; 551 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); 552 ifa->ifa_netmask = (struct sockaddr *)sdl; 553 sdl->sdl_len = masklen; 554 while (namelen != 0) 555 sdl->sdl_data[--namelen] = 0xff; 556 ifa_iflink(ifa, ifp, 0 /* Insert head */); 557 558 /* 559 * Make if_data available on all CPUs, since they could 560 * be updated by hardware interrupt routing, which could 561 * be bound to any CPU. 
562 */ 563 ifp->if_data_pcpu = kmalloc_cachealign( 564 ncpus * sizeof(struct ifdata_pcpu), M_DEVBUF, M_WAITOK | M_ZERO); 565 566 if (ifp->if_mapsubq == NULL) 567 ifp->if_mapsubq = ifq_mapsubq_default; 568 569 ifq = &ifp->if_snd; 570 ifq->altq_type = 0; 571 ifq->altq_disc = NULL; 572 ifq->altq_flags &= ALTQF_CANTCHANGE; 573 ifq->altq_tbr = NULL; 574 ifq->altq_ifp = ifp; 575 576 if (ifq->altq_subq_cnt <= 0) 577 ifq->altq_subq_cnt = 1; 578 ifq->altq_subq = kmalloc_cachealign( 579 ifq->altq_subq_cnt * sizeof(struct ifaltq_subque), 580 M_DEVBUF, M_WAITOK | M_ZERO); 581 582 if (ifq->altq_maxlen == 0) { 583 if_printf(ifp, "driver didn't set altq_maxlen\n"); 584 ifq_set_maxlen(ifq, ifqmaxlen); 585 } 586 587 /* Allow user to override driver's setting. */ 588 ksnprintf(qlenname, sizeof(qlenname), "net.%s.qlenmax", ifp->if_xname); 589 qlen = -1; 590 TUNABLE_INT_FETCH(qlenname, &qlen); 591 if (qlen > 0) { 592 if_printf(ifp, "qlenmax -> %d\n", qlen); 593 ifq_set_maxlen(ifq, qlen); 594 } 595 596 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 597 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 598 599 ALTQ_SQ_LOCK_INIT(ifsq); 600 ifsq->ifsq_index = q; 601 602 ifsq->ifsq_altq = ifq; 603 ifsq->ifsq_ifp = ifp; 604 605 ifsq->ifsq_maxlen = ifq->altq_maxlen; 606 ifsq->ifsq_maxbcnt = ifsq->ifsq_maxlen * MCLBYTES; 607 ifsq->ifsq_prepended = NULL; 608 ifsq->ifsq_started = 0; 609 ifsq->ifsq_hw_oactive = 0; 610 ifsq_set_cpuid(ifsq, 0); 611 if (ifp->if_serializer != NULL) 612 ifsq_set_hw_serialize(ifsq, ifp->if_serializer); 613 614 /* XXX: netisr_ncpus */ 615 ifsq->ifsq_stage = 616 kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage), 617 M_DEVBUF, M_WAITOK | M_ZERO); 618 for (i = 0; i < ncpus; ++i) 619 ifsq->ifsq_stage[i].stg_subq = ifsq; 620 621 /* 622 * Allocate one if_start message for each CPU, since 623 * the hardware TX ring could be assigned to any CPU. 624 * 625 * NOTE: 626 * If the hardware TX ring polling CPU and the hardware 627 * TX ring interrupt CPU are same, one if_start message 628 * should be enough. 629 */ 630 ifsq->ifsq_ifstart_nmsg = 631 kmalloc(ncpus * sizeof(struct netmsg_base), 632 M_LWKTMSG, M_WAITOK); 633 for (i = 0; i < ncpus; ++i) { 634 netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL, 635 &netisr_adone_rport, 0, ifsq_ifstart_dispatch); 636 ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq; 637 } 638 } 639 ifq_set_classic(ifq); 640 641 /* 642 * Increase mbuf cluster/jcluster limits for the mbufs that 643 * could sit on the device queues for quite some time. 644 */ 645 if (ifp->if_nmbclusters > 0) 646 mcl_inclimit(ifp->if_nmbclusters); 647 if (ifp->if_nmbjclusters > 0) 648 mjcl_inclimit(ifp->if_nmbjclusters); 649 650 /* 651 * Install this ifp into ifindex2inet, ifnet queue and ifnet 652 * array after it is setup. 653 * 654 * Protect ifindex2ifnet, ifnet queue and ifnet array changes 655 * by ifnet lock, so that non-netisr threads could get a 656 * consistent view. 
657 */ 658 ifnet_lock(); 659 660 /* Don't update if_index until ifindex2ifnet is setup */ 661 ifp->if_index = if_index + 1; 662 sdl_addr->sdl_index = ifp->if_index; 663 664 /* 665 * Install this ifp into ifindex2ifnet 666 */ 667 if (ifindex2ifnet == NULL || ifp->if_index >= if_indexlim) { 668 unsigned int n; 669 struct ifnet **q; 670 671 /* 672 * Grow ifindex2ifnet 673 */ 674 if_indexlim <<= 1; 675 n = if_indexlim * sizeof(*q); 676 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO); 677 if (ifindex2ifnet != NULL) { 678 bcopy(ifindex2ifnet, q, n/2); 679 /* Free old ifindex2ifnet after sync all netisrs */ 680 old_ifindex2ifnet = ifindex2ifnet; 681 } 682 ifindex2ifnet = q; 683 } 684 ifindex2ifnet[ifp->if_index] = ifp; 685 /* 686 * Update if_index after this ifp is installed into ifindex2ifnet, 687 * so that netisrs could get a consistent view of ifindex2ifnet. 688 */ 689 cpu_sfence(); 690 if_index = ifp->if_index; 691 692 /* 693 * Install this ifp into ifnet array. 694 */ 695 /* Free old ifnet array after sync all netisrs */ 696 old_ifnet_array = ifnet_array; 697 ifnet_array = ifnet_array_add(ifp, old_ifnet_array); 698 699 /* 700 * Install this ifp into ifnet queue. 701 */ 702 TAILQ_INSERT_TAIL(&ifnetlist, ifp, if_link); 703 704 ifnet_unlock(); 705 706 /* 707 * Sync all netisrs so that the old ifindex2ifnet and ifnet array 708 * are no longer accessed and we can free them safely later on. 709 */ 710 netmsg_service_sync(); 711 if (old_ifindex2ifnet != NULL) 712 kfree(old_ifindex2ifnet, M_IFADDR); 713 ifnet_array_free(old_ifnet_array); 714 715 if (!SLIST_EMPTY(&domains)) 716 if_attachdomain1(ifp); 717 718 /* Announce the interface. */ 719 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 720 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); 721 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 722 } 723 724 static void 725 if_attachdomain(void *dummy) 726 { 727 struct ifnet *ifp; 728 729 ifnet_lock(); 730 TAILQ_FOREACH(ifp, &ifnetlist, if_list) 731 if_attachdomain1(ifp); 732 ifnet_unlock(); 733 } 734 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST, 735 if_attachdomain, NULL); 736 737 static void 738 if_attachdomain1(struct ifnet *ifp) 739 { 740 struct domain *dp; 741 742 crit_enter(); 743 744 /* address family dependent data region */ 745 bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); 746 SLIST_FOREACH(dp, &domains, dom_next) 747 if (dp->dom_ifattach) 748 ifp->if_afdata[dp->dom_family] = 749 (*dp->dom_ifattach)(ifp); 750 crit_exit(); 751 } 752 753 /* 754 * Purge all addresses whose type is _not_ AF_LINK 755 */ 756 static void 757 if_purgeaddrs_nolink_dispatch(netmsg_t nmsg) 758 { 759 struct ifnet *ifp = nmsg->lmsg.u.ms_resultp; 760 struct ifaddr_container *ifac, *next; 761 762 ASSERT_NETISR0; 763 764 /* 765 * The ifaddr processing in the following loop will block, 766 * however, this function is called in netisr0, in which 767 * ifaddr list changes happen, so we don't care about the 768 * blockness of the ifaddr processing here. 769 */ 770 TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid], 771 ifa_link, next) { 772 struct ifaddr *ifa = ifac->ifa; 773 774 /* Ignore marker */ 775 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 776 continue; 777 778 /* Leave link ifaddr as it is */ 779 if (ifa->ifa_addr->sa_family == AF_LINK) 780 continue; 781 #ifdef INET 782 /* XXX: Ugly!! 
ad hoc just for INET */ 783 if (ifa->ifa_addr->sa_family == AF_INET) { 784 struct ifaliasreq ifr; 785 struct sockaddr_in saved_addr, saved_dst; 786 #ifdef IFADDR_DEBUG_VERBOSE 787 int i; 788 789 kprintf("purge in4 addr %p: ", ifa); 790 for (i = 0; i < ncpus; ++i) { 791 kprintf("%d ", 792 ifa->ifa_containers[i].ifa_refcnt); 793 } 794 kprintf("\n"); 795 #endif 796 797 /* Save information for panic. */ 798 memcpy(&saved_addr, ifa->ifa_addr, sizeof(saved_addr)); 799 if (ifa->ifa_dstaddr != NULL) { 800 memcpy(&saved_dst, ifa->ifa_dstaddr, 801 sizeof(saved_dst)); 802 } else { 803 memset(&saved_dst, 0, sizeof(saved_dst)); 804 } 805 806 bzero(&ifr, sizeof ifr); 807 ifr.ifra_addr = *ifa->ifa_addr; 808 if (ifa->ifa_dstaddr) 809 ifr.ifra_broadaddr = *ifa->ifa_dstaddr; 810 if (in_control(SIOCDIFADDR, (caddr_t)&ifr, ifp, 811 NULL) == 0) 812 continue; 813 814 /* MUST NOT HAPPEN */ 815 panic("%s: in_control failed %x, dst %x", ifp->if_xname, 816 ntohl(saved_addr.sin_addr.s_addr), 817 ntohl(saved_dst.sin_addr.s_addr)); 818 } 819 #endif /* INET */ 820 #ifdef INET6 821 if (ifa->ifa_addr->sa_family == AF_INET6) { 822 #ifdef IFADDR_DEBUG_VERBOSE 823 int i; 824 825 kprintf("purge in6 addr %p: ", ifa); 826 for (i = 0; i < ncpus; ++i) { 827 kprintf("%d ", 828 ifa->ifa_containers[i].ifa_refcnt); 829 } 830 kprintf("\n"); 831 #endif 832 833 in6_purgeaddr(ifa); 834 /* ifp_addrhead is already updated */ 835 continue; 836 } 837 #endif /* INET6 */ 838 if_printf(ifp, "destroy ifaddr family %d\n", 839 ifa->ifa_addr->sa_family); 840 ifa_ifunlink(ifa, ifp); 841 ifa_destroy(ifa); 842 } 843 844 netisr_replymsg(&nmsg->base, 0); 845 } 846 847 void 848 if_purgeaddrs_nolink(struct ifnet *ifp) 849 { 850 struct netmsg_base nmsg; 851 852 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0, 853 if_purgeaddrs_nolink_dispatch); 854 nmsg.lmsg.u.ms_resultp = ifp; 855 netisr_domsg(&nmsg, 0); 856 } 857 858 static void 859 ifq_stage_detach_handler(netmsg_t nmsg) 860 { 861 struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp; 862 int q; 863 864 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 865 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 866 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid); 867 868 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) 869 ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage); 870 } 871 lwkt_replymsg(&nmsg->lmsg, 0); 872 } 873 874 static void 875 ifq_stage_detach(struct ifaltq *ifq) 876 { 877 struct netmsg_base base; 878 int cpu; 879 880 netmsg_init(&base, NULL, &curthread->td_msgport, 0, 881 ifq_stage_detach_handler); 882 base.lmsg.u.ms_resultp = ifq; 883 884 /* XXX netisr_ncpus */ 885 for (cpu = 0; cpu < ncpus; ++cpu) 886 lwkt_domsg(netisr_cpuport(cpu), &base.lmsg, 0); 887 } 888 889 struct netmsg_if_rtdel { 890 struct netmsg_base base; 891 struct ifnet *ifp; 892 }; 893 894 static void 895 if_rtdel_dispatch(netmsg_t msg) 896 { 897 struct netmsg_if_rtdel *rmsg = (void *)msg; 898 int i, cpu; 899 900 cpu = mycpuid; 901 ASSERT_NETISR_NCPUS(cpu); 902 903 for (i = 1; i <= AF_MAX; i++) { 904 struct radix_node_head *rnh; 905 906 if ((rnh = rt_tables[cpu][i]) == NULL) 907 continue; 908 rnh->rnh_walktree(rnh, if_rtdel, rmsg->ifp); 909 } 910 netisr_forwardmsg(&msg->base, cpu + 1); 911 } 912 913 /* 914 * Detach an interface, removing it from the 915 * list of "active" interfaces. 916 */ 917 void 918 if_detach(struct ifnet *ifp) 919 { 920 struct ifnet_array *old_ifnet_array; 921 struct netmsg_if_rtdel msg; 922 struct domain *dp; 923 int q; 924 925 /* Announce that the interface is gone. 
 */
	EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
	rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
	devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);

	/*
	 * Remove this ifp from ifindex2ifnet, ifnet queue and ifnet
	 * array before it is whacked.
	 *
	 * Protect ifindex2ifnet, ifnet queue and ifnet array changes
	 * by ifnet lock, so that non-netisr threads could get a
	 * consistent view.
	 */
	ifnet_lock();

	/*
	 * Remove this ifp from ifindex2ifnet and maybe decrement if_index.
	 */
	ifindex2ifnet[ifp->if_index] = NULL;
	while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
		if_index--;

	/*
	 * Remove this ifp from ifnet queue.
	 */
	TAILQ_REMOVE(&ifnetlist, ifp, if_link);

	/*
	 * Remove this ifp from ifnet array.
	 */
	/* Free old ifnet array after sync all netisrs */
	old_ifnet_array = ifnet_array;
	ifnet_array = ifnet_array_del(ifp, old_ifnet_array);

	ifnet_unlock();

	/*
	 * Sync all netisrs so that the old ifnet array is no longer
	 * accessed and we can free it safely later on.
	 */
	netmsg_service_sync();
	ifnet_array_free(old_ifnet_array);

	/*
	 * Remove routes and flush queues.
	 */
	crit_enter();
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		ifpoll_deregister(ifp);
#endif
	if_down(ifp);

	/* Decrease the mbuf clusters/jclusters limits increased by us */
	if (ifp->if_nmbclusters > 0)
		mcl_inclimit(-ifp->if_nmbclusters);
	if (ifp->if_nmbjclusters > 0)
		mjcl_inclimit(-ifp->if_nmbjclusters);

#ifdef ALTQ
	if (ifq_is_enabled(&ifp->if_snd))
		altq_disable(&ifp->if_snd);
	if (ifq_is_attached(&ifp->if_snd))
		altq_detach(&ifp->if_snd);
#endif

	/*
	 * Clean up all addresses.
	 */
	ifp->if_lladdr = NULL;

	if_purgeaddrs_nolink(ifp);
	if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
		struct ifaddr *ifa;

		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
		    ("non-link ifaddr is left on if_addrheads"));

		ifa_ifunlink(ifa, ifp);
		ifa_destroy(ifa);
		KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
		    ("there are still ifaddrs left on if_addrheads"));
	}

#ifdef INET
	/*
	 * Remove all IPv4 kernel structures related to ifp.
	 */
	in_ifdetach(ifp);
#endif

#ifdef INET6
	/*
	 * Remove all IPv6 kernel structs related to ifp.  This should be done
	 * before removing routing entries below, since IPv6 interface direct
	 * routes are expected to be removed by the IPv6-specific kernel API.
	 * Otherwise, the kernel will detect some inconsistency and complain.
1023 */ 1024 in6_ifdetach(ifp); 1025 #endif 1026 1027 /* 1028 * Delete all remaining routes using this interface 1029 */ 1030 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, 1031 if_rtdel_dispatch); 1032 msg.ifp = ifp; 1033 netisr_domsg_global(&msg.base); 1034 1035 SLIST_FOREACH(dp, &domains, dom_next) 1036 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) 1037 (*dp->dom_ifdetach)(ifp, 1038 ifp->if_afdata[dp->dom_family]); 1039 1040 kfree(ifp->if_addrheads, M_IFADDR); 1041 1042 lwkt_synchronize_ipiqs("if_detach"); 1043 ifq_stage_detach(&ifp->if_snd); 1044 1045 for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) { 1046 struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q]; 1047 1048 kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG); 1049 kfree(ifsq->ifsq_stage, M_DEVBUF); 1050 } 1051 kfree(ifp->if_snd.altq_subq, M_DEVBUF); 1052 1053 kfree(ifp->if_data_pcpu, M_DEVBUF); 1054 1055 crit_exit(); 1056 } 1057 1058 /* 1059 * Create interface group without members 1060 */ 1061 struct ifg_group * 1062 if_creategroup(const char *groupname) 1063 { 1064 struct ifg_group *ifg = NULL; 1065 1066 if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group), 1067 M_TEMP, M_NOWAIT)) == NULL) 1068 return (NULL); 1069 1070 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); 1071 ifg->ifg_refcnt = 0; 1072 ifg->ifg_carp_demoted = 0; 1073 TAILQ_INIT(&ifg->ifg_members); 1074 #if NPF > 0 1075 pfi_attach_ifgroup(ifg); 1076 #endif 1077 TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next); 1078 1079 return (ifg); 1080 } 1081 1082 /* 1083 * Add a group to an interface 1084 */ 1085 int 1086 if_addgroup(struct ifnet *ifp, const char *groupname) 1087 { 1088 struct ifg_list *ifgl; 1089 struct ifg_group *ifg = NULL; 1090 struct ifg_member *ifgm; 1091 1092 if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' && 1093 groupname[strlen(groupname) - 1] <= '9') 1094 return (EINVAL); 1095 1096 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1097 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1098 return (EEXIST); 1099 1100 if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL) 1101 return (ENOMEM); 1102 1103 if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) { 1104 kfree(ifgl, M_TEMP); 1105 return (ENOMEM); 1106 } 1107 1108 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) 1109 if (!strcmp(ifg->ifg_group, groupname)) 1110 break; 1111 1112 if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) { 1113 kfree(ifgl, M_TEMP); 1114 kfree(ifgm, M_TEMP); 1115 return (ENOMEM); 1116 } 1117 1118 ifg->ifg_refcnt++; 1119 ifgl->ifgl_group = ifg; 1120 ifgm->ifgm_ifp = ifp; 1121 1122 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); 1123 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); 1124 1125 #if NPF > 0 1126 pfi_group_change(groupname); 1127 #endif 1128 1129 return (0); 1130 } 1131 1132 /* 1133 * Remove a group from an interface 1134 */ 1135 int 1136 if_delgroup(struct ifnet *ifp, const char *groupname) 1137 { 1138 struct ifg_list *ifgl; 1139 struct ifg_member *ifgm; 1140 1141 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1142 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1143 break; 1144 if (ifgl == NULL) 1145 return (ENOENT); 1146 1147 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); 1148 1149 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) 1150 if (ifgm->ifgm_ifp == ifp) 1151 break; 1152 1153 if (ifgm != NULL) { 1154 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); 1155 kfree(ifgm, M_TEMP); 1156 } 1157 1158 if (--ifgl->ifgl_group->ifg_refcnt == 0) { 
1159 TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next); 1160 #if NPF > 0 1161 pfi_detach_ifgroup(ifgl->ifgl_group); 1162 #endif 1163 kfree(ifgl->ifgl_group, M_TEMP); 1164 } 1165 1166 kfree(ifgl, M_TEMP); 1167 1168 #if NPF > 0 1169 pfi_group_change(groupname); 1170 #endif 1171 1172 return (0); 1173 } 1174 1175 /* 1176 * Stores all groups from an interface in memory pointed 1177 * to by data 1178 */ 1179 int 1180 if_getgroup(caddr_t data, struct ifnet *ifp) 1181 { 1182 int len, error; 1183 struct ifg_list *ifgl; 1184 struct ifg_req ifgrq, *ifgp; 1185 struct ifgroupreq *ifgr = (struct ifgroupreq *)data; 1186 1187 if (ifgr->ifgr_len == 0) { 1188 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1189 ifgr->ifgr_len += sizeof(struct ifg_req); 1190 return (0); 1191 } 1192 1193 len = ifgr->ifgr_len; 1194 ifgp = ifgr->ifgr_groups; 1195 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { 1196 if (len < sizeof(ifgrq)) 1197 return (EINVAL); 1198 bzero(&ifgrq, sizeof ifgrq); 1199 strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group, 1200 sizeof(ifgrq.ifgrq_group)); 1201 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, 1202 sizeof(struct ifg_req)))) 1203 return (error); 1204 len -= sizeof(ifgrq); 1205 ifgp++; 1206 } 1207 1208 return (0); 1209 } 1210 1211 /* 1212 * Stores all members of a group in memory pointed to by data 1213 */ 1214 int 1215 if_getgroupmembers(caddr_t data) 1216 { 1217 struct ifgroupreq *ifgr = (struct ifgroupreq *)data; 1218 struct ifg_group *ifg; 1219 struct ifg_member *ifgm; 1220 struct ifg_req ifgrq, *ifgp; 1221 int len, error; 1222 1223 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) 1224 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) 1225 break; 1226 if (ifg == NULL) 1227 return (ENOENT); 1228 1229 if (ifgr->ifgr_len == 0) { 1230 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) 1231 ifgr->ifgr_len += sizeof(ifgrq); 1232 return (0); 1233 } 1234 1235 len = ifgr->ifgr_len; 1236 ifgp = ifgr->ifgr_groups; 1237 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) { 1238 if (len < sizeof(ifgrq)) 1239 return (EINVAL); 1240 bzero(&ifgrq, sizeof ifgrq); 1241 strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname, 1242 sizeof(ifgrq.ifgrq_member)); 1243 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, 1244 sizeof(struct ifg_req)))) 1245 return (error); 1246 len -= sizeof(ifgrq); 1247 ifgp++; 1248 } 1249 1250 return (0); 1251 } 1252 1253 /* 1254 * Delete Routes for a Network Interface 1255 * 1256 * Called for each routing entry via the rnh->rnh_walktree() call above 1257 * to delete all route entries referencing a detaching network interface. 
1258 * 1259 * Arguments: 1260 * rn pointer to node in the routing table 1261 * arg argument passed to rnh->rnh_walktree() - detaching interface 1262 * 1263 * Returns: 1264 * 0 successful 1265 * errno failed - reason indicated 1266 * 1267 */ 1268 static int 1269 if_rtdel(struct radix_node *rn, void *arg) 1270 { 1271 struct rtentry *rt = (struct rtentry *)rn; 1272 struct ifnet *ifp = arg; 1273 int err; 1274 1275 if (rt->rt_ifp == ifp) { 1276 1277 /* 1278 * Protect (sorta) against walktree recursion problems 1279 * with cloned routes 1280 */ 1281 if (!(rt->rt_flags & RTF_UP)) 1282 return (0); 1283 1284 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, 1285 rt_mask(rt), rt->rt_flags, 1286 NULL); 1287 if (err) { 1288 log(LOG_WARNING, "if_rtdel: error %d\n", err); 1289 } 1290 } 1291 1292 return (0); 1293 } 1294 1295 static __inline boolean_t 1296 ifa_prefer(const struct ifaddr *cur_ifa, const struct ifaddr *old_ifa) 1297 { 1298 if (old_ifa == NULL) 1299 return TRUE; 1300 1301 if ((old_ifa->ifa_ifp->if_flags & IFF_UP) == 0 && 1302 (cur_ifa->ifa_ifp->if_flags & IFF_UP)) 1303 return TRUE; 1304 if ((old_ifa->ifa_flags & IFA_ROUTE) == 0 && 1305 (cur_ifa->ifa_flags & IFA_ROUTE)) 1306 return TRUE; 1307 return FALSE; 1308 } 1309 1310 /* 1311 * Locate an interface based on a complete address. 1312 */ 1313 struct ifaddr * 1314 ifa_ifwithaddr(struct sockaddr *addr) 1315 { 1316 const struct ifnet_array *arr; 1317 int i; 1318 1319 arr = ifnet_array_get(); 1320 for (i = 0; i < arr->ifnet_count; ++i) { 1321 struct ifnet *ifp = arr->ifnet_arr[i]; 1322 struct ifaddr_container *ifac; 1323 1324 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1325 struct ifaddr *ifa = ifac->ifa; 1326 1327 if (ifa->ifa_addr->sa_family != addr->sa_family) 1328 continue; 1329 if (sa_equal(addr, ifa->ifa_addr)) 1330 return (ifa); 1331 if ((ifp->if_flags & IFF_BROADCAST) && 1332 ifa->ifa_broadaddr && 1333 /* IPv6 doesn't have broadcast */ 1334 ifa->ifa_broadaddr->sa_len != 0 && 1335 sa_equal(ifa->ifa_broadaddr, addr)) 1336 return (ifa); 1337 } 1338 } 1339 return (NULL); 1340 } 1341 1342 /* 1343 * Locate the point to point interface with a given destination address. 1344 */ 1345 struct ifaddr * 1346 ifa_ifwithdstaddr(struct sockaddr *addr) 1347 { 1348 const struct ifnet_array *arr; 1349 int i; 1350 1351 arr = ifnet_array_get(); 1352 for (i = 0; i < arr->ifnet_count; ++i) { 1353 struct ifnet *ifp = arr->ifnet_arr[i]; 1354 struct ifaddr_container *ifac; 1355 1356 if (!(ifp->if_flags & IFF_POINTOPOINT)) 1357 continue; 1358 1359 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1360 struct ifaddr *ifa = ifac->ifa; 1361 1362 if (ifa->ifa_addr->sa_family != addr->sa_family) 1363 continue; 1364 if (ifa->ifa_dstaddr && 1365 sa_equal(addr, ifa->ifa_dstaddr)) 1366 return (ifa); 1367 } 1368 } 1369 return (NULL); 1370 } 1371 1372 /* 1373 * Find an interface on a specific network. If many, choice 1374 * is most specific found. 1375 */ 1376 struct ifaddr * 1377 ifa_ifwithnet(struct sockaddr *addr) 1378 { 1379 struct ifaddr *ifa_maybe = NULL; 1380 u_int af = addr->sa_family; 1381 char *addr_data = addr->sa_data, *cplim; 1382 const struct ifnet_array *arr; 1383 int i; 1384 1385 /* 1386 * AF_LINK addresses can be looked up directly by their index number, 1387 * so do that if we can. 
1388 */ 1389 if (af == AF_LINK) { 1390 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr; 1391 1392 if (sdl->sdl_index && sdl->sdl_index <= if_index) 1393 return (ifindex2ifnet[sdl->sdl_index]->if_lladdr); 1394 } 1395 1396 /* 1397 * Scan though each interface, looking for ones that have 1398 * addresses in this address family. 1399 */ 1400 arr = ifnet_array_get(); 1401 for (i = 0; i < arr->ifnet_count; ++i) { 1402 struct ifnet *ifp = arr->ifnet_arr[i]; 1403 struct ifaddr_container *ifac; 1404 1405 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1406 struct ifaddr *ifa = ifac->ifa; 1407 char *cp, *cp2, *cp3; 1408 1409 if (ifa->ifa_addr->sa_family != af) 1410 next: continue; 1411 if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) { 1412 /* 1413 * This is a bit broken as it doesn't 1414 * take into account that the remote end may 1415 * be a single node in the network we are 1416 * looking for. 1417 * The trouble is that we don't know the 1418 * netmask for the remote end. 1419 */ 1420 if (ifa->ifa_dstaddr != NULL && 1421 sa_equal(addr, ifa->ifa_dstaddr)) 1422 return (ifa); 1423 } else { 1424 /* 1425 * if we have a special address handler, 1426 * then use it instead of the generic one. 1427 */ 1428 if (ifa->ifa_claim_addr) { 1429 if ((*ifa->ifa_claim_addr)(ifa, addr)) { 1430 return (ifa); 1431 } else { 1432 continue; 1433 } 1434 } 1435 1436 /* 1437 * Scan all the bits in the ifa's address. 1438 * If a bit dissagrees with what we are 1439 * looking for, mask it with the netmask 1440 * to see if it really matters. 1441 * (A byte at a time) 1442 */ 1443 if (ifa->ifa_netmask == 0) 1444 continue; 1445 cp = addr_data; 1446 cp2 = ifa->ifa_addr->sa_data; 1447 cp3 = ifa->ifa_netmask->sa_data; 1448 cplim = ifa->ifa_netmask->sa_len + 1449 (char *)ifa->ifa_netmask; 1450 while (cp3 < cplim) 1451 if ((*cp++ ^ *cp2++) & *cp3++) 1452 goto next; /* next address! */ 1453 /* 1454 * If the netmask of what we just found 1455 * is more specific than what we had before 1456 * (if we had one) then remember the new one 1457 * before continuing to search for an even 1458 * better one. If the netmasks are equal, 1459 * we prefer the this ifa based on the result 1460 * of ifa_prefer(). 1461 */ 1462 if (ifa_maybe == NULL || 1463 rn_refines((char *)ifa->ifa_netmask, 1464 (char *)ifa_maybe->ifa_netmask) || 1465 (sa_equal(ifa_maybe->ifa_netmask, 1466 ifa->ifa_netmask) && 1467 ifa_prefer(ifa, ifa_maybe))) 1468 ifa_maybe = ifa; 1469 } 1470 } 1471 } 1472 return (ifa_maybe); 1473 } 1474 1475 /* 1476 * Find an interface address specific to an interface best matching 1477 * a given address. 
1478 */ 1479 struct ifaddr * 1480 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp) 1481 { 1482 struct ifaddr_container *ifac; 1483 char *cp, *cp2, *cp3; 1484 char *cplim; 1485 struct ifaddr *ifa_maybe = NULL; 1486 u_int af = addr->sa_family; 1487 1488 if (af >= AF_MAX) 1489 return (0); 1490 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1491 struct ifaddr *ifa = ifac->ifa; 1492 1493 if (ifa->ifa_addr->sa_family != af) 1494 continue; 1495 if (ifa_maybe == NULL) 1496 ifa_maybe = ifa; 1497 if (ifa->ifa_netmask == NULL) { 1498 if (sa_equal(addr, ifa->ifa_addr) || 1499 (ifa->ifa_dstaddr != NULL && 1500 sa_equal(addr, ifa->ifa_dstaddr))) 1501 return (ifa); 1502 continue; 1503 } 1504 if (ifp->if_flags & IFF_POINTOPOINT) { 1505 if (sa_equal(addr, ifa->ifa_dstaddr)) 1506 return (ifa); 1507 } else { 1508 cp = addr->sa_data; 1509 cp2 = ifa->ifa_addr->sa_data; 1510 cp3 = ifa->ifa_netmask->sa_data; 1511 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; 1512 for (; cp3 < cplim; cp3++) 1513 if ((*cp++ ^ *cp2++) & *cp3) 1514 break; 1515 if (cp3 == cplim) 1516 return (ifa); 1517 } 1518 } 1519 return (ifa_maybe); 1520 } 1521 1522 /* 1523 * Default action when installing a route with a Link Level gateway. 1524 * Lookup an appropriate real ifa to point to. 1525 * This should be moved to /sys/net/link.c eventually. 1526 */ 1527 static void 1528 link_rtrequest(int cmd, struct rtentry *rt) 1529 { 1530 struct ifaddr *ifa; 1531 struct sockaddr *dst; 1532 struct ifnet *ifp; 1533 1534 if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL || 1535 (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL) 1536 return; 1537 ifa = ifaof_ifpforaddr(dst, ifp); 1538 if (ifa != NULL) { 1539 IFAFREE(rt->rt_ifa); 1540 IFAREF(ifa); 1541 rt->rt_ifa = ifa; 1542 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) 1543 ifa->ifa_rtrequest(cmd, rt); 1544 } 1545 } 1546 1547 struct netmsg_ifroute { 1548 struct netmsg_base base; 1549 struct ifnet *ifp; 1550 int flag; 1551 int fam; 1552 }; 1553 1554 /* 1555 * Mark an interface down and notify protocols of the transition. 1556 */ 1557 static void 1558 if_unroute_dispatch(netmsg_t nmsg) 1559 { 1560 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1561 struct ifnet *ifp = msg->ifp; 1562 int flag = msg->flag, fam = msg->fam; 1563 struct ifaddr_container *ifac; 1564 1565 ASSERT_NETISR0; 1566 1567 ifp->if_flags &= ~flag; 1568 getmicrotime(&ifp->if_lastchange); 1569 /* 1570 * The ifaddr processing in the following loop will block, 1571 * however, this function is called in netisr0, in which 1572 * ifaddr list changes happen, so we don't care about the 1573 * blockness of the ifaddr processing here. 1574 */ 1575 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1576 struct ifaddr *ifa = ifac->ifa; 1577 1578 /* Ignore marker */ 1579 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1580 continue; 1581 1582 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1583 kpfctlinput(PRC_IFDOWN, ifa->ifa_addr); 1584 } 1585 ifq_purge_all(&ifp->if_snd); 1586 rt_ifmsg(ifp); 1587 1588 netisr_replymsg(&nmsg->base, 0); 1589 } 1590 1591 void 1592 if_unroute(struct ifnet *ifp, int flag, int fam) 1593 { 1594 struct netmsg_ifroute msg; 1595 1596 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1597 if_unroute_dispatch); 1598 msg.ifp = ifp; 1599 msg.flag = flag; 1600 msg.fam = fam; 1601 netisr_domsg(&msg.base, 0); 1602 } 1603 1604 /* 1605 * Mark an interface up and notify protocols of the transition. 
1606 */ 1607 static void 1608 if_route_dispatch(netmsg_t nmsg) 1609 { 1610 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1611 struct ifnet *ifp = msg->ifp; 1612 int flag = msg->flag, fam = msg->fam; 1613 struct ifaddr_container *ifac; 1614 1615 ASSERT_NETISR0; 1616 1617 ifq_purge_all(&ifp->if_snd); 1618 ifp->if_flags |= flag; 1619 getmicrotime(&ifp->if_lastchange); 1620 /* 1621 * The ifaddr processing in the following loop will block, 1622 * however, this function is called in netisr0, in which 1623 * ifaddr list changes happen, so we don't care about the 1624 * blockness of the ifaddr processing here. 1625 */ 1626 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1627 struct ifaddr *ifa = ifac->ifa; 1628 1629 /* Ignore marker */ 1630 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1631 continue; 1632 1633 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1634 kpfctlinput(PRC_IFUP, ifa->ifa_addr); 1635 } 1636 rt_ifmsg(ifp); 1637 #ifdef INET6 1638 in6_if_up(ifp); 1639 #endif 1640 1641 netisr_replymsg(&nmsg->base, 0); 1642 } 1643 1644 void 1645 if_route(struct ifnet *ifp, int flag, int fam) 1646 { 1647 struct netmsg_ifroute msg; 1648 1649 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1650 if_route_dispatch); 1651 msg.ifp = ifp; 1652 msg.flag = flag; 1653 msg.fam = fam; 1654 netisr_domsg(&msg.base, 0); 1655 } 1656 1657 /* 1658 * Mark an interface down and notify protocols of the transition. An 1659 * interface going down is also considered to be a synchronizing event. 1660 * We must ensure that all packet processing related to the interface 1661 * has completed before we return so e.g. the caller can free the ifnet 1662 * structure that the mbufs may be referencing. 1663 * 1664 * NOTE: must be called at splnet or eqivalent. 1665 */ 1666 void 1667 if_down(struct ifnet *ifp) 1668 { 1669 if_unroute(ifp, IFF_UP, AF_UNSPEC); 1670 netmsg_service_sync(); 1671 } 1672 1673 /* 1674 * Mark an interface up and notify protocols of 1675 * the transition. 1676 * NOTE: must be called at splnet or eqivalent. 1677 */ 1678 void 1679 if_up(struct ifnet *ifp) 1680 { 1681 if_route(ifp, IFF_UP, AF_UNSPEC); 1682 } 1683 1684 /* 1685 * Process a link state change. 1686 * NOTE: must be called at splsoftnet or equivalent. 1687 */ 1688 void 1689 if_link_state_change(struct ifnet *ifp) 1690 { 1691 int link_state = ifp->if_link_state; 1692 1693 rt_ifmsg(ifp); 1694 devctl_notify("IFNET", ifp->if_xname, 1695 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL); 1696 } 1697 1698 /* 1699 * Handle interface watchdog timer routines. Called 1700 * from softclock, we decrement timers (if set) and 1701 * call the appropriate interface routine on expiration. 
1702 */ 1703 static void 1704 if_slowtimo_dispatch(netmsg_t nmsg) 1705 { 1706 struct globaldata *gd = mycpu; 1707 const struct ifnet_array *arr; 1708 int i; 1709 1710 ASSERT_NETISR0; 1711 1712 crit_enter_gd(gd); 1713 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */ 1714 crit_exit_gd(gd); 1715 1716 arr = ifnet_array_get(); 1717 for (i = 0; i < arr->ifnet_count; ++i) { 1718 struct ifnet *ifp = arr->ifnet_arr[i]; 1719 1720 crit_enter_gd(gd); 1721 1722 if (if_stats_compat) { 1723 IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets); 1724 IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors); 1725 IFNET_STAT_GET(ifp, opackets, ifp->if_opackets); 1726 IFNET_STAT_GET(ifp, oerrors, ifp->if_oerrors); 1727 IFNET_STAT_GET(ifp, collisions, ifp->if_collisions); 1728 IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes); 1729 IFNET_STAT_GET(ifp, obytes, ifp->if_obytes); 1730 IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts); 1731 IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts); 1732 IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops); 1733 IFNET_STAT_GET(ifp, noproto, ifp->if_noproto); 1734 IFNET_STAT_GET(ifp, oqdrops, ifp->if_oqdrops); 1735 } 1736 1737 if (ifp->if_timer == 0 || --ifp->if_timer) { 1738 crit_exit_gd(gd); 1739 continue; 1740 } 1741 if (ifp->if_watchdog) { 1742 if (ifnet_tryserialize_all(ifp)) { 1743 (*ifp->if_watchdog)(ifp); 1744 ifnet_deserialize_all(ifp); 1745 } else { 1746 /* try again next timeout */ 1747 ++ifp->if_timer; 1748 } 1749 } 1750 1751 crit_exit_gd(gd); 1752 } 1753 1754 callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL); 1755 } 1756 1757 static void 1758 if_slowtimo(void *arg __unused) 1759 { 1760 struct lwkt_msg *lmsg = &if_slowtimo_netmsg.lmsg; 1761 1762 KASSERT(mycpuid == 0, ("not on cpu0")); 1763 crit_enter(); 1764 if (lmsg->ms_flags & MSGF_DONE) 1765 lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg); 1766 crit_exit(); 1767 } 1768 1769 /* 1770 * Map interface name to 1771 * interface structure pointer. 1772 */ 1773 struct ifnet * 1774 ifunit(const char *name) 1775 { 1776 struct ifnet *ifp; 1777 1778 /* 1779 * Search all the interfaces for this name/number 1780 */ 1781 KASSERT(mtx_owned(&ifnet_mtx), ("ifnet is not locked")); 1782 1783 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 1784 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1785 break; 1786 } 1787 return (ifp); 1788 } 1789 1790 struct ifnet * 1791 ifunit_netisr(const char *name) 1792 { 1793 const struct ifnet_array *arr; 1794 int i; 1795 1796 /* 1797 * Search all the interfaces for this name/number 1798 */ 1799 1800 arr = ifnet_array_get(); 1801 for (i = 0; i < arr->ifnet_count; ++i) { 1802 struct ifnet *ifp = arr->ifnet_arr[i]; 1803 1804 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1805 return ifp; 1806 } 1807 return NULL; 1808 } 1809 1810 /* 1811 * Interface ioctls. 1812 */ 1813 int 1814 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred) 1815 { 1816 struct ifnet *ifp; 1817 struct ifreq *ifr; 1818 struct ifstat *ifs; 1819 int error, do_ifup = 0; 1820 short oif_flags; 1821 int new_flags; 1822 size_t namelen, onamelen; 1823 char new_name[IFNAMSIZ]; 1824 struct ifaddr *ifa; 1825 struct sockaddr_dl *sdl; 1826 1827 switch (cmd) { 1828 case SIOCGIFCONF: 1829 return (ifconf(cmd, data, cred)); 1830 default: 1831 break; 1832 } 1833 1834 ifr = (struct ifreq *)data; 1835 1836 switch (cmd) { 1837 case SIOCIFCREATE: 1838 case SIOCIFCREATE2: 1839 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1840 return (error); 1841 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), 1842 cmd == SIOCIFCREATE2 ? 
ifr->ifr_data : NULL)); 1843 case SIOCIFDESTROY: 1844 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1845 return (error); 1846 return (if_clone_destroy(ifr->ifr_name)); 1847 case SIOCIFGCLONERS: 1848 return (if_clone_list((struct if_clonereq *)data)); 1849 default: 1850 break; 1851 } 1852 1853 /* 1854 * Nominal ioctl through interface, lookup the ifp and obtain a 1855 * lock to serialize the ifconfig ioctl operation. 1856 */ 1857 ifnet_lock(); 1858 1859 ifp = ifunit(ifr->ifr_name); 1860 if (ifp == NULL) { 1861 ifnet_unlock(); 1862 return (ENXIO); 1863 } 1864 error = 0; 1865 1866 switch (cmd) { 1867 case SIOCGIFINDEX: 1868 ifr->ifr_index = ifp->if_index; 1869 break; 1870 1871 case SIOCGIFFLAGS: 1872 ifr->ifr_flags = ifp->if_flags; 1873 ifr->ifr_flagshigh = ifp->if_flags >> 16; 1874 break; 1875 1876 case SIOCGIFCAP: 1877 ifr->ifr_reqcap = ifp->if_capabilities; 1878 ifr->ifr_curcap = ifp->if_capenable; 1879 break; 1880 1881 case SIOCGIFMETRIC: 1882 ifr->ifr_metric = ifp->if_metric; 1883 break; 1884 1885 case SIOCGIFMTU: 1886 ifr->ifr_mtu = ifp->if_mtu; 1887 break; 1888 1889 case SIOCGIFTSOLEN: 1890 ifr->ifr_tsolen = ifp->if_tsolen; 1891 break; 1892 1893 case SIOCGIFDATA: 1894 error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data, 1895 sizeof(ifp->if_data)); 1896 break; 1897 1898 case SIOCGIFPHYS: 1899 ifr->ifr_phys = ifp->if_physical; 1900 break; 1901 1902 case SIOCGIFPOLLCPU: 1903 ifr->ifr_pollcpu = -1; 1904 break; 1905 1906 case SIOCSIFPOLLCPU: 1907 break; 1908 1909 case SIOCSIFFLAGS: 1910 error = priv_check_cred(cred, PRIV_ROOT, 0); 1911 if (error) 1912 break; 1913 new_flags = (ifr->ifr_flags & 0xffff) | 1914 (ifr->ifr_flagshigh << 16); 1915 if (ifp->if_flags & IFF_SMART) { 1916 /* Smart drivers twiddle their own routes */ 1917 } else if (ifp->if_flags & IFF_UP && 1918 (new_flags & IFF_UP) == 0) { 1919 if_down(ifp); 1920 } else if (new_flags & IFF_UP && 1921 (ifp->if_flags & IFF_UP) == 0) { 1922 do_ifup = 1; 1923 } 1924 1925 #ifdef IFPOLL_ENABLE 1926 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) { 1927 if (new_flags & IFF_NPOLLING) 1928 ifpoll_register(ifp); 1929 else 1930 ifpoll_deregister(ifp); 1931 } 1932 #endif 1933 1934 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | 1935 (new_flags &~ IFF_CANTCHANGE); 1936 if (new_flags & IFF_PPROMISC) { 1937 /* Permanently promiscuous mode requested */ 1938 ifp->if_flags |= IFF_PROMISC; 1939 } else if (ifp->if_pcount == 0) { 1940 ifp->if_flags &= ~IFF_PROMISC; 1941 } 1942 if (ifp->if_ioctl) { 1943 ifnet_serialize_all(ifp); 1944 ifp->if_ioctl(ifp, cmd, data, cred); 1945 ifnet_deserialize_all(ifp); 1946 } 1947 if (do_ifup) 1948 if_up(ifp); 1949 getmicrotime(&ifp->if_lastchange); 1950 break; 1951 1952 case SIOCSIFCAP: 1953 error = priv_check_cred(cred, PRIV_ROOT, 0); 1954 if (error) 1955 break; 1956 if (ifr->ifr_reqcap & ~ifp->if_capabilities) { 1957 error = EINVAL; 1958 break; 1959 } 1960 ifnet_serialize_all(ifp); 1961 ifp->if_ioctl(ifp, cmd, data, cred); 1962 ifnet_deserialize_all(ifp); 1963 break; 1964 1965 case SIOCSIFNAME: 1966 error = priv_check_cred(cred, PRIV_ROOT, 0); 1967 if (error) 1968 break; 1969 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL); 1970 if (error) 1971 break; 1972 if (new_name[0] == '\0') { 1973 error = EINVAL; 1974 break; 1975 } 1976 if (ifunit(new_name) != NULL) { 1977 error = EEXIST; 1978 break; 1979 } 1980 1981 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp); 1982 1983 /* Announce the departure of the interface. 
*/ 1984 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 1985 1986 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname)); 1987 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa; 1988 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 1989 namelen = strlen(new_name); 1990 onamelen = sdl->sdl_nlen; 1991 /* 1992 * Move the address if needed. This is safe because we 1993 * allocate space for a name of length IFNAMSIZ when we 1994 * create this in if_attach(). 1995 */ 1996 if (namelen != onamelen) { 1997 bcopy(sdl->sdl_data + onamelen, 1998 sdl->sdl_data + namelen, sdl->sdl_alen); 1999 } 2000 bcopy(new_name, sdl->sdl_data, namelen); 2001 sdl->sdl_nlen = namelen; 2002 sdl = (struct sockaddr_dl *)ifa->ifa_netmask; 2003 bzero(sdl->sdl_data, onamelen); 2004 while (namelen != 0) 2005 sdl->sdl_data[--namelen] = 0xff; 2006 2007 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 2008 2009 /* Announce the return of the interface. */ 2010 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 2011 break; 2012 2013 case SIOCSIFMETRIC: 2014 error = priv_check_cred(cred, PRIV_ROOT, 0); 2015 if (error) 2016 break; 2017 ifp->if_metric = ifr->ifr_metric; 2018 getmicrotime(&ifp->if_lastchange); 2019 break; 2020 2021 case SIOCSIFPHYS: 2022 error = priv_check_cred(cred, PRIV_ROOT, 0); 2023 if (error) 2024 break; 2025 if (ifp->if_ioctl == NULL) { 2026 error = EOPNOTSUPP; 2027 break; 2028 } 2029 ifnet_serialize_all(ifp); 2030 error = ifp->if_ioctl(ifp, cmd, data, cred); 2031 ifnet_deserialize_all(ifp); 2032 if (error == 0) 2033 getmicrotime(&ifp->if_lastchange); 2034 break; 2035 2036 case SIOCSIFMTU: 2037 { 2038 u_long oldmtu = ifp->if_mtu; 2039 2040 error = priv_check_cred(cred, PRIV_ROOT, 0); 2041 if (error) 2042 break; 2043 if (ifp->if_ioctl == NULL) { 2044 error = EOPNOTSUPP; 2045 break; 2046 } 2047 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) { 2048 error = EINVAL; 2049 break; 2050 } 2051 ifnet_serialize_all(ifp); 2052 error = ifp->if_ioctl(ifp, cmd, data, cred); 2053 ifnet_deserialize_all(ifp); 2054 if (error == 0) { 2055 getmicrotime(&ifp->if_lastchange); 2056 rt_ifmsg(ifp); 2057 } 2058 /* 2059 * If the link MTU changed, do network layer specific procedure. 2060 */ 2061 if (ifp->if_mtu != oldmtu) { 2062 #ifdef INET6 2063 nd6_setmtu(ifp); 2064 #endif 2065 } 2066 break; 2067 } 2068 2069 case SIOCSIFTSOLEN: 2070 error = priv_check_cred(cred, PRIV_ROOT, 0); 2071 if (error) 2072 break; 2073 2074 /* XXX need driver supplied upper limit */ 2075 if (ifr->ifr_tsolen <= 0) { 2076 error = EINVAL; 2077 break; 2078 } 2079 ifp->if_tsolen = ifr->ifr_tsolen; 2080 break; 2081 2082 case SIOCADDMULTI: 2083 case SIOCDELMULTI: 2084 error = priv_check_cred(cred, PRIV_ROOT, 0); 2085 if (error) 2086 break; 2087 2088 /* Don't allow group membership on non-multicast interfaces. */ 2089 if ((ifp->if_flags & IFF_MULTICAST) == 0) { 2090 error = EOPNOTSUPP; 2091 break; 2092 } 2093 2094 /* Don't let users screw up protocols' entries. 
*/ 2095 if (ifr->ifr_addr.sa_family != AF_LINK) { 2096 error = EINVAL; 2097 break; 2098 } 2099 2100 if (cmd == SIOCADDMULTI) { 2101 struct ifmultiaddr *ifma; 2102 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); 2103 } else { 2104 error = if_delmulti(ifp, &ifr->ifr_addr); 2105 } 2106 if (error == 0) 2107 getmicrotime(&ifp->if_lastchange); 2108 break; 2109 2110 case SIOCSIFPHYADDR: 2111 case SIOCDIFPHYADDR: 2112 #ifdef INET6 2113 case SIOCSIFPHYADDR_IN6: 2114 #endif 2115 case SIOCSLIFPHYADDR: 2116 case SIOCSIFMEDIA: 2117 case SIOCSIFGENERIC: 2118 error = priv_check_cred(cred, PRIV_ROOT, 0); 2119 if (error) 2120 break; 2121 if (ifp->if_ioctl == 0) { 2122 error = EOPNOTSUPP; 2123 break; 2124 } 2125 ifnet_serialize_all(ifp); 2126 error = ifp->if_ioctl(ifp, cmd, data, cred); 2127 ifnet_deserialize_all(ifp); 2128 if (error == 0) 2129 getmicrotime(&ifp->if_lastchange); 2130 break; 2131 2132 case SIOCGIFSTATUS: 2133 ifs = (struct ifstat *)data; 2134 ifs->ascii[0] = '\0'; 2135 /* fall through */ 2136 case SIOCGIFPSRCADDR: 2137 case SIOCGIFPDSTADDR: 2138 case SIOCGLIFPHYADDR: 2139 case SIOCGIFMEDIA: 2140 case SIOCGIFGENERIC: 2141 if (ifp->if_ioctl == NULL) { 2142 error = EOPNOTSUPP; 2143 break; 2144 } 2145 ifnet_serialize_all(ifp); 2146 error = ifp->if_ioctl(ifp, cmd, data, cred); 2147 ifnet_deserialize_all(ifp); 2148 break; 2149 2150 case SIOCSIFLLADDR: 2151 error = priv_check_cred(cred, PRIV_ROOT, 0); 2152 if (error) 2153 break; 2154 error = if_setlladdr(ifp, ifr->ifr_addr.sa_data, 2155 ifr->ifr_addr.sa_len); 2156 EVENTHANDLER_INVOKE(iflladdr_event, ifp); 2157 break; 2158 2159 default: 2160 oif_flags = ifp->if_flags; 2161 if (so->so_proto == 0) { 2162 error = EOPNOTSUPP; 2163 break; 2164 } 2165 error = so_pru_control_direct(so, cmd, data, ifp); 2166 2167 if ((oif_flags ^ ifp->if_flags) & IFF_UP) { 2168 #ifdef INET6 2169 DELAY(100);/* XXX: temporary workaround for fxp issue*/ 2170 if (ifp->if_flags & IFF_UP) { 2171 crit_enter(); 2172 in6_if_up(ifp); 2173 crit_exit(); 2174 } 2175 #endif 2176 } 2177 break; 2178 } 2179 2180 ifnet_unlock(); 2181 return (error); 2182 } 2183 2184 /* 2185 * Set/clear promiscuous mode on interface ifp based on the truth value 2186 * of pswitch. The calls are reference counted so that only the first 2187 * "on" request actually has an effect, as does the final "off" request. 2188 * Results are undefined if the "off" and "on" requests are not matched. 2189 */ 2190 int 2191 ifpromisc(struct ifnet *ifp, int pswitch) 2192 { 2193 struct ifreq ifr; 2194 int error; 2195 int oldflags; 2196 2197 oldflags = ifp->if_flags; 2198 if (ifp->if_flags & IFF_PPROMISC) { 2199 /* Do nothing if device is in permanently promiscuous mode */ 2200 ifp->if_pcount += pswitch ? 1 : -1; 2201 return (0); 2202 } 2203 if (pswitch) { 2204 /* 2205 * If the device is not configured up, we cannot put it in 2206 * promiscuous mode. 
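		 * The caller gets ENETDOWN and no reference is taken, so it
		 * must not issue a matching "off" request later.  A typical
		 * consumer (a packet tap, for instance) otherwise brackets
		 * its use as ifpromisc(ifp, 1) when it starts listening and
		 * ifpromisc(ifp, 0) when it stops, so the reference counts
		 * stay matched.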
2207 */ 2208 if ((ifp->if_flags & IFF_UP) == 0) 2209 return (ENETDOWN); 2210 if (ifp->if_pcount++ != 0) 2211 return (0); 2212 ifp->if_flags |= IFF_PROMISC; 2213 log(LOG_INFO, "%s: promiscuous mode enabled\n", 2214 ifp->if_xname); 2215 } else { 2216 if (--ifp->if_pcount > 0) 2217 return (0); 2218 ifp->if_flags &= ~IFF_PROMISC; 2219 log(LOG_INFO, "%s: promiscuous mode disabled\n", 2220 ifp->if_xname); 2221 } 2222 ifr.ifr_flags = ifp->if_flags; 2223 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2224 ifnet_serialize_all(ifp); 2225 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL); 2226 ifnet_deserialize_all(ifp); 2227 if (error == 0) 2228 rt_ifmsg(ifp); 2229 else 2230 ifp->if_flags = oldflags; 2231 return error; 2232 } 2233 2234 /* 2235 * Return interface configuration 2236 * of system. List may be used 2237 * in later ioctl's (above) to get 2238 * other information. 2239 */ 2240 static int 2241 ifconf(u_long cmd, caddr_t data, struct ucred *cred) 2242 { 2243 struct ifconf *ifc = (struct ifconf *)data; 2244 struct ifnet *ifp; 2245 struct sockaddr *sa; 2246 struct ifreq ifr, *ifrp; 2247 int space = ifc->ifc_len, error = 0; 2248 2249 ifrp = ifc->ifc_req; 2250 2251 ifnet_lock(); 2252 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2253 struct ifaddr_container *ifac, *ifac_mark; 2254 struct ifaddr_marker mark; 2255 struct ifaddrhead *head; 2256 int addrs; 2257 2258 if (space <= sizeof ifr) 2259 break; 2260 2261 /* 2262 * Zero the stack declared structure first to prevent 2263 * memory disclosure. 2264 */ 2265 bzero(&ifr, sizeof(ifr)); 2266 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) 2267 >= sizeof(ifr.ifr_name)) { 2268 error = ENAMETOOLONG; 2269 break; 2270 } 2271 2272 /* 2273 * Add a marker, since copyout() could block and during that 2274 * period the list could be changed. Inserting the marker to 2275 * the header of the list will not cause trouble for the code 2276 * assuming that the first element of the list is AF_LINK; the 2277 * marker will be moved to the next position w/o blocking. 2278 */ 2279 ifa_marker_init(&mark, ifp); 2280 ifac_mark = &mark.ifac; 2281 head = &ifp->if_addrheads[mycpuid]; 2282 2283 addrs = 0; 2284 TAILQ_INSERT_HEAD(head, ifac_mark, ifa_link); 2285 while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) { 2286 struct ifaddr *ifa = ifac->ifa; 2287 2288 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2289 TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link); 2290 2291 /* Ignore marker */ 2292 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 2293 continue; 2294 2295 if (space <= sizeof ifr) 2296 break; 2297 sa = ifa->ifa_addr; 2298 if (cred->cr_prison && 2299 prison_if(cred, sa)) 2300 continue; 2301 addrs++; 2302 /* 2303 * Keep a reference on this ifaddr, so that it will 2304 * not be destroyed when its address is copied to 2305 * the userland, which could block. 
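			 * The reference is dropped again with IFAFREE()
			 * below, once the copyout has either completed or
			 * failed.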
2306 */ 2307 IFAREF(ifa); 2308 if (sa->sa_len <= sizeof(*sa)) { 2309 ifr.ifr_addr = *sa; 2310 error = copyout(&ifr, ifrp, sizeof ifr); 2311 ifrp++; 2312 } else { 2313 if (space < (sizeof ifr) + sa->sa_len - 2314 sizeof(*sa)) { 2315 IFAFREE(ifa); 2316 break; 2317 } 2318 space -= sa->sa_len - sizeof(*sa); 2319 error = copyout(&ifr, ifrp, 2320 sizeof ifr.ifr_name); 2321 if (error == 0) 2322 error = copyout(sa, &ifrp->ifr_addr, 2323 sa->sa_len); 2324 ifrp = (struct ifreq *) 2325 (sa->sa_len + (caddr_t)&ifrp->ifr_addr); 2326 } 2327 IFAFREE(ifa); 2328 if (error) 2329 break; 2330 space -= sizeof ifr; 2331 } 2332 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2333 if (error) 2334 break; 2335 if (!addrs) { 2336 bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr); 2337 error = copyout(&ifr, ifrp, sizeof ifr); 2338 if (error) 2339 break; 2340 space -= sizeof ifr; 2341 ifrp++; 2342 } 2343 } 2344 ifnet_unlock(); 2345 2346 ifc->ifc_len -= space; 2347 return (error); 2348 } 2349 2350 /* 2351 * Just like if_promisc(), but for all-multicast-reception mode. 2352 */ 2353 int 2354 if_allmulti(struct ifnet *ifp, int onswitch) 2355 { 2356 int error = 0; 2357 struct ifreq ifr; 2358 2359 crit_enter(); 2360 2361 if (onswitch) { 2362 if (ifp->if_amcount++ == 0) { 2363 ifp->if_flags |= IFF_ALLMULTI; 2364 ifr.ifr_flags = ifp->if_flags; 2365 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2366 ifnet_serialize_all(ifp); 2367 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2368 NULL); 2369 ifnet_deserialize_all(ifp); 2370 } 2371 } else { 2372 if (ifp->if_amcount > 1) { 2373 ifp->if_amcount--; 2374 } else { 2375 ifp->if_amcount = 0; 2376 ifp->if_flags &= ~IFF_ALLMULTI; 2377 ifr.ifr_flags = ifp->if_flags; 2378 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2379 ifnet_serialize_all(ifp); 2380 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2381 NULL); 2382 ifnet_deserialize_all(ifp); 2383 } 2384 } 2385 2386 crit_exit(); 2387 2388 if (error == 0) 2389 rt_ifmsg(ifp); 2390 return error; 2391 } 2392 2393 /* 2394 * Add a multicast listenership to the interface in question. 2395 * The link layer provides a routine which converts 2396 */ 2397 int 2398 if_addmulti_serialized(struct ifnet *ifp, struct sockaddr *sa, 2399 struct ifmultiaddr **retifma) 2400 { 2401 struct sockaddr *llsa, *dupsa; 2402 int error; 2403 struct ifmultiaddr *ifma; 2404 2405 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2406 2407 /* 2408 * If the matching multicast address already exists 2409 * then don't add a new one, just add a reference 2410 */ 2411 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2412 if (sa_equal(sa, ifma->ifma_addr)) { 2413 ifma->ifma_refcount++; 2414 if (retifma) 2415 *retifma = ifma; 2416 return 0; 2417 } 2418 } 2419 2420 /* 2421 * Give the link layer a chance to accept/reject it, and also 2422 * find out which AF_LINK address this maps to, if it isn't one 2423 * already. 
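	 * For Ethernet, for instance, if_resolvemulti() maps an IPv4 group
	 * address to the corresponding 01:00:5e:xx:xx:xx link-layer
	 * multicast address; that AF_LINK address is then tracked below
	 * alongside the protocol-level address.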
2424 */ 2425 if (ifp->if_resolvemulti) { 2426 error = ifp->if_resolvemulti(ifp, &llsa, sa); 2427 if (error) 2428 return error; 2429 } else { 2430 llsa = NULL; 2431 } 2432 2433 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2434 dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_INTWAIT); 2435 bcopy(sa, dupsa, sa->sa_len); 2436 2437 ifma->ifma_addr = dupsa; 2438 ifma->ifma_lladdr = llsa; 2439 ifma->ifma_ifp = ifp; 2440 ifma->ifma_refcount = 1; 2441 ifma->ifma_protospec = NULL; 2442 rt_newmaddrmsg(RTM_NEWMADDR, ifma); 2443 2444 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2445 if (retifma) 2446 *retifma = ifma; 2447 2448 if (llsa != NULL) { 2449 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2450 if (sa_equal(ifma->ifma_addr, llsa)) 2451 break; 2452 } 2453 if (ifma) { 2454 ifma->ifma_refcount++; 2455 } else { 2456 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2457 dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_INTWAIT); 2458 bcopy(llsa, dupsa, llsa->sa_len); 2459 ifma->ifma_addr = dupsa; 2460 ifma->ifma_ifp = ifp; 2461 ifma->ifma_refcount = 1; 2462 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2463 } 2464 } 2465 /* 2466 * We are certain we have added something, so call down to the 2467 * interface to let them know about it. 2468 */ 2469 if (ifp->if_ioctl) 2470 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL); 2471 2472 return 0; 2473 } 2474 2475 int 2476 if_addmulti(struct ifnet *ifp, struct sockaddr *sa, 2477 struct ifmultiaddr **retifma) 2478 { 2479 int error; 2480 2481 ifnet_serialize_all(ifp); 2482 error = if_addmulti_serialized(ifp, sa, retifma); 2483 ifnet_deserialize_all(ifp); 2484 2485 return error; 2486 } 2487 2488 /* 2489 * Remove a reference to a multicast address on this interface. Yell 2490 * if the request does not match an existing membership. 2491 */ 2492 static int 2493 if_delmulti_serialized(struct ifnet *ifp, struct sockaddr *sa) 2494 { 2495 struct ifmultiaddr *ifma; 2496 2497 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2498 2499 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2500 if (sa_equal(sa, ifma->ifma_addr)) 2501 break; 2502 if (ifma == NULL) 2503 return ENOENT; 2504 2505 if (ifma->ifma_refcount > 1) { 2506 ifma->ifma_refcount--; 2507 return 0; 2508 } 2509 2510 rt_newmaddrmsg(RTM_DELMADDR, ifma); 2511 sa = ifma->ifma_lladdr; 2512 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2513 /* 2514 * Make sure the interface driver is notified 2515 * in the case of a link layer mcast group being left. 2516 */ 2517 if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) 2518 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2519 kfree(ifma->ifma_addr, M_IFMADDR); 2520 kfree(ifma, M_IFMADDR); 2521 if (sa == NULL) 2522 return 0; 2523 2524 /* 2525 * Now look for the link-layer address which corresponds to 2526 * this network address. It had been squirreled away in 2527 * ifma->ifma_lladdr for this purpose (so we don't have 2528 * to call ifp->if_resolvemulti() again), and we saved that 2529 * value in sa above. If some nasty deleted the 2530 * link-layer address out from underneath us, we can deal because 2531 * the address we stored was is not the same as the one which was 2532 * in the record for the link-layer address. (So we don't complain 2533 * in that case.) 
2534 */ 2535 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2536 if (sa_equal(sa, ifma->ifma_addr)) 2537 break; 2538 if (ifma == NULL) 2539 return 0; 2540 2541 if (ifma->ifma_refcount > 1) { 2542 ifma->ifma_refcount--; 2543 return 0; 2544 } 2545 2546 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2547 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2548 kfree(ifma->ifma_addr, M_IFMADDR); 2549 kfree(sa, M_IFMADDR); 2550 kfree(ifma, M_IFMADDR); 2551 2552 return 0; 2553 } 2554 2555 int 2556 if_delmulti(struct ifnet *ifp, struct sockaddr *sa) 2557 { 2558 int error; 2559 2560 ifnet_serialize_all(ifp); 2561 error = if_delmulti_serialized(ifp, sa); 2562 ifnet_deserialize_all(ifp); 2563 2564 return error; 2565 } 2566 2567 /* 2568 * Delete all multicast group membership for an interface. 2569 * Should be used to quickly flush all multicast filters. 2570 */ 2571 void 2572 if_delallmulti_serialized(struct ifnet *ifp) 2573 { 2574 struct ifmultiaddr *ifma, mark; 2575 struct sockaddr sa; 2576 2577 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2578 2579 bzero(&sa, sizeof(sa)); 2580 sa.sa_family = AF_UNSPEC; 2581 sa.sa_len = sizeof(sa); 2582 2583 bzero(&mark, sizeof(mark)); 2584 mark.ifma_addr = &sa; 2585 2586 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, &mark, ifma_link); 2587 while ((ifma = TAILQ_NEXT(&mark, ifma_link)) != NULL) { 2588 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2589 TAILQ_INSERT_AFTER(&ifp->if_multiaddrs, ifma, &mark, 2590 ifma_link); 2591 2592 if (ifma->ifma_addr->sa_family == AF_UNSPEC) 2593 continue; 2594 2595 if_delmulti_serialized(ifp, ifma->ifma_addr); 2596 } 2597 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2598 } 2599 2600 2601 /* 2602 * Set the link layer address on an interface. 2603 * 2604 * At this time we only support certain types of interfaces, 2605 * and we don't allow the length of the address to change. 2606 */ 2607 int 2608 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) 2609 { 2610 struct sockaddr_dl *sdl; 2611 struct ifreq ifr; 2612 2613 sdl = IF_LLSOCKADDR(ifp); 2614 if (sdl == NULL) 2615 return (EINVAL); 2616 if (len != sdl->sdl_alen) /* don't allow length to change */ 2617 return (EINVAL); 2618 switch (ifp->if_type) { 2619 case IFT_ETHER: /* these types use struct arpcom */ 2620 case IFT_XETHER: 2621 case IFT_L2VLAN: 2622 case IFT_IEEE8023ADLAG: 2623 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len); 2624 bcopy(lladdr, LLADDR(sdl), len); 2625 break; 2626 default: 2627 return (ENODEV); 2628 } 2629 /* 2630 * If the interface is already up, we need 2631 * to re-init it in order to reprogram its 2632 * address filter. 2633 */ 2634 ifnet_serialize_all(ifp); 2635 if ((ifp->if_flags & IFF_UP) != 0) { 2636 #ifdef INET 2637 struct ifaddr_container *ifac; 2638 #endif 2639 2640 ifp->if_flags &= ~IFF_UP; 2641 ifr.ifr_flags = ifp->if_flags; 2642 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2643 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2644 NULL); 2645 ifp->if_flags |= IFF_UP; 2646 ifr.ifr_flags = ifp->if_flags; 2647 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2648 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2649 NULL); 2650 #ifdef INET 2651 /* 2652 * Also send gratuitous ARPs to notify other nodes about 2653 * the address change. 
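		 * Without this, neighbors would keep resolving our IPv4
		 * addresses to the old MAC address until their ARP entries
		 * time out.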
2654 */ 2655 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 2656 struct ifaddr *ifa = ifac->ifa; 2657 2658 if (ifa->ifa_addr != NULL && 2659 ifa->ifa_addr->sa_family == AF_INET) 2660 arp_gratuitous(ifp, ifa); 2661 } 2662 #endif 2663 } 2664 ifnet_deserialize_all(ifp); 2665 return (0); 2666 } 2667 2668 struct ifmultiaddr * 2669 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp) 2670 { 2671 struct ifmultiaddr *ifma; 2672 2673 /* TODO: need ifnet_serialize_main */ 2674 ifnet_serialize_all(ifp); 2675 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2676 if (sa_equal(ifma->ifma_addr, sa)) 2677 break; 2678 ifnet_deserialize_all(ifp); 2679 2680 return ifma; 2681 } 2682 2683 /* 2684 * This function locates the first real ethernet MAC from a network 2685 * card and loads it into node, returning 0 on success or ENOENT if 2686 * no suitable interfaces were found. It is used by the uuid code to 2687 * generate a unique 6-byte number. 2688 */ 2689 int 2690 if_getanyethermac(uint16_t *node, int minlen) 2691 { 2692 struct ifnet *ifp; 2693 struct sockaddr_dl *sdl; 2694 2695 ifnet_lock(); 2696 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2697 if (ifp->if_type != IFT_ETHER) 2698 continue; 2699 sdl = IF_LLSOCKADDR(ifp); 2700 if (sdl->sdl_alen < minlen) 2701 continue; 2702 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node, 2703 minlen); 2704 ifnet_unlock(); 2705 return(0); 2706 } 2707 ifnet_unlock(); 2708 return (ENOENT); 2709 } 2710 2711 /* 2712 * The name argument must be a pointer to storage which will last as 2713 * long as the interface does. For physical devices, the result of 2714 * device_get_name(dev) is a good choice and for pseudo-devices a 2715 * static string works well. 2716 */ 2717 void 2718 if_initname(struct ifnet *ifp, const char *name, int unit) 2719 { 2720 ifp->if_dname = name; 2721 ifp->if_dunit = unit; 2722 if (unit != IF_DUNIT_NONE) 2723 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit); 2724 else 2725 strlcpy(ifp->if_xname, name, IFNAMSIZ); 2726 } 2727 2728 int 2729 if_printf(struct ifnet *ifp, const char *fmt, ...) 
2730 { 2731 __va_list ap; 2732 int retval; 2733 2734 retval = kprintf("%s: ", ifp->if_xname); 2735 __va_start(ap, fmt); 2736 retval += kvprintf(fmt, ap); 2737 __va_end(ap); 2738 return (retval); 2739 } 2740 2741 struct ifnet * 2742 if_alloc(uint8_t type) 2743 { 2744 struct ifnet *ifp; 2745 size_t size; 2746 2747 /* 2748 * XXX temporary hack until arpcom is setup in if_l2com 2749 */ 2750 if (type == IFT_ETHER) 2751 size = sizeof(struct arpcom); 2752 else 2753 size = sizeof(struct ifnet); 2754 2755 ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO); 2756 2757 ifp->if_type = type; 2758 2759 if (if_com_alloc[type] != NULL) { 2760 ifp->if_l2com = if_com_alloc[type](type, ifp); 2761 if (ifp->if_l2com == NULL) { 2762 kfree(ifp, M_IFNET); 2763 return (NULL); 2764 } 2765 } 2766 return (ifp); 2767 } 2768 2769 void 2770 if_free(struct ifnet *ifp) 2771 { 2772 kfree(ifp, M_IFNET); 2773 } 2774 2775 void 2776 ifq_set_classic(struct ifaltq *ifq) 2777 { 2778 ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq, 2779 ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request); 2780 } 2781 2782 void 2783 ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq, 2784 ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request) 2785 { 2786 int q; 2787 2788 KASSERT(mapsubq != NULL, ("mapsubq is not specified")); 2789 KASSERT(enqueue != NULL, ("enqueue is not specified")); 2790 KASSERT(dequeue != NULL, ("dequeue is not specified")); 2791 KASSERT(request != NULL, ("request is not specified")); 2792 2793 ifq->altq_mapsubq = mapsubq; 2794 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 2795 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 2796 2797 ifsq->ifsq_enqueue = enqueue; 2798 ifsq->ifsq_dequeue = dequeue; 2799 ifsq->ifsq_request = request; 2800 } 2801 } 2802 2803 static void 2804 ifsq_norm_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2805 { 2806 2807 classq_add(&ifsq->ifsq_norm, m); 2808 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2809 } 2810 2811 static void 2812 ifsq_prio_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2813 { 2814 2815 classq_add(&ifsq->ifsq_prio, m); 2816 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2817 ALTQ_SQ_PRIO_CNTR_INC(ifsq, m->m_pkthdr.len); 2818 } 2819 2820 static struct mbuf * 2821 ifsq_norm_dequeue(struct ifaltq_subque *ifsq) 2822 { 2823 struct mbuf *m; 2824 2825 m = classq_get(&ifsq->ifsq_norm); 2826 if (m != NULL) 2827 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2828 return (m); 2829 } 2830 2831 static struct mbuf * 2832 ifsq_prio_dequeue(struct ifaltq_subque *ifsq) 2833 { 2834 struct mbuf *m; 2835 2836 m = classq_get(&ifsq->ifsq_prio); 2837 if (m != NULL) { 2838 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2839 ALTQ_SQ_PRIO_CNTR_DEC(ifsq, m->m_pkthdr.len); 2840 } 2841 return (m); 2842 } 2843 2844 int 2845 ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m, 2846 struct altq_pktattr *pa __unused) 2847 { 2848 2849 M_ASSERTPKTHDR(m); 2850 again: 2851 if (ifsq->ifsq_len >= ifsq->ifsq_maxlen || 2852 ifsq->ifsq_bcnt >= ifsq->ifsq_maxbcnt) { 2853 struct mbuf *m_drop; 2854 2855 if (m->m_flags & M_PRIO) { 2856 m_drop = NULL; 2857 if (ifsq->ifsq_prio_len < (ifsq->ifsq_maxlen >> 1) && 2858 ifsq->ifsq_prio_bcnt < (ifsq->ifsq_maxbcnt >> 1)) { 2859 /* Try dropping some from normal queue. 
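				 * A priority packet may displace a queued
				 * normal packet only while the priority queue
				 * itself stays under half of the limits;
				 * otherwise (or if the normal queue is empty)
				 * an older priority packet is dropped instead.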
				 */
				m_drop = ifsq_norm_dequeue(ifsq);
			}
			if (m_drop == NULL)
				m_drop = ifsq_prio_dequeue(ifsq);
		} else {
			m_drop = ifsq_norm_dequeue(ifsq);
		}
		if (m_drop != NULL) {
			IFNET_STAT_INC(ifsq->ifsq_ifp, oqdrops, 1);
			m_freem(m_drop);
			goto again;
		}
		/*
		 * No old packets could be dropped!
		 * NOTE: Caller increases oqdrops.
		 */
		m_freem(m);
		return (ENOBUFS);
	} else {
		if (m->m_flags & M_PRIO)
			ifsq_prio_enqueue(ifsq, m);
		else
			ifsq_norm_enqueue(ifsq, m);
		return (0);
	}
}

struct mbuf *
ifsq_classic_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct mbuf *m;

	switch (op) {
	case ALTDQ_POLL:
		m = classq_head(&ifsq->ifsq_prio);
		if (m == NULL)
			m = classq_head(&ifsq->ifsq_norm);
		break;

	case ALTDQ_REMOVE:
		m = ifsq_prio_dequeue(ifsq);
		if (m == NULL)
			m = ifsq_norm_dequeue(ifsq);
		break;

	default:
		panic("unsupported ALTQ dequeue op: %d", op);
	}
	return m;
}

int
ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	switch (req) {
	case ALTRQ_PURGE:
		for (;;) {
			struct mbuf *m;

			m = ifsq_classic_dequeue(ifsq, ALTDQ_REMOVE);
			if (m == NULL)
				break;
			m_freem(m);
		}
		break;

	default:
		panic("unsupported ALTQ request: %d", req);
	}
	return 0;
}

static void
ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	int running = 0, need_sched;

	/*
	 * Try a direct ifnet.if_start on the subqueue first; if there is
	 * contention on the subqueue hardware serializer, ifnet.if_start
	 * on the subqueue will be scheduled on the subqueue owner CPU.
	 */
	if (!ifsq_tryserialize_hw(ifsq)) {
		/*
		 * Subqueue hardware serializer contention happened,
		 * ifnet.if_start on the subqueue is scheduled on
		 * the subqueue owner CPU, and we keep going.
		 */
		ifsq_ifstart_schedule(ifsq, 1);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
		ifp->if_start(ifp, ifsq);
		if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
			running = 1;
	}
	need_sched = ifsq_ifstart_need_schedule(ifsq, running);

	ifsq_deserialize_hw(ifsq);

	if (need_sched) {
		/*
		 * More data needs to be transmitted; ifnet.if_start on the
		 * subqueue is scheduled on the subqueue owner CPU, and we
		 * keep going.
		 * NOTE: ifnet.if_start subqueue interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, force_sched);
	}
}

/*
 * Subqueue packet staging mechanism:
 *
 * Packets enqueued into a subqueue are staged up to a certain amount
 * before ifnet.if_start is called on the subqueue.  This lets the driver
 * avoid writing to hardware registers for every single packet; instead,
 * the registers are written once a batch of packets has been put onto
 * the hardware TX ring.  Measurements on several modern NICs (emx(4),
 * igb(4), bnx(4), bge(4), jme(4)) show that aggregating the hardware
 * register writes can save ~20% CPU time when 18-byte UDP datagrams are
 * transmitted at 1.48Mpps.  The performance improvement from aggregating
 * hardware register writes is also mentioned in Luigi Rizzo's netmap
 * paper (http://info.iet.unipi.it/~luigi/netmap/).
 *
 * Subqueue packet staging is performed for the two entry points into the
 * drivers' transmission function:
 * - Direct ifnet.if_start calling on the subqueue, i.e. ifsq_ifstart_try()
 * - ifnet.if_start scheduling on the subqueue, i.e. ifsq_ifstart_schedule()
 *
 * Subqueue packet staging will be stopped upon any of the following
 * conditions:
 * - The count of packets enqueued on the current CPU is greater than or
 *   equal to ifsq_stage_cntmax. (XXX this should be per-interface)
 * - The total length of packets enqueued on the current CPU is greater
 *   than or equal to the hardware's MTU - max_protohdr.  max_protohdr is
 *   cut from the hardware's MTU mainly because a full TCP segment's size
 *   is usually less than the hardware's MTU.
 * - ifsq_ifstart_schedule() is not pending on the current CPU and the
 *   ifnet.if_start subqueue interlock (ifaltq_subq.ifsq_started) is not
 *   released.
 * - if_start_rollup(), which is registered as a low priority netisr
 *   rollup function, is called; probably because no more work is pending
 *   for netisr.
 *
 * NOTE:
 * Currently subqueue packet staging is only performed in netisr threads.
 */
int
ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
{
	struct ifaltq *ifq = &ifp->if_snd;
	struct ifaltq_subque *ifsq;
	int error, start = 0, len, mcast = 0, avoid_start = 0;
	struct ifsubq_stage_head *head = NULL;
	struct ifsubq_stage *stage = NULL;
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;

	crit_enter_quick(td);

	ifsq = ifq_map_subq(ifq, gd->gd_cpuid);
	ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq);

	len = m->m_pkthdr.len;
	if (m->m_flags & M_MCAST)
		mcast = 1;

	if (td->td_type == TD_TYPE_NETISR) {
		head = &ifsubq_stage_heads[mycpuid];
		stage = ifsq_get_stage(ifsq, mycpuid);

		stage->stg_cnt++;
		stage->stg_len += len;
		if (stage->stg_cnt < ifsq_stage_cntmax &&
		    stage->stg_len < (ifp->if_mtu - max_protohdr))
			avoid_start = 1;
	}

	ALTQ_SQ_LOCK(ifsq);
	error = ifsq_enqueue_locked(ifsq, m, pa);
	if (error) {
		IFNET_STAT_INC(ifp, oqdrops, 1);
		if (!ifsq_data_ready(ifsq)) {
			ALTQ_SQ_UNLOCK(ifsq);
			crit_exit_quick(td);
			return error;
		}
		avoid_start = 0;
	}
	if (!ifsq_is_started(ifsq)) {
		if (avoid_start) {
			ALTQ_SQ_UNLOCK(ifsq);

			KKASSERT(!error);
			if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
				ifsq_stage_insert(head, stage);

			IFNET_STAT_INC(ifp, obytes, len);
			if (mcast)
				IFNET_STAT_INC(ifp, omcasts, 1);
			crit_exit_quick(td);
			return error;
		}

		/*
		 * Hold the subqueue interlock of ifnet.if_start
		 */
		ifsq_set_started(ifsq);
		start = 1;
	}
	ALTQ_SQ_UNLOCK(ifsq);

	if (!error) {
		IFNET_STAT_INC(ifp, obytes, len);
		if (mcast)
			IFNET_STAT_INC(ifp, omcasts, 1);
	}

	if (stage != NULL) {
		if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) {
			KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
			if (!avoid_start) {
				ifsq_stage_remove(head, stage);
				ifsq_ifstart_schedule(ifsq, 1);
			}
			crit_exit_quick(td);
			return error;
		}

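		/*
		 * Unstage the subqueue if it was put onto the per-CPU
		 * staging list; otherwise simply reset its staging counters.
		 */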
3092 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) { 3093 ifsq_stage_remove(head, stage); 3094 } else { 3095 stage->stg_cnt = 0; 3096 stage->stg_len = 0; 3097 } 3098 } 3099 3100 if (!start) { 3101 crit_exit_quick(td); 3102 return error; 3103 } 3104 3105 ifsq_ifstart_try(ifsq, 0); 3106 3107 crit_exit_quick(td); 3108 return error; 3109 } 3110 3111 void * 3112 ifa_create(int size) 3113 { 3114 struct ifaddr *ifa; 3115 int i; 3116 3117 KASSERT(size >= sizeof(*ifa), ("ifaddr size too small")); 3118 3119 ifa = kmalloc(size, M_IFADDR, M_INTWAIT | M_ZERO); 3120 3121 /* 3122 * Make ifa_container availabel on all CPUs, since they 3123 * could be accessed by any threads. 3124 */ 3125 ifa->ifa_containers = 3126 kmalloc_cachealign(ncpus * sizeof(struct ifaddr_container), 3127 M_IFADDR, M_INTWAIT | M_ZERO); 3128 3129 ifa->ifa_ncnt = ncpus; 3130 for (i = 0; i < ncpus; ++i) { 3131 struct ifaddr_container *ifac = &ifa->ifa_containers[i]; 3132 3133 ifac->ifa_magic = IFA_CONTAINER_MAGIC; 3134 ifac->ifa = ifa; 3135 ifac->ifa_refcnt = 1; 3136 } 3137 #ifdef IFADDR_DEBUG 3138 kprintf("alloc ifa %p %d\n", ifa, size); 3139 #endif 3140 return ifa; 3141 } 3142 3143 void 3144 ifac_free(struct ifaddr_container *ifac, int cpu_id) 3145 { 3146 struct ifaddr *ifa = ifac->ifa; 3147 3148 KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC); 3149 KKASSERT(ifac->ifa_refcnt == 0); 3150 KASSERT(ifac->ifa_listmask == 0, 3151 ("ifa is still on %#x lists", ifac->ifa_listmask)); 3152 3153 ifac->ifa_magic = IFA_CONTAINER_DEAD; 3154 3155 #ifdef IFADDR_DEBUG_VERBOSE 3156 kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id); 3157 #endif 3158 3159 KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus, 3160 ("invalid # of ifac, %d", ifa->ifa_ncnt)); 3161 if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) { 3162 #ifdef IFADDR_DEBUG 3163 kprintf("free ifa %p\n", ifa); 3164 #endif 3165 kfree(ifa->ifa_containers, M_IFADDR); 3166 kfree(ifa, M_IFADDR); 3167 } 3168 } 3169 3170 static void 3171 ifa_iflink_dispatch(netmsg_t nmsg) 3172 { 3173 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3174 struct ifaddr *ifa = msg->ifa; 3175 struct ifnet *ifp = msg->ifp; 3176 int cpu = mycpuid; 3177 struct ifaddr_container *ifac; 3178 3179 crit_enter(); 3180 3181 ifac = &ifa->ifa_containers[cpu]; 3182 ASSERT_IFAC_VALID(ifac); 3183 KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0, 3184 ("ifaddr is on if_addrheads")); 3185 3186 ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD; 3187 if (msg->tail) 3188 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link); 3189 else 3190 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link); 3191 3192 crit_exit(); 3193 3194 netisr_forwardmsg_all(&nmsg->base, cpu + 1); 3195 } 3196 3197 void 3198 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail) 3199 { 3200 struct netmsg_ifaddr msg; 3201 3202 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3203 0, ifa_iflink_dispatch); 3204 msg.ifa = ifa; 3205 msg.ifp = ifp; 3206 msg.tail = tail; 3207 3208 netisr_domsg(&msg.base, 0); 3209 } 3210 3211 static void 3212 ifa_ifunlink_dispatch(netmsg_t nmsg) 3213 { 3214 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3215 struct ifaddr *ifa = msg->ifa; 3216 struct ifnet *ifp = msg->ifp; 3217 int cpu = mycpuid; 3218 struct ifaddr_container *ifac; 3219 3220 crit_enter(); 3221 3222 ifac = &ifa->ifa_containers[cpu]; 3223 ASSERT_IFAC_VALID(ifac); 3224 KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD, 3225 ("ifaddr is not on if_addrhead")); 3226 3227 TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link); 3228 
ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD; 3229 3230 crit_exit(); 3231 3232 netisr_forwardmsg_all(&nmsg->base, cpu + 1); 3233 } 3234 3235 void 3236 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp) 3237 { 3238 struct netmsg_ifaddr msg; 3239 3240 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3241 0, ifa_ifunlink_dispatch); 3242 msg.ifa = ifa; 3243 msg.ifp = ifp; 3244 3245 netisr_domsg(&msg.base, 0); 3246 } 3247 3248 static void 3249 ifa_destroy_dispatch(netmsg_t nmsg) 3250 { 3251 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3252 3253 IFAFREE(msg->ifa); 3254 netisr_forwardmsg_all(&nmsg->base, mycpuid + 1); 3255 } 3256 3257 void 3258 ifa_destroy(struct ifaddr *ifa) 3259 { 3260 struct netmsg_ifaddr msg; 3261 3262 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3263 0, ifa_destroy_dispatch); 3264 msg.ifa = ifa; 3265 3266 netisr_domsg(&msg.base, 0); 3267 } 3268 3269 static void 3270 if_start_rollup(void) 3271 { 3272 struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid]; 3273 struct ifsubq_stage *stage; 3274 3275 crit_enter(); 3276 3277 while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) { 3278 struct ifaltq_subque *ifsq = stage->stg_subq; 3279 int is_sched = 0; 3280 3281 if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED) 3282 is_sched = 1; 3283 ifsq_stage_remove(head, stage); 3284 3285 if (is_sched) { 3286 ifsq_ifstart_schedule(ifsq, 1); 3287 } else { 3288 int start = 0; 3289 3290 ALTQ_SQ_LOCK(ifsq); 3291 if (!ifsq_is_started(ifsq)) { 3292 /* 3293 * Hold the subqueue interlock of 3294 * ifnet.if_start 3295 */ 3296 ifsq_set_started(ifsq); 3297 start = 1; 3298 } 3299 ALTQ_SQ_UNLOCK(ifsq); 3300 3301 if (start) 3302 ifsq_ifstart_try(ifsq, 1); 3303 } 3304 KKASSERT((stage->stg_flags & 3305 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 3306 } 3307 3308 crit_exit(); 3309 } 3310 3311 static void 3312 ifnetinit(void *dummy __unused) 3313 { 3314 int i; 3315 3316 /* XXX netisr_ncpus */ 3317 for (i = 0; i < ncpus; ++i) 3318 TAILQ_INIT(&ifsubq_stage_heads[i].stg_head); 3319 netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART); 3320 } 3321 3322 void 3323 if_register_com_alloc(u_char type, 3324 if_com_alloc_t *a, if_com_free_t *f) 3325 { 3326 3327 KASSERT(if_com_alloc[type] == NULL, 3328 ("if_register_com_alloc: %d already registered", type)); 3329 KASSERT(if_com_free[type] == NULL, 3330 ("if_register_com_alloc: %d free already registered", type)); 3331 3332 if_com_alloc[type] = a; 3333 if_com_free[type] = f; 3334 } 3335 3336 void 3337 if_deregister_com_alloc(u_char type) 3338 { 3339 3340 KASSERT(if_com_alloc[type] != NULL, 3341 ("if_deregister_com_alloc: %d not registered", type)); 3342 KASSERT(if_com_free[type] != NULL, 3343 ("if_deregister_com_alloc: %d free not registered", type)); 3344 if_com_alloc[type] = NULL; 3345 if_com_free[type] = NULL; 3346 } 3347 3348 void 3349 ifq_set_maxlen(struct ifaltq *ifq, int len) 3350 { 3351 ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax); 3352 } 3353 3354 int 3355 ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused) 3356 { 3357 return ALTQ_SUBQ_INDEX_DEFAULT; 3358 } 3359 3360 int 3361 ifq_mapsubq_modulo(struct ifaltq *ifq, int cpuid) 3362 { 3363 3364 return (cpuid % ifq->altq_subq_mappriv); 3365 } 3366 3367 static void 3368 ifsq_watchdog(void *arg) 3369 { 3370 struct ifsubq_watchdog *wd = arg; 3371 struct ifnet *ifp; 3372 3373 if (__predict_true(wd->wd_timer == 0 || --wd->wd_timer)) 3374 goto done; 3375 3376 ifp = ifsq_get_ifp(wd->wd_subq); 3377 if (ifnet_tryserialize_all(ifp)) { 3378 
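		/* All serializers are held; run the driver's watchdog. */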
wd->wd_watchdog(wd->wd_subq); 3379 ifnet_deserialize_all(ifp); 3380 } else { 3381 /* try again next timeout */ 3382 wd->wd_timer = 1; 3383 } 3384 done: 3385 ifsq_watchdog_reset(wd); 3386 } 3387 3388 static void 3389 ifsq_watchdog_reset(struct ifsubq_watchdog *wd) 3390 { 3391 callout_reset_bycpu(&wd->wd_callout, hz, ifsq_watchdog, wd, 3392 ifsq_get_cpuid(wd->wd_subq)); 3393 } 3394 3395 void 3396 ifsq_watchdog_init(struct ifsubq_watchdog *wd, struct ifaltq_subque *ifsq, 3397 ifsq_watchdog_t watchdog) 3398 { 3399 callout_init_mp(&wd->wd_callout); 3400 wd->wd_timer = 0; 3401 wd->wd_subq = ifsq; 3402 wd->wd_watchdog = watchdog; 3403 } 3404 3405 void 3406 ifsq_watchdog_start(struct ifsubq_watchdog *wd) 3407 { 3408 wd->wd_timer = 0; 3409 ifsq_watchdog_reset(wd); 3410 } 3411 3412 void 3413 ifsq_watchdog_stop(struct ifsubq_watchdog *wd) 3414 { 3415 wd->wd_timer = 0; 3416 callout_stop(&wd->wd_callout); 3417 } 3418 3419 void 3420 ifnet_lock(void) 3421 { 3422 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3423 ("try holding ifnet lock in netisr")); 3424 mtx_lock(&ifnet_mtx); 3425 } 3426 3427 void 3428 ifnet_unlock(void) 3429 { 3430 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3431 ("try holding ifnet lock in netisr")); 3432 mtx_unlock(&ifnet_mtx); 3433 } 3434 3435 static struct ifnet_array * 3436 ifnet_array_alloc(int count) 3437 { 3438 struct ifnet_array *arr; 3439 3440 arr = kmalloc(__offsetof(struct ifnet_array, ifnet_arr[count]), 3441 M_IFNET, M_WAITOK); 3442 arr->ifnet_count = count; 3443 3444 return arr; 3445 } 3446 3447 static void 3448 ifnet_array_free(struct ifnet_array *arr) 3449 { 3450 if (arr == &ifnet_array0) 3451 return; 3452 kfree(arr, M_IFNET); 3453 } 3454 3455 static struct ifnet_array * 3456 ifnet_array_add(struct ifnet *ifp, const struct ifnet_array *old_arr) 3457 { 3458 struct ifnet_array *arr; 3459 int count, i; 3460 3461 KASSERT(old_arr->ifnet_count >= 0, 3462 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3463 count = old_arr->ifnet_count + 1; 3464 arr = ifnet_array_alloc(count); 3465 3466 /* 3467 * Save the old ifnet array and append this ifp to the end of 3468 * the new ifnet array. 3469 */ 3470 for (i = 0; i < old_arr->ifnet_count; ++i) { 3471 KASSERT(old_arr->ifnet_arr[i] != ifp, 3472 ("%s is already in ifnet array", ifp->if_xname)); 3473 arr->ifnet_arr[i] = old_arr->ifnet_arr[i]; 3474 } 3475 KASSERT(i == count - 1, 3476 ("add %s, ifnet array index mismatch, should be %d, but got %d", 3477 ifp->if_xname, count - 1, i)); 3478 arr->ifnet_arr[i] = ifp; 3479 3480 return arr; 3481 } 3482 3483 static struct ifnet_array * 3484 ifnet_array_del(struct ifnet *ifp, const struct ifnet_array *old_arr) 3485 { 3486 struct ifnet_array *arr; 3487 int count, i, idx, found = 0; 3488 3489 KASSERT(old_arr->ifnet_count > 0, 3490 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3491 count = old_arr->ifnet_count - 1; 3492 arr = ifnet_array_alloc(count); 3493 3494 /* 3495 * Save the old ifnet array, but skip this ifp. 
3496 */ 3497 idx = 0; 3498 for (i = 0; i < old_arr->ifnet_count; ++i) { 3499 if (old_arr->ifnet_arr[i] == ifp) { 3500 KASSERT(!found, 3501 ("dup %s is in ifnet array", ifp->if_xname)); 3502 found = 1; 3503 continue; 3504 } 3505 KASSERT(idx < count, 3506 ("invalid ifnet array index %d, count %d", idx, count)); 3507 arr->ifnet_arr[idx] = old_arr->ifnet_arr[i]; 3508 ++idx; 3509 } 3510 KASSERT(found, ("%s is not in ifnet array", ifp->if_xname)); 3511 KASSERT(idx == count, 3512 ("del %s, ifnet array count mismatch, should be %d, but got %d ", 3513 ifp->if_xname, count, idx)); 3514 3515 return arr; 3516 } 3517 3518 const struct ifnet_array * 3519 ifnet_array_get(void) 3520 { 3521 const struct ifnet_array *ret; 3522 3523 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3524 ret = ifnet_array; 3525 /* Make sure 'ret' is really used. */ 3526 cpu_ccfence(); 3527 return (ret); 3528 } 3529 3530 int 3531 ifnet_array_isempty(void) 3532 { 3533 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3534 if (ifnet_array->ifnet_count == 0) 3535 return 1; 3536 else 3537 return 0; 3538 } 3539 3540 void 3541 ifa_marker_init(struct ifaddr_marker *mark, struct ifnet *ifp) 3542 { 3543 struct ifaddr *ifa; 3544 3545 memset(mark, 0, sizeof(*mark)); 3546 ifa = &mark->ifa; 3547 3548 mark->ifac.ifa = ifa; 3549 3550 ifa->ifa_addr = &mark->addr; 3551 ifa->ifa_dstaddr = &mark->dstaddr; 3552 ifa->ifa_netmask = &mark->netmask; 3553 ifa->ifa_ifp = ifp; 3554 } 3555 3556 static int 3557 if_ringcnt_fixup(int ring_cnt, int ring_cntmax) 3558 { 3559 3560 KASSERT(ring_cntmax > 0, ("invalid ring count max %d", ring_cntmax)); 3561 3562 if (ring_cnt <= 0 || ring_cnt > ring_cntmax) 3563 ring_cnt = ring_cntmax; 3564 if (ring_cnt > netisr_ncpus) 3565 ring_cnt = netisr_ncpus; 3566 return (ring_cnt); 3567 } 3568 3569 static void 3570 if_ringmap_set_grid(device_t dev, struct if_ringmap *rm, int grid) 3571 { 3572 int i, offset; 3573 3574 KASSERT(grid > 0, ("invalid if_ringmap grid %d", grid)); 3575 KASSERT(grid >= rm->rm_cnt, ("invalid if_ringmap grid %d, count %d", 3576 grid, rm->rm_cnt)); 3577 rm->rm_grid = grid; 3578 3579 offset = (rm->rm_grid * device_get_unit(dev)) % netisr_ncpus; 3580 for (i = 0; i < rm->rm_cnt; ++i) { 3581 rm->rm_cpumap[i] = offset + i; 3582 KASSERT(rm->rm_cpumap[i] < netisr_ncpus, 3583 ("invalid cpumap[%d] = %d, offset %d", i, 3584 rm->rm_cpumap[i], offset)); 3585 } 3586 } 3587 3588 static struct if_ringmap * 3589 if_ringmap_alloc_flags(device_t dev, int ring_cnt, int ring_cntmax, 3590 uint32_t flags) 3591 { 3592 struct if_ringmap *rm; 3593 int i, grid = 0, prev_grid; 3594 3595 ring_cnt = if_ringcnt_fixup(ring_cnt, ring_cntmax); 3596 rm = kmalloc(__offsetof(struct if_ringmap, rm_cpumap[ring_cnt]), 3597 M_DEVBUF, M_WAITOK | M_ZERO); 3598 3599 rm->rm_cnt = ring_cnt; 3600 if (flags & RINGMAP_FLAG_POWEROF2) 3601 rm->rm_cnt = 1 << (fls(rm->rm_cnt) - 1); 3602 3603 prev_grid = netisr_ncpus; 3604 for (i = 0; i < netisr_ncpus; ++i) { 3605 if (netisr_ncpus % (i + 1) != 0) 3606 continue; 3607 3608 grid = netisr_ncpus / (i + 1); 3609 if (rm->rm_cnt > grid) { 3610 grid = prev_grid; 3611 break; 3612 } 3613 3614 if (rm->rm_cnt > netisr_ncpus / (i + 2)) 3615 break; 3616 prev_grid = grid; 3617 } 3618 if_ringmap_set_grid(dev, rm, grid); 3619 3620 return (rm); 3621 } 3622 3623 struct if_ringmap * 3624 if_ringmap_alloc(device_t dev, int ring_cnt, int ring_cntmax) 3625 { 3626 3627 return (if_ringmap_alloc_flags(dev, ring_cnt, ring_cntmax, 3628 RINGMAP_FLAG_NONE)); 3629 } 3630 3631 struct if_ringmap * 3632 
if_ringmap_alloc2(device_t dev, int ring_cnt, int ring_cntmax) 3633 { 3634 3635 return (if_ringmap_alloc_flags(dev, ring_cnt, ring_cntmax, 3636 RINGMAP_FLAG_POWEROF2)); 3637 } 3638 3639 void 3640 if_ringmap_free(struct if_ringmap *rm) 3641 { 3642 3643 kfree(rm, M_DEVBUF); 3644 } 3645 3646 /* 3647 * Align the two ringmaps. 3648 * 3649 * e.g. 8 netisrs, rm0 contains 4 rings, rm1 contains 2 rings. 3650 * 3651 * Before: 3652 * 3653 * CPU 0 1 2 3 4 5 6 7 3654 * NIC_RX n0 n1 n2 n3 3655 * NIC_TX N0 N1 3656 * 3657 * After: 3658 * 3659 * CPU 0 1 2 3 4 5 6 7 3660 * NIC_RX n0 n1 n2 n3 3661 * NIC_TX N0 N1 3662 */ 3663 void 3664 if_ringmap_align(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1) 3665 { 3666 3667 if (rm0->rm_grid > rm1->rm_grid) 3668 if_ringmap_set_grid(dev, rm1, rm0->rm_grid); 3669 else if (rm0->rm_grid < rm1->rm_grid) 3670 if_ringmap_set_grid(dev, rm0, rm1->rm_grid); 3671 } 3672 3673 void 3674 if_ringmap_match(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1) 3675 { 3676 int subset_grid, cnt, divisor, mod, offset, i; 3677 struct if_ringmap *subset_rm, *rm; 3678 int old_rm0_grid, old_rm1_grid; 3679 3680 if (rm0->rm_grid == rm1->rm_grid) 3681 return; 3682 3683 /* Save grid for later use */ 3684 old_rm0_grid = rm0->rm_grid; 3685 old_rm1_grid = rm1->rm_grid; 3686 3687 if_ringmap_align(dev, rm0, rm1); 3688 3689 /* 3690 * Re-shuffle rings to get more even distribution. 3691 * 3692 * e.g. 12 netisrs, rm0 contains 4 rings, rm1 contains 2 rings. 3693 * 3694 * CPU 0 1 2 3 4 5 6 7 8 9 10 11 3695 * 3696 * NIC_RX a0 a1 a2 a3 b0 b1 b2 b3 c0 c1 c2 c3 3697 * NIC_TX A0 A1 B0 B1 C0 C1 3698 * 3699 * NIC_RX d0 d1 d2 d3 e0 e1 e2 e3 f0 f1 f2 f3 3700 * NIC_TX D0 D1 E0 E1 F0 F1 3701 */ 3702 3703 if (rm0->rm_cnt >= (2 * old_rm1_grid)) { 3704 cnt = rm0->rm_cnt; 3705 subset_grid = old_rm1_grid; 3706 subset_rm = rm1; 3707 rm = rm0; 3708 } else if (rm1->rm_cnt > (2 * old_rm0_grid)) { 3709 cnt = rm1->rm_cnt; 3710 subset_grid = old_rm0_grid; 3711 subset_rm = rm0; 3712 rm = rm1; 3713 } else { 3714 /* No space to shuffle. 
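		 * Neither map qualifies for the re-shuffle described above,
		 * so leave the already-aligned cpumaps as they are.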
*/ 3715 return; 3716 } 3717 3718 mod = cnt / subset_grid; 3719 KKASSERT(mod >= 2); 3720 divisor = netisr_ncpus / rm->rm_grid; 3721 offset = ((device_get_unit(dev) / divisor) % mod) * subset_grid; 3722 3723 for (i = 0; i < subset_rm->rm_cnt; ++i) { 3724 subset_rm->rm_cpumap[i] += offset; 3725 KASSERT(subset_rm->rm_cpumap[i] < netisr_ncpus, 3726 ("match: invalid cpumap[%d] = %d, offset %d", 3727 i, subset_rm->rm_cpumap[i], offset)); 3728 } 3729 #ifdef INVARIANTS 3730 for (i = 0; i < subset_rm->rm_cnt; ++i) { 3731 int j; 3732 3733 for (j = 0; j < rm->rm_cnt; ++j) { 3734 if (rm->rm_cpumap[j] == subset_rm->rm_cpumap[i]) 3735 break; 3736 } 3737 KASSERT(j < rm->rm_cnt, 3738 ("subset cpumap[%d] = %d not found in superset", 3739 i, subset_rm->rm_cpumap[i])); 3740 } 3741 #endif 3742 } 3743 3744 int 3745 if_ringmap_count(const struct if_ringmap *rm) 3746 { 3747 3748 return (rm->rm_cnt); 3749 } 3750 3751 int 3752 if_ringmap_cpumap(const struct if_ringmap *rm, int ring) 3753 { 3754 3755 KASSERT(ring >= 0 && ring < rm->rm_cnt, ("invalid ring %d", ring)); 3756 return (rm->rm_cpumap[ring]); 3757 } 3758 3759 void 3760 if_ringmap_rdrtable(const struct if_ringmap *rm, int table[], int table_nent) 3761 { 3762 int i, grid_idx, grid_cnt, patch_off, patch_cnt, ncopy; 3763 3764 KASSERT(table_nent > 0 && (table_nent & NETISR_CPUMASK) == 0, 3765 ("invalid redirect table entries %d", table_nent)); 3766 3767 grid_idx = 0; 3768 for (i = 0; i < NETISR_CPUMAX; ++i) { 3769 table[i] = grid_idx++ % rm->rm_cnt; 3770 3771 if (grid_idx == rm->rm_grid) 3772 grid_idx = 0; 3773 } 3774 3775 /* 3776 * Make the ring distributed more evenly for the remainder 3777 * of each grid. 3778 * 3779 * e.g. 12 netisrs, rm contains 8 rings. 3780 * 3781 * Redirect table before: 3782 * 3783 * 0 1 2 3 4 5 6 7 0 1 2 3 0 1 2 3 3784 * 4 5 6 7 0 1 2 3 0 1 2 3 4 5 6 7 3785 * 0 1 2 3 0 1 2 3 4 5 6 7 0 1 2 3 3786 * .... 3787 * 3788 * Redirect table after being patched (pX, patched entries): 3789 * 3790 * 0 1 2 3 4 5 6 7 p0 p1 p2 p3 0 1 2 3 3791 * 4 5 6 7 p4 p5 p6 p7 0 1 2 3 4 5 6 7 3792 * p0 p1 p2 p3 0 1 2 3 4 5 6 7 p4 p5 p6 p7 3793 * .... 
3794 */ 3795 patch_cnt = rm->rm_grid % rm->rm_cnt; 3796 if (patch_cnt == 0) 3797 goto done; 3798 patch_off = rm->rm_grid - (rm->rm_grid % rm->rm_cnt); 3799 3800 grid_cnt = roundup(NETISR_CPUMAX, rm->rm_grid) / rm->rm_grid; 3801 grid_idx = 0; 3802 for (i = 0; i < grid_cnt; ++i) { 3803 int j; 3804 3805 for (j = 0; j < patch_cnt; ++j) { 3806 int fix_idx; 3807 3808 fix_idx = (i * rm->rm_grid) + patch_off + j; 3809 if (fix_idx >= NETISR_CPUMAX) 3810 goto done; 3811 table[fix_idx] = grid_idx++ % rm->rm_cnt; 3812 } 3813 } 3814 done: 3815 /* 3816 * If the device supports larger redirect table, duplicate 3817 * the first NETISR_CPUMAX entries to the rest of the table, 3818 * so that it matches upper layer's expectation: 3819 * (hash & NETISR_CPUMASK) % netisr_ncpus 3820 */ 3821 ncopy = table_nent / NETISR_CPUMAX; 3822 for (i = 1; i < ncopy; ++i) { 3823 memcpy(&table[i * NETISR_CPUMAX], table, 3824 NETISR_CPUMAX * sizeof(table[0])); 3825 } 3826 if (if_ringmap_dumprdr) { 3827 for (i = 0; i < table_nent; ++i) { 3828 if (i != 0 && i % 16 == 0) 3829 kprintf("\n"); 3830 kprintf("%03d ", table[i]); 3831 } 3832 kprintf("\n"); 3833 } 3834 } 3835 3836 int 3837 if_ringmap_cpumap_sysctl(SYSCTL_HANDLER_ARGS) 3838 { 3839 struct if_ringmap *rm = arg1; 3840 int i, error = 0; 3841 3842 for (i = 0; i < rm->rm_cnt; ++i) { 3843 int cpu = rm->rm_cpumap[i]; 3844 3845 error = SYSCTL_OUT(req, &cpu, sizeof(cpu)); 3846 if (error) 3847 break; 3848 } 3849 return (error); 3850 } 3851
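
/*
 * A rough sketch of how a driver is expected to consume the ringmap API
 * above; the device, sizes and variable names are illustrative only:
 *
 *	rm = if_ringmap_alloc(dev, wanted_rings, MYDEV_RING_MAX);
 *	nrings = if_ringmap_count(rm);
 *	for (i = 0; i < nrings; ++i)
 *		sc->sc_rx_ring[i].cpuid = if_ringmap_cpumap(rm, i);
 *	if_ringmap_rdrtable(rm, rdr_table, MYDEV_RDR_TABLE_SIZE);
 *	...
 *	if_ringmap_free(rm);
 */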