1 /* 2 * Copyright (c) 1980, 1986, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)if.c 8.3 (Berkeley) 1/4/94 30 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $ 31 */ 32 33 #include "opt_inet6.h" 34 #include "opt_inet.h" 35 #include "opt_ifpoll.h" 36 37 #include <sys/param.h> 38 #include <sys/malloc.h> 39 #include <sys/mbuf.h> 40 #include <sys/systm.h> 41 #include <sys/proc.h> 42 #include <sys/priv.h> 43 #include <sys/protosw.h> 44 #include <sys/socket.h> 45 #include <sys/socketvar.h> 46 #include <sys/socketops.h> 47 #include <sys/kernel.h> 48 #include <sys/ktr.h> 49 #include <sys/mutex.h> 50 #include <sys/lock.h> 51 #include <sys/sockio.h> 52 #include <sys/syslog.h> 53 #include <sys/sysctl.h> 54 #include <sys/domain.h> 55 #include <sys/thread.h> 56 #include <sys/serialize.h> 57 #include <sys/bus.h> 58 #include <sys/jail.h> 59 60 #include <sys/thread2.h> 61 #include <sys/msgport2.h> 62 #include <sys/mutex2.h> 63 64 #include <net/if.h> 65 #include <net/if_arp.h> 66 #include <net/if_dl.h> 67 #include <net/if_types.h> 68 #include <net/if_var.h> 69 #include <net/if_ringmap.h> 70 #include <net/ifq_var.h> 71 #include <net/radix.h> 72 #include <net/route.h> 73 #include <net/if_clone.h> 74 #include <net/netisr2.h> 75 #include <net/netmsg2.h> 76 77 #include <machine/atomic.h> 78 #include <machine/stdarg.h> 79 #include <machine/smp.h> 80 81 #if defined(INET) || defined(INET6) 82 #include <netinet/in.h> 83 #include <netinet/in_var.h> 84 #include <netinet/if_ether.h> 85 #ifdef INET6 86 #include <netinet6/in6_var.h> 87 #include <netinet6/in6_ifattach.h> 88 #endif /* INET6 */ 89 #endif /* INET || INET6 */ 90 91 struct netmsg_ifaddr { 92 struct netmsg_base base; 93 struct ifaddr *ifa; 94 struct ifnet *ifp; 95 int tail; 96 }; 97 98 struct ifsubq_stage_head { 99 TAILQ_HEAD(, ifsubq_stage) stg_head; 100 } __cachealign; 101 102 struct if_ringmap { 103 int rm_cnt; 104 int rm_grid; 105 int rm_cpumap[]; 106 }; 107 108 #define RINGMAP_FLAG_NONE 0x0 109 #define 
RINGMAP_FLAG_POWEROF2 0x1 110 111 /* 112 * System initialization 113 */ 114 static void if_attachdomain(void *); 115 static void if_attachdomain1(struct ifnet *); 116 static int ifconf(u_long, caddr_t, struct ucred *); 117 static void ifinit(void *); 118 static void ifnetinit(void *); 119 static void if_slowtimo(void *); 120 static void link_rtrequest(int, struct rtentry *); 121 static int if_rtdel(struct radix_node *, void *); 122 static void if_slowtimo_dispatch(netmsg_t); 123 124 /* Helper functions */ 125 static void ifsq_watchdog_reset(struct ifsubq_watchdog *); 126 static int if_delmulti_serialized(struct ifnet *, struct sockaddr *); 127 static struct ifnet_array *ifnet_array_alloc(int); 128 static void ifnet_array_free(struct ifnet_array *); 129 static struct ifnet_array *ifnet_array_add(struct ifnet *, 130 const struct ifnet_array *); 131 static struct ifnet_array *ifnet_array_del(struct ifnet *, 132 const struct ifnet_array *); 133 static struct ifg_group *if_creategroup(const char *); 134 static int if_destroygroup(struct ifg_group *); 135 static int if_delgroup_locked(struct ifnet *, const char *); 136 static int if_getgroups(struct ifgroupreq *, struct ifnet *); 137 static int if_getgroupmembers(struct ifgroupreq *); 138 139 #ifdef INET6 140 /* 141 * XXX: declare here to avoid to include many inet6 related files.. 142 * should be more generalized? 143 */ 144 extern void nd6_setmtu(struct ifnet *); 145 #endif 146 147 SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers"); 148 SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management"); 149 SYSCTL_NODE(_net_link, OID_AUTO, ringmap, CTLFLAG_RW, 0, "link ringmap"); 150 151 static int ifsq_stage_cntmax = 16; 152 TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax); 153 SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW, 154 &ifsq_stage_cntmax, 0, "ifq staging packet count max"); 155 156 static int if_stats_compat = 0; 157 SYSCTL_INT(_net_link, OID_AUTO, stats_compat, CTLFLAG_RW, 158 &if_stats_compat, 0, "Compat the old ifnet stats"); 159 160 static int if_ringmap_dumprdr = 0; 161 SYSCTL_INT(_net_link_ringmap, OID_AUTO, dump_rdr, CTLFLAG_RW, 162 &if_ringmap_dumprdr, 0, "dump redirect table"); 163 164 SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL); 165 SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, ifnetinit, NULL); 166 167 static if_com_alloc_t *if_com_alloc[256]; 168 static if_com_free_t *if_com_free[256]; 169 170 MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); 171 MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); 172 MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure"); 173 174 int ifqmaxlen = IFQ_MAXLEN; 175 struct ifnethead ifnet = TAILQ_HEAD_INITIALIZER(ifnet); 176 struct ifgrouphead ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head); 177 static struct lock ifgroup_lock; 178 179 static struct ifnet_array ifnet_array0; 180 static struct ifnet_array *ifnet_array = &ifnet_array0; 181 182 static struct callout if_slowtimo_timer; 183 static struct netmsg_base if_slowtimo_netmsg; 184 185 int if_index = 0; 186 struct ifnet **ifindex2ifnet = NULL; 187 static struct mtx ifnet_mtx = MTX_INITIALIZER("ifnet"); 188 189 static struct ifsubq_stage_head ifsubq_stage_heads[MAXCPU]; 190 191 #ifdef notyet 192 #define IFQ_KTR_STRING "ifq=%p" 193 #define IFQ_KTR_ARGS struct ifaltq *ifq 194 #ifndef KTR_IFQ 195 #define KTR_IFQ KTR_ALL 196 #endif 197 KTR_INFO_MASTER(ifq); 198 KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS); 199 KTR_INFO(KTR_IFQ, ifq, 
dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS); 200 #define logifq(name, arg) KTR_LOG(ifq_ ## name, arg) 201 202 #define IF_START_KTR_STRING "ifp=%p" 203 #define IF_START_KTR_ARGS struct ifnet *ifp 204 #ifndef KTR_IF_START 205 #define KTR_IF_START KTR_ALL 206 #endif 207 KTR_INFO_MASTER(if_start); 208 KTR_INFO(KTR_IF_START, if_start, run, 0, 209 IF_START_KTR_STRING, IF_START_KTR_ARGS); 210 KTR_INFO(KTR_IF_START, if_start, sched, 1, 211 IF_START_KTR_STRING, IF_START_KTR_ARGS); 212 KTR_INFO(KTR_IF_START, if_start, avoid, 2, 213 IF_START_KTR_STRING, IF_START_KTR_ARGS); 214 KTR_INFO(KTR_IF_START, if_start, contend_sched, 3, 215 IF_START_KTR_STRING, IF_START_KTR_ARGS); 216 KTR_INFO(KTR_IF_START, if_start, chase_sched, 4, 217 IF_START_KTR_STRING, IF_START_KTR_ARGS); 218 #define logifstart(name, arg) KTR_LOG(if_start_ ## name, arg) 219 #endif /* notyet */ 220 221 /* 222 * Network interface utility routines. 223 * 224 * Routines with ifa_ifwith* names take sockaddr *'s as 225 * parameters. 226 */ 227 /* ARGSUSED */ 228 static void 229 ifinit(void *dummy) 230 { 231 lockinit(&ifgroup_lock, "ifgroup", 0, 0); 232 233 callout_init_mp(&if_slowtimo_timer); 234 netmsg_init(&if_slowtimo_netmsg, NULL, &netisr_adone_rport, 235 MSGF_PRIORITY, if_slowtimo_dispatch); 236 237 /* Start if_slowtimo */ 238 lwkt_sendmsg(netisr_cpuport(0), &if_slowtimo_netmsg.lmsg); 239 } 240 241 static void 242 ifsq_ifstart_ipifunc(void *arg) 243 { 244 struct ifaltq_subque *ifsq = arg; 245 struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid); 246 247 crit_enter(); 248 if (lmsg->ms_flags & MSGF_DONE) 249 lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), lmsg); 250 crit_exit(); 251 } 252 253 static __inline void 254 ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage) 255 { 256 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED); 257 TAILQ_REMOVE(&head->stg_head, stage, stg_link); 258 stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED); 259 stage->stg_cnt = 0; 260 stage->stg_len = 0; 261 } 262 263 static __inline void 264 ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage) 265 { 266 KKASSERT((stage->stg_flags & 267 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 268 stage->stg_flags |= IFSQ_STAGE_FLAG_QUED; 269 TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link); 270 } 271 272 /* 273 * Schedule ifnet.if_start on the subqueue owner CPU 274 */ 275 static void 276 ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force) 277 { 278 int cpu; 279 280 if (!force && curthread->td_type == TD_TYPE_NETISR && 281 ifsq_stage_cntmax > 0) { 282 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid); 283 284 stage->stg_cnt = 0; 285 stage->stg_len = 0; 286 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0) 287 ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage); 288 stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED; 289 return; 290 } 291 292 cpu = ifsq_get_cpuid(ifsq); 293 if (cpu != mycpuid) 294 lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq); 295 else 296 ifsq_ifstart_ipifunc(ifsq); 297 } 298 299 /* 300 * NOTE: 301 * This function will release ifnet.if_start subqueue interlock, 302 * if ifnet.if_start for the subqueue does not need to be scheduled 303 */ 304 static __inline int 305 ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running) 306 { 307 if (!running || ifsq_is_empty(ifsq) 308 #ifdef ALTQ 309 || ifsq->ifsq_altq->altq_tbr != NULL 310 #endif 311 ) { 312 ALTQ_SQ_LOCK(ifsq); 313 /* 314 * ifnet.if_start subqueue interlock is released, if: 315 * 1) 
Hardware can not take any packets, due to 316 * o interface is marked down 317 * o hardware queue is full (ifsq_is_oactive) 318 * Under the second situation, hardware interrupt 319 * or polling(4) will call/schedule ifnet.if_start 320 * on the subqueue when hardware queue is ready 321 * 2) There is no packet in the subqueue. 322 * Further ifq_dispatch or ifq_handoff will call/ 323 * schedule ifnet.if_start on the subqueue. 324 * 3) TBR is used and it does not allow further 325 * dequeueing. 326 * TBR callout will call ifnet.if_start on the 327 * subqueue. 328 */ 329 if (!running || !ifsq_data_ready(ifsq)) { 330 ifsq_clr_started(ifsq); 331 ALTQ_SQ_UNLOCK(ifsq); 332 return 0; 333 } 334 ALTQ_SQ_UNLOCK(ifsq); 335 } 336 return 1; 337 } 338 339 static void 340 ifsq_ifstart_dispatch(netmsg_t msg) 341 { 342 struct lwkt_msg *lmsg = &msg->base.lmsg; 343 struct ifaltq_subque *ifsq = lmsg->u.ms_resultp; 344 struct ifnet *ifp = ifsq_get_ifp(ifsq); 345 struct globaldata *gd = mycpu; 346 int running = 0, need_sched; 347 348 crit_enter_gd(gd); 349 350 lwkt_replymsg(lmsg, 0); /* reply ASAP */ 351 352 if (gd->gd_cpuid != ifsq_get_cpuid(ifsq)) { 353 /* 354 * We need to chase the subqueue owner CPU change. 355 */ 356 ifsq_ifstart_schedule(ifsq, 1); 357 crit_exit_gd(gd); 358 return; 359 } 360 361 ifsq_serialize_hw(ifsq); 362 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) { 363 ifp->if_start(ifp, ifsq); 364 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 365 running = 1; 366 } 367 need_sched = ifsq_ifstart_need_schedule(ifsq, running); 368 ifsq_deserialize_hw(ifsq); 369 370 if (need_sched) { 371 /* 372 * More data need to be transmitted, ifnet.if_start is 373 * scheduled on the subqueue owner CPU, and we keep going. 374 * NOTE: ifnet.if_start subqueue interlock is not released. 375 */ 376 ifsq_ifstart_schedule(ifsq, 0); 377 } 378 379 crit_exit_gd(gd); 380 } 381 382 /* Device driver ifnet.if_start helper function */ 383 void 384 ifsq_devstart(struct ifaltq_subque *ifsq) 385 { 386 struct ifnet *ifp = ifsq_get_ifp(ifsq); 387 int running = 0; 388 389 ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq); 390 391 ALTQ_SQ_LOCK(ifsq); 392 if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) { 393 ALTQ_SQ_UNLOCK(ifsq); 394 return; 395 } 396 ifsq_set_started(ifsq); 397 ALTQ_SQ_UNLOCK(ifsq); 398 399 ifp->if_start(ifp, ifsq); 400 401 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 402 running = 1; 403 404 if (ifsq_ifstart_need_schedule(ifsq, running)) { 405 /* 406 * More data need to be transmitted, ifnet.if_start is 407 * scheduled on ifnet's CPU, and we keep going. 408 * NOTE: ifnet.if_start interlock is not released. 
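 * The interlock is eventually released by the scheduled
 * ifsq_ifstart_dispatch() once the subqueue drains or the
 * hardware queue becomes full again.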
409 */ 410 ifsq_ifstart_schedule(ifsq, 0); 411 } 412 } 413 414 void 415 if_devstart(struct ifnet *ifp) 416 { 417 ifsq_devstart(ifq_get_subq_default(&ifp->if_snd)); 418 } 419 420 /* Device driver ifnet.if_start schedule helper function */ 421 void 422 ifsq_devstart_sched(struct ifaltq_subque *ifsq) 423 { 424 ifsq_ifstart_schedule(ifsq, 1); 425 } 426 427 void 428 if_devstart_sched(struct ifnet *ifp) 429 { 430 ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd)); 431 } 432 433 static void 434 if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 435 { 436 lwkt_serialize_enter(ifp->if_serializer); 437 } 438 439 static void 440 if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 441 { 442 lwkt_serialize_exit(ifp->if_serializer); 443 } 444 445 static int 446 if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 447 { 448 return lwkt_serialize_try(ifp->if_serializer); 449 } 450 451 #ifdef INVARIANTS 452 static void 453 if_default_serialize_assert(struct ifnet *ifp, 454 enum ifnet_serialize slz __unused, 455 boolean_t serialized) 456 { 457 if (serialized) 458 ASSERT_SERIALIZED(ifp->if_serializer); 459 else 460 ASSERT_NOT_SERIALIZED(ifp->if_serializer); 461 } 462 #endif 463 464 /* 465 * Attach an interface to the list of "active" interfaces. 466 * 467 * The serializer is optional. 468 */ 469 void 470 if_attach(struct ifnet *ifp, lwkt_serialize_t serializer) 471 { 472 unsigned socksize; 473 int namelen, masklen; 474 struct sockaddr_dl *sdl, *sdl_addr; 475 struct ifaddr *ifa; 476 struct ifaltq *ifq; 477 struct ifnet **old_ifindex2ifnet = NULL; 478 struct ifnet_array *old_ifnet_array; 479 int i, q, qlen; 480 char qlenname[64]; 481 482 static int if_indexlim = 8; 483 484 if (ifp->if_serialize != NULL) { 485 KASSERT(ifp->if_deserialize != NULL && 486 ifp->if_tryserialize != NULL && 487 ifp->if_serialize_assert != NULL, 488 ("serialize functions are partially setup")); 489 490 /* 491 * If the device supplies serialize functions, 492 * then clear if_serializer to catch any invalid 493 * usage of this field. 494 */ 495 KASSERT(serializer == NULL, 496 ("both serialize functions and default serializer " 497 "are supplied")); 498 ifp->if_serializer = NULL; 499 } else { 500 KASSERT(ifp->if_deserialize == NULL && 501 ifp->if_tryserialize == NULL && 502 ifp->if_serialize_assert == NULL, 503 ("serialize functions are partially setup")); 504 ifp->if_serialize = if_default_serialize; 505 ifp->if_deserialize = if_default_deserialize; 506 ifp->if_tryserialize = if_default_tryserialize; 507 #ifdef INVARIANTS 508 ifp->if_serialize_assert = if_default_serialize_assert; 509 #endif 510 511 /* 512 * The serializer can be passed in from the device, 513 * allowing the same serializer to be used for both 514 * the interrupt interlock and the device queue. 515 * If not specified, the netif structure will use an 516 * embedded serializer. 517 */ 518 if (serializer == NULL) { 519 serializer = &ifp->if_default_serializer; 520 lwkt_serialize_init(serializer); 521 } 522 ifp->if_serializer = serializer; 523 } 524 525 /* 526 * Make if_addrhead available on all CPUs, since they 527 * could be accessed by any threads. 
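 * Each CPU gets its own list head; the per-CPU ifaddr_container
 * entries are linked onto these lists so that any CPU can walk
 * its own list without touching the others.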
528 */ 529 ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead), 530 M_IFADDR, M_WAITOK | M_ZERO); 531 for (i = 0; i < ncpus; ++i) 532 TAILQ_INIT(&ifp->if_addrheads[i]); 533 534 TAILQ_INIT(&ifp->if_multiaddrs); 535 TAILQ_INIT(&ifp->if_groups); 536 getmicrotime(&ifp->if_lastchange); 537 if_addgroup(ifp, IFG_ALL); 538 539 /* 540 * create a Link Level name for this device 541 */ 542 namelen = strlen(ifp->if_xname); 543 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen; 544 socksize = masklen + ifp->if_addrlen; 545 if (socksize < sizeof(*sdl)) 546 socksize = sizeof(*sdl); 547 socksize = RT_ROUNDUP(socksize); 548 ifa = ifa_create(sizeof(struct ifaddr) + 2 * socksize); 549 sdl = sdl_addr = (struct sockaddr_dl *)(ifa + 1); 550 sdl->sdl_len = socksize; 551 sdl->sdl_family = AF_LINK; 552 bcopy(ifp->if_xname, sdl->sdl_data, namelen); 553 sdl->sdl_nlen = namelen; 554 sdl->sdl_type = ifp->if_type; 555 ifp->if_lladdr = ifa; 556 ifa->ifa_ifp = ifp; 557 ifa->ifa_rtrequest = link_rtrequest; 558 ifa->ifa_addr = (struct sockaddr *)sdl; 559 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); 560 ifa->ifa_netmask = (struct sockaddr *)sdl; 561 sdl->sdl_len = masklen; 562 while (namelen != 0) 563 sdl->sdl_data[--namelen] = 0xff; 564 ifa_iflink(ifa, ifp, 0 /* Insert head */); 565 566 /* 567 * Make if_data available on all CPUs, since they could 568 * be updated by hardware interrupt routing, which could 569 * be bound to any CPU. 570 */ 571 ifp->if_data_pcpu = kmalloc(ncpus * sizeof(struct ifdata_pcpu), 572 M_DEVBUF, 573 M_WAITOK | M_ZERO | M_CACHEALIGN); 574 575 if (ifp->if_mapsubq == NULL) 576 ifp->if_mapsubq = ifq_mapsubq_default; 577 578 ifq = &ifp->if_snd; 579 ifq->altq_type = 0; 580 ifq->altq_disc = NULL; 581 ifq->altq_flags &= ALTQF_CANTCHANGE; 582 ifq->altq_tbr = NULL; 583 ifq->altq_ifp = ifp; 584 585 if (ifq->altq_subq_cnt <= 0) 586 ifq->altq_subq_cnt = 1; 587 ifq->altq_subq = 588 kmalloc(ifq->altq_subq_cnt * sizeof(struct ifaltq_subque), 589 M_DEVBUF, 590 M_WAITOK | M_ZERO | M_CACHEALIGN); 591 592 if (ifq->altq_maxlen == 0) { 593 if_printf(ifp, "driver didn't set altq_maxlen\n"); 594 ifq_set_maxlen(ifq, ifqmaxlen); 595 } 596 597 /* Allow user to override driver's setting. */ 598 ksnprintf(qlenname, sizeof(qlenname), "net.%s.qlenmax", ifp->if_xname); 599 qlen = -1; 600 TUNABLE_INT_FETCH(qlenname, &qlen); 601 if (qlen > 0) { 602 if_printf(ifp, "qlenmax -> %d\n", qlen); 603 ifq_set_maxlen(ifq, qlen); 604 } 605 606 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 607 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 608 609 ALTQ_SQ_LOCK_INIT(ifsq); 610 ifsq->ifsq_index = q; 611 612 ifsq->ifsq_altq = ifq; 613 ifsq->ifsq_ifp = ifp; 614 615 ifsq->ifsq_maxlen = ifq->altq_maxlen; 616 ifsq->ifsq_maxbcnt = ifsq->ifsq_maxlen * MCLBYTES; 617 ifsq->ifsq_prepended = NULL; 618 ifsq->ifsq_started = 0; 619 ifsq->ifsq_hw_oactive = 0; 620 ifsq_set_cpuid(ifsq, 0); 621 if (ifp->if_serializer != NULL) 622 ifsq_set_hw_serialize(ifsq, ifp->if_serializer); 623 624 /* XXX: netisr_ncpus */ 625 ifsq->ifsq_stage = 626 kmalloc(ncpus * sizeof(struct ifsubq_stage), 627 M_DEVBUF, 628 M_WAITOK | M_ZERO | M_CACHEALIGN); 629 for (i = 0; i < ncpus; ++i) 630 ifsq->ifsq_stage[i].stg_subq = ifsq; 631 632 /* 633 * Allocate one if_start message for each CPU, since 634 * the hardware TX ring could be assigned to any CPU. 635 * 636 * NOTE: 637 * If the hardware TX ring polling CPU and the hardware 638 * TX ring interrupt CPU are same, one if_start message 639 * should be enough. 
640 */ 641 ifsq->ifsq_ifstart_nmsg = 642 kmalloc(ncpus * sizeof(struct netmsg_base), 643 M_LWKTMSG, M_WAITOK); 644 for (i = 0; i < ncpus; ++i) { 645 netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL, 646 &netisr_adone_rport, 0, ifsq_ifstart_dispatch); 647 ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq; 648 } 649 } 650 ifq_set_classic(ifq); 651 652 /* 653 * Increase mbuf cluster/jcluster limits for the mbufs that 654 * could sit on the device queues for quite some time. 655 */ 656 if (ifp->if_nmbclusters > 0) 657 mcl_inclimit(ifp->if_nmbclusters); 658 if (ifp->if_nmbjclusters > 0) 659 mjcl_inclimit(ifp->if_nmbjclusters); 660 661 /* 662 * Install this ifp into ifindex2inet, ifnet queue and ifnet 663 * array after it is setup. 664 * 665 * Protect ifindex2ifnet, ifnet queue and ifnet array changes 666 * by ifnet lock, so that non-netisr threads could get a 667 * consistent view. 668 */ 669 ifnet_lock(); 670 671 /* Don't update if_index until ifindex2ifnet is setup */ 672 ifp->if_index = if_index + 1; 673 sdl_addr->sdl_index = ifp->if_index; 674 675 /* 676 * Install this ifp into ifindex2ifnet 677 */ 678 if (ifindex2ifnet == NULL || ifp->if_index >= if_indexlim) { 679 unsigned int n; 680 struct ifnet **q; 681 682 /* 683 * Grow ifindex2ifnet 684 */ 685 if_indexlim <<= 1; 686 n = if_indexlim * sizeof(*q); 687 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO); 688 if (ifindex2ifnet != NULL) { 689 bcopy(ifindex2ifnet, q, n/2); 690 /* Free old ifindex2ifnet after sync all netisrs */ 691 old_ifindex2ifnet = ifindex2ifnet; 692 } 693 ifindex2ifnet = q; 694 } 695 ifindex2ifnet[ifp->if_index] = ifp; 696 /* 697 * Update if_index after this ifp is installed into ifindex2ifnet, 698 * so that netisrs could get a consistent view of ifindex2ifnet. 699 */ 700 cpu_sfence(); 701 if_index = ifp->if_index; 702 703 /* 704 * Install this ifp into ifnet array. 705 */ 706 /* Free old ifnet array after sync all netisrs */ 707 old_ifnet_array = ifnet_array; 708 ifnet_array = ifnet_array_add(ifp, old_ifnet_array); 709 710 /* 711 * Install this ifp into ifnet queue. 712 */ 713 TAILQ_INSERT_TAIL(&ifnetlist, ifp, if_link); 714 715 ifnet_unlock(); 716 717 /* 718 * Sync all netisrs so that the old ifindex2ifnet and ifnet array 719 * are no longer accessed and we can free them safely later on. 720 */ 721 netmsg_service_sync(); 722 if (old_ifindex2ifnet != NULL) 723 kfree(old_ifindex2ifnet, M_IFADDR); 724 ifnet_array_free(old_ifnet_array); 725 726 if (!SLIST_EMPTY(&domains)) 727 if_attachdomain1(ifp); 728 729 /* Announce the interface. 
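 * Event handlers, devctl listeners and the routing socket are all
 * told of the new arrival below.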
*/ 730 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 731 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); 732 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 733 } 734 735 static void 736 if_attachdomain(void *dummy) 737 { 738 struct ifnet *ifp; 739 740 ifnet_lock(); 741 TAILQ_FOREACH(ifp, &ifnetlist, if_list) 742 if_attachdomain1(ifp); 743 ifnet_unlock(); 744 } 745 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST, 746 if_attachdomain, NULL); 747 748 static void 749 if_attachdomain1(struct ifnet *ifp) 750 { 751 struct domain *dp; 752 753 crit_enter(); 754 755 /* address family dependent data region */ 756 bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); 757 SLIST_FOREACH(dp, &domains, dom_next) 758 if (dp->dom_ifattach) 759 ifp->if_afdata[dp->dom_family] = 760 (*dp->dom_ifattach)(ifp); 761 crit_exit(); 762 } 763 764 /* 765 * Purge all addresses whose type is _not_ AF_LINK 766 */ 767 static void 768 if_purgeaddrs_nolink_dispatch(netmsg_t nmsg) 769 { 770 struct ifnet *ifp = nmsg->lmsg.u.ms_resultp; 771 struct ifaddr_container *ifac, *next; 772 773 ASSERT_NETISR0; 774 775 /* 776 * The ifaddr processing in the following loop will block, 777 * however, this function is called in netisr0, in which 778 * ifaddr list changes happen, so we don't care about the 779 * blockness of the ifaddr processing here. 780 */ 781 TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid], 782 ifa_link, next) { 783 struct ifaddr *ifa = ifac->ifa; 784 785 /* Ignore marker */ 786 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 787 continue; 788 789 /* Leave link ifaddr as it is */ 790 if (ifa->ifa_addr->sa_family == AF_LINK) 791 continue; 792 #ifdef INET 793 /* XXX: Ugly!! ad hoc just for INET */ 794 if (ifa->ifa_addr->sa_family == AF_INET) { 795 struct ifaliasreq ifr; 796 struct sockaddr_in saved_addr, saved_dst; 797 #ifdef IFADDR_DEBUG_VERBOSE 798 int i; 799 800 kprintf("purge in4 addr %p: ", ifa); 801 for (i = 0; i < ncpus; ++i) { 802 kprintf("%d ", 803 ifa->ifa_containers[i].ifa_refcnt); 804 } 805 kprintf("\n"); 806 #endif 807 808 /* Save information for panic. 
*/ 809 memcpy(&saved_addr, ifa->ifa_addr, sizeof(saved_addr)); 810 if (ifa->ifa_dstaddr != NULL) { 811 memcpy(&saved_dst, ifa->ifa_dstaddr, 812 sizeof(saved_dst)); 813 } else { 814 memset(&saved_dst, 0, sizeof(saved_dst)); 815 } 816 817 bzero(&ifr, sizeof ifr); 818 ifr.ifra_addr = *ifa->ifa_addr; 819 if (ifa->ifa_dstaddr) 820 ifr.ifra_broadaddr = *ifa->ifa_dstaddr; 821 if (in_control(SIOCDIFADDR, (caddr_t)&ifr, ifp, 822 NULL) == 0) 823 continue; 824 825 /* MUST NOT HAPPEN */ 826 panic("%s: in_control failed %x, dst %x", ifp->if_xname, 827 ntohl(saved_addr.sin_addr.s_addr), 828 ntohl(saved_dst.sin_addr.s_addr)); 829 } 830 #endif /* INET */ 831 #ifdef INET6 832 if (ifa->ifa_addr->sa_family == AF_INET6) { 833 #ifdef IFADDR_DEBUG_VERBOSE 834 int i; 835 836 kprintf("purge in6 addr %p: ", ifa); 837 for (i = 0; i < ncpus; ++i) { 838 kprintf("%d ", 839 ifa->ifa_containers[i].ifa_refcnt); 840 } 841 kprintf("\n"); 842 #endif 843 844 in6_purgeaddr(ifa); 845 /* ifp_addrhead is already updated */ 846 continue; 847 } 848 #endif /* INET6 */ 849 if_printf(ifp, "destroy ifaddr family %d\n", 850 ifa->ifa_addr->sa_family); 851 ifa_ifunlink(ifa, ifp); 852 ifa_destroy(ifa); 853 } 854 855 netisr_replymsg(&nmsg->base, 0); 856 } 857 858 void 859 if_purgeaddrs_nolink(struct ifnet *ifp) 860 { 861 struct netmsg_base nmsg; 862 863 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0, 864 if_purgeaddrs_nolink_dispatch); 865 nmsg.lmsg.u.ms_resultp = ifp; 866 netisr_domsg(&nmsg, 0); 867 } 868 869 static void 870 ifq_stage_detach_handler(netmsg_t nmsg) 871 { 872 struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp; 873 int q; 874 875 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 876 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 877 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid); 878 879 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) 880 ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage); 881 } 882 lwkt_replymsg(&nmsg->lmsg, 0); 883 } 884 885 static void 886 ifq_stage_detach(struct ifaltq *ifq) 887 { 888 struct netmsg_base base; 889 int cpu; 890 891 netmsg_init(&base, NULL, &curthread->td_msgport, 0, 892 ifq_stage_detach_handler); 893 base.lmsg.u.ms_resultp = ifq; 894 895 /* XXX netisr_ncpus */ 896 for (cpu = 0; cpu < ncpus; ++cpu) 897 lwkt_domsg(netisr_cpuport(cpu), &base.lmsg, 0); 898 } 899 900 struct netmsg_if_rtdel { 901 struct netmsg_base base; 902 struct ifnet *ifp; 903 }; 904 905 static void 906 if_rtdel_dispatch(netmsg_t msg) 907 { 908 struct netmsg_if_rtdel *rmsg = (void *)msg; 909 int i, cpu; 910 911 cpu = mycpuid; 912 ASSERT_NETISR_NCPUS(cpu); 913 914 for (i = 1; i <= AF_MAX; i++) { 915 struct radix_node_head *rnh; 916 917 if ((rnh = rt_tables[cpu][i]) == NULL) 918 continue; 919 rnh->rnh_walktree(rnh, if_rtdel, rmsg->ifp); 920 } 921 netisr_forwardmsg(&msg->base, cpu + 1); 922 } 923 924 /* 925 * Detach an interface, removing it from the 926 * list of "active" interfaces. 927 */ 928 void 929 if_detach(struct ifnet *ifp) 930 { 931 struct ifnet_array *old_ifnet_array; 932 struct ifg_list *ifgl; 933 struct netmsg_if_rtdel msg; 934 struct domain *dp; 935 int q; 936 937 /* Announce that the interface is gone. */ 938 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp); 939 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 940 devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL); 941 942 /* 943 * Remove this ifp from ifindex2inet, ifnet queue and ifnet 944 * array before it is whacked. 
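 * The teardown broadly mirrors if_attach(): the same structures are
 * unhooked under the ifnet lock, and the old ifnet array is freed
 * only after all netisrs have been synchronized.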
945 * 946 * Protect ifindex2ifnet, ifnet queue and ifnet array changes 947 * by ifnet lock, so that non-netisr threads could get a 948 * consistent view. 949 */ 950 ifnet_lock(); 951 952 /* 953 * Remove this ifp from ifindex2ifnet and maybe decrement if_index. 954 */ 955 ifindex2ifnet[ifp->if_index] = NULL; 956 while (if_index > 0 && ifindex2ifnet[if_index] == NULL) 957 if_index--; 958 959 /* 960 * Remove this ifp from ifnet queue. 961 */ 962 TAILQ_REMOVE(&ifnetlist, ifp, if_link); 963 964 /* 965 * Remove this ifp from ifnet array. 966 */ 967 /* Free old ifnet array after sync all netisrs */ 968 old_ifnet_array = ifnet_array; 969 ifnet_array = ifnet_array_del(ifp, old_ifnet_array); 970 971 ifnet_unlock(); 972 973 ifgroup_lockmgr(LK_EXCLUSIVE); 974 while ((ifgl = TAILQ_FIRST(&ifp->if_groups)) != NULL) 975 if_delgroup_locked(ifp, ifgl->ifgl_group->ifg_group); 976 ifgroup_lockmgr(LK_RELEASE); 977 978 /* 979 * Sync all netisrs so that the old ifnet array is no longer 980 * accessed and we can free it safely later on. 981 */ 982 netmsg_service_sync(); 983 ifnet_array_free(old_ifnet_array); 984 985 /* 986 * Remove routes and flush queues. 987 */ 988 crit_enter(); 989 #ifdef IFPOLL_ENABLE 990 if (ifp->if_flags & IFF_NPOLLING) 991 ifpoll_deregister(ifp); 992 #endif 993 if_down(ifp); 994 995 /* Decrease the mbuf clusters/jclusters limits increased by us */ 996 if (ifp->if_nmbclusters > 0) 997 mcl_inclimit(-ifp->if_nmbclusters); 998 if (ifp->if_nmbjclusters > 0) 999 mjcl_inclimit(-ifp->if_nmbjclusters); 1000 1001 #ifdef ALTQ 1002 if (ifq_is_enabled(&ifp->if_snd)) 1003 altq_disable(&ifp->if_snd); 1004 if (ifq_is_attached(&ifp->if_snd)) 1005 altq_detach(&ifp->if_snd); 1006 #endif 1007 1008 /* 1009 * Clean up all addresses. 1010 */ 1011 ifp->if_lladdr = NULL; 1012 1013 if_purgeaddrs_nolink(ifp); 1014 if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) { 1015 struct ifaddr *ifa; 1016 1017 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa; 1018 KASSERT(ifa->ifa_addr->sa_family == AF_LINK, 1019 ("non-link ifaddr is left on if_addrheads")); 1020 1021 ifa_ifunlink(ifa, ifp); 1022 ifa_destroy(ifa); 1023 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]), 1024 ("there are still ifaddrs left on if_addrheads")); 1025 } 1026 1027 #ifdef INET 1028 /* 1029 * Remove all IPv4 kernel structures related to ifp. 1030 */ 1031 in_ifdetach(ifp); 1032 #endif 1033 1034 #ifdef INET6 1035 /* 1036 * Remove all IPv6 kernel structs related to ifp. This should be done 1037 * before removing routing entries below, since IPv6 interface direct 1038 * routes are expected to be removed by the IPv6-specific kernel API. 1039 * Otherwise, the kernel will detect some inconsistency and bark it. 
1040 */ 1041 in6_ifdetach(ifp); 1042 #endif 1043 1044 /* 1045 * Delete all remaining routes using this interface 1046 */ 1047 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, 1048 if_rtdel_dispatch); 1049 msg.ifp = ifp; 1050 netisr_domsg_global(&msg.base); 1051 1052 SLIST_FOREACH(dp, &domains, dom_next) { 1053 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) 1054 (*dp->dom_ifdetach)(ifp, 1055 ifp->if_afdata[dp->dom_family]); 1056 } 1057 1058 kfree(ifp->if_addrheads, M_IFADDR); 1059 1060 lwkt_synchronize_ipiqs("if_detach"); 1061 ifq_stage_detach(&ifp->if_snd); 1062 1063 for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) { 1064 struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q]; 1065 1066 kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG); 1067 kfree(ifsq->ifsq_stage, M_DEVBUF); 1068 } 1069 kfree(ifp->if_snd.altq_subq, M_DEVBUF); 1070 1071 kfree(ifp->if_data_pcpu, M_DEVBUF); 1072 1073 crit_exit(); 1074 } 1075 1076 int 1077 ifgroup_lockmgr(u_int flags) 1078 { 1079 return lockmgr(&ifgroup_lock, flags); 1080 } 1081 1082 /* 1083 * Create an empty interface group. 1084 */ 1085 static struct ifg_group * 1086 if_creategroup(const char *groupname) 1087 { 1088 struct ifg_group *ifg; 1089 1090 ifg = kmalloc(sizeof(*ifg), M_IFNET, M_WAITOK); 1091 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); 1092 ifg->ifg_refcnt = 0; 1093 ifg->ifg_carp_demoted = 0; 1094 TAILQ_INIT(&ifg->ifg_members); 1095 1096 ifgroup_lockmgr(LK_EXCLUSIVE); 1097 TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next); 1098 ifgroup_lockmgr(LK_RELEASE); 1099 1100 EVENTHANDLER_INVOKE(group_attach_event, ifg); 1101 1102 return (ifg); 1103 } 1104 1105 /* 1106 * Destroy an empty interface group. 1107 */ 1108 static int 1109 if_destroygroup(struct ifg_group *ifg) 1110 { 1111 KASSERT(ifg->ifg_refcnt == 0, 1112 ("trying to delete a non-empty interface group")); 1113 1114 ifgroup_lockmgr(LK_EXCLUSIVE); 1115 TAILQ_REMOVE(&ifg_head, ifg, ifg_next); 1116 ifgroup_lockmgr(LK_RELEASE); 1117 1118 EVENTHANDLER_INVOKE(group_detach_event, ifg); 1119 kfree(ifg, M_IFNET); 1120 1121 return (0); 1122 } 1123 1124 /* 1125 * Add the interface to a group. 1126 * The target group will be created if it doesn't exist. 1127 */ 1128 int 1129 if_addgroup(struct ifnet *ifp, const char *groupname) 1130 { 1131 struct ifg_list *ifgl; 1132 struct ifg_group *ifg; 1133 struct ifg_member *ifgm; 1134 1135 if (groupname[0] && 1136 groupname[strlen(groupname) - 1] >= '0' && 1137 groupname[strlen(groupname) - 1] <= '9') 1138 return (EINVAL); 1139 1140 ifgroup_lockmgr(LK_SHARED); 1141 1142 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { 1143 if (strcmp(ifgl->ifgl_group->ifg_group, groupname) == 0) { 1144 ifgroup_lockmgr(LK_RELEASE); 1145 return (EEXIST); 1146 } 1147 } 1148 1149 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) { 1150 if (strcmp(ifg->ifg_group, groupname) == 0) 1151 break; 1152 } 1153 1154 ifgroup_lockmgr(LK_RELEASE); 1155 1156 if (ifg == NULL) 1157 ifg = if_creategroup(groupname); 1158 1159 ifgl = kmalloc(sizeof(*ifgl), M_IFNET, M_WAITOK); 1160 ifgm = kmalloc(sizeof(*ifgm), M_IFNET, M_WAITOK); 1161 ifgl->ifgl_group = ifg; 1162 ifgm->ifgm_ifp = ifp; 1163 ifg->ifg_refcnt++; 1164 1165 ifgroup_lockmgr(LK_EXCLUSIVE); 1166 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); 1167 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); 1168 ifgroup_lockmgr(LK_RELEASE); 1169 1170 EVENTHANDLER_INVOKE(group_change_event, groupname); 1171 1172 return (0); 1173 } 1174 1175 /* 1176 * Remove the interface from a group. 
1177 * The group will be destroyed if it becomes empty. 1178 * 1179 * The 'ifgroup_lock' must be hold exclusively when calling this. 1180 */ 1181 static int 1182 if_delgroup_locked(struct ifnet *ifp, const char *groupname) 1183 { 1184 struct ifg_list *ifgl; 1185 struct ifg_member *ifgm; 1186 1187 KKASSERT(lockstatus(&ifgroup_lock, curthread) == LK_EXCLUSIVE); 1188 1189 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { 1190 if (strcmp(ifgl->ifgl_group->ifg_group, groupname) == 0) 1191 break; 1192 } 1193 if (ifgl == NULL) 1194 return (ENOENT); 1195 1196 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); 1197 1198 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) { 1199 if (ifgm->ifgm_ifp == ifp) 1200 break; 1201 } 1202 1203 if (ifgm != NULL) { 1204 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); 1205 1206 ifgroup_lockmgr(LK_RELEASE); 1207 EVENTHANDLER_INVOKE(group_change_event, groupname); 1208 ifgroup_lockmgr(LK_EXCLUSIVE); 1209 1210 kfree(ifgm, M_IFNET); 1211 ifgl->ifgl_group->ifg_refcnt--; 1212 } 1213 1214 if (ifgl->ifgl_group->ifg_refcnt == 0) { 1215 ifgroup_lockmgr(LK_RELEASE); 1216 if_destroygroup(ifgl->ifgl_group); 1217 ifgroup_lockmgr(LK_EXCLUSIVE); 1218 } 1219 1220 kfree(ifgl, M_IFNET); 1221 1222 return (0); 1223 } 1224 1225 int 1226 if_delgroup(struct ifnet *ifp, const char *groupname) 1227 { 1228 int error; 1229 1230 ifgroup_lockmgr(LK_EXCLUSIVE); 1231 error = if_delgroup_locked(ifp, groupname); 1232 ifgroup_lockmgr(LK_RELEASE); 1233 1234 return (error); 1235 } 1236 1237 /* 1238 * Store all the groups that the interface belongs to in memory 1239 * pointed to by data. 1240 */ 1241 static int 1242 if_getgroups(struct ifgroupreq *ifgr, struct ifnet *ifp) 1243 { 1244 struct ifg_list *ifgl; 1245 struct ifg_req *ifgrq, *p; 1246 int len, error; 1247 1248 len = 0; 1249 ifgroup_lockmgr(LK_SHARED); 1250 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1251 len += sizeof(struct ifg_req); 1252 ifgroup_lockmgr(LK_RELEASE); 1253 1254 if (ifgr->ifgr_len == 0) { 1255 /* 1256 * Caller is asking how much memory should be allocated in 1257 * the next request in order to hold all the groups. 1258 */ 1259 ifgr->ifgr_len = len; 1260 return (0); 1261 } else if (ifgr->ifgr_len != len) { 1262 return (EINVAL); 1263 } 1264 1265 ifgrq = kmalloc(len, M_TEMP, M_INTWAIT | M_NULLOK | M_ZERO); 1266 if (ifgrq == NULL) 1267 return (ENOMEM); 1268 1269 ifgroup_lockmgr(LK_SHARED); 1270 p = ifgrq; 1271 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { 1272 if (len < sizeof(struct ifg_req)) { 1273 ifgroup_lockmgr(LK_RELEASE); 1274 return (EINVAL); 1275 } 1276 1277 strlcpy(p->ifgrq_group, ifgl->ifgl_group->ifg_group, 1278 sizeof(ifgrq->ifgrq_group)); 1279 len -= sizeof(struct ifg_req); 1280 p++; 1281 } 1282 ifgroup_lockmgr(LK_RELEASE); 1283 1284 error = copyout(ifgrq, ifgr->ifgr_groups, ifgr->ifgr_len); 1285 kfree(ifgrq, M_TEMP); 1286 if (error) 1287 return (error); 1288 1289 return (0); 1290 } 1291 1292 /* 1293 * Store all the members of a group in memory pointed to by data. 
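 *
 * As with if_getgroups(), the caller sizes the buffer in two steps:
 * a first call with ifgr_len == 0 only reports the required length,
 * while a second call with a matching ifgr_len has the ifg_req array
 * copied out to ifgr_groups.  A hypothetical userland sketch
 * (names of the local variables are illustrative only):
 *
 *	struct ifgroupreq ifgr;
 *
 *	bzero(&ifgr, sizeof(ifgr));
 *	strlcpy(ifgr.ifgr_name, "egress", sizeof(ifgr.ifgr_name));
 *	ioctl(s, SIOCGIFGMEMB, &ifgr);		(ifgr_len is filled in)
 *	ifgr.ifgr_groups = malloc(ifgr.ifgr_len);
 *	ioctl(s, SIOCGIFGMEMB, &ifgr);		(members are copied out)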
1294 */ 1295 static int 1296 if_getgroupmembers(struct ifgroupreq *ifgr) 1297 { 1298 struct ifg_group *ifg; 1299 struct ifg_member *ifgm; 1300 struct ifg_req *ifgrq, *p; 1301 int len, error; 1302 1303 ifgroup_lockmgr(LK_SHARED); 1304 1305 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) { 1306 if (strcmp(ifg->ifg_group, ifgr->ifgr_name) == 0) 1307 break; 1308 } 1309 if (ifg == NULL) { 1310 ifgroup_lockmgr(LK_RELEASE); 1311 return (ENOENT); 1312 } 1313 1314 len = 0; 1315 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) 1316 len += sizeof(struct ifg_req); 1317 1318 ifgroup_lockmgr(LK_RELEASE); 1319 1320 if (ifgr->ifgr_len == 0) { 1321 ifgr->ifgr_len = len; 1322 return (0); 1323 } else if (ifgr->ifgr_len != len) { 1324 return (EINVAL); 1325 } 1326 1327 ifgrq = kmalloc(len, M_TEMP, M_INTWAIT | M_NULLOK | M_ZERO); 1328 if (ifgrq == NULL) 1329 return (ENOMEM); 1330 1331 ifgroup_lockmgr(LK_SHARED); 1332 p = ifgrq; 1333 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) { 1334 if (len < sizeof(struct ifg_req)) { 1335 ifgroup_lockmgr(LK_RELEASE); 1336 return (EINVAL); 1337 } 1338 1339 strlcpy(p->ifgrq_member, ifgm->ifgm_ifp->if_xname, 1340 sizeof(p->ifgrq_member)); 1341 len -= sizeof(struct ifg_req); 1342 p++; 1343 } 1344 ifgroup_lockmgr(LK_RELEASE); 1345 1346 error = copyout(ifgrq, ifgr->ifgr_groups, ifgr->ifgr_len); 1347 kfree(ifgrq, M_TEMP); 1348 if (error) 1349 return (error); 1350 1351 return (0); 1352 } 1353 1354 /* 1355 * Delete Routes for a Network Interface 1356 * 1357 * Called for each routing entry via the rnh->rnh_walktree() call above 1358 * to delete all route entries referencing a detaching network interface. 1359 * 1360 * Arguments: 1361 * rn pointer to node in the routing table 1362 * arg argument passed to rnh->rnh_walktree() - detaching interface 1363 * 1364 * Returns: 1365 * 0 successful 1366 * errno failed - reason indicated 1367 * 1368 */ 1369 static int 1370 if_rtdel(struct radix_node *rn, void *arg) 1371 { 1372 struct rtentry *rt = (struct rtentry *)rn; 1373 struct ifnet *ifp = arg; 1374 int err; 1375 1376 if (rt->rt_ifp == ifp) { 1377 1378 /* 1379 * Protect (sorta) against walktree recursion problems 1380 * with cloned routes 1381 */ 1382 if (!(rt->rt_flags & RTF_UP)) 1383 return (0); 1384 1385 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, 1386 rt_mask(rt), rt->rt_flags, 1387 NULL); 1388 if (err) { 1389 log(LOG_WARNING, "if_rtdel: error %d\n", err); 1390 } 1391 } 1392 1393 return (0); 1394 } 1395 1396 static __inline boolean_t 1397 ifa_prefer(const struct ifaddr *cur_ifa, const struct ifaddr *old_ifa) 1398 { 1399 if (old_ifa == NULL) 1400 return TRUE; 1401 1402 if ((old_ifa->ifa_ifp->if_flags & IFF_UP) == 0 && 1403 (cur_ifa->ifa_ifp->if_flags & IFF_UP)) 1404 return TRUE; 1405 if ((old_ifa->ifa_flags & IFA_ROUTE) == 0 && 1406 (cur_ifa->ifa_flags & IFA_ROUTE)) 1407 return TRUE; 1408 return FALSE; 1409 } 1410 1411 /* 1412 * Locate an interface based on a complete address. 
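 *
 * The match is exact: the address family must agree and sa_equal()
 * must hold on the whole sockaddr (the broadcast address of a
 * broadcast-capable interface also matches).  A hypothetical
 * in-kernel caller might build the query like this:
 *
 *	struct sockaddr_in sin;
 *
 *	bzero(&sin, sizeof(sin));
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	ifa = ifa_ifwithaddr((struct sockaddr *)&sin);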
1413 */ 1414 struct ifaddr * 1415 ifa_ifwithaddr(struct sockaddr *addr) 1416 { 1417 const struct ifnet_array *arr; 1418 int i; 1419 1420 arr = ifnet_array_get(); 1421 for (i = 0; i < arr->ifnet_count; ++i) { 1422 struct ifnet *ifp = arr->ifnet_arr[i]; 1423 struct ifaddr_container *ifac; 1424 1425 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1426 struct ifaddr *ifa = ifac->ifa; 1427 1428 if (ifa->ifa_addr->sa_family != addr->sa_family) 1429 continue; 1430 if (sa_equal(addr, ifa->ifa_addr)) 1431 return (ifa); 1432 if ((ifp->if_flags & IFF_BROADCAST) && 1433 ifa->ifa_broadaddr && 1434 /* IPv6 doesn't have broadcast */ 1435 ifa->ifa_broadaddr->sa_len != 0 && 1436 sa_equal(ifa->ifa_broadaddr, addr)) 1437 return (ifa); 1438 } 1439 } 1440 return (NULL); 1441 } 1442 1443 /* 1444 * Locate the point to point interface with a given destination address. 1445 */ 1446 struct ifaddr * 1447 ifa_ifwithdstaddr(struct sockaddr *addr) 1448 { 1449 const struct ifnet_array *arr; 1450 int i; 1451 1452 arr = ifnet_array_get(); 1453 for (i = 0; i < arr->ifnet_count; ++i) { 1454 struct ifnet *ifp = arr->ifnet_arr[i]; 1455 struct ifaddr_container *ifac; 1456 1457 if (!(ifp->if_flags & IFF_POINTOPOINT)) 1458 continue; 1459 1460 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1461 struct ifaddr *ifa = ifac->ifa; 1462 1463 if (ifa->ifa_addr->sa_family != addr->sa_family) 1464 continue; 1465 if (ifa->ifa_dstaddr && 1466 sa_equal(addr, ifa->ifa_dstaddr)) 1467 return (ifa); 1468 } 1469 } 1470 return (NULL); 1471 } 1472 1473 /* 1474 * Find an interface on a specific network. If many, choice 1475 * is most specific found. 1476 */ 1477 struct ifaddr * 1478 ifa_ifwithnet(struct sockaddr *addr) 1479 { 1480 struct ifaddr *ifa_maybe = NULL; 1481 u_int af = addr->sa_family; 1482 char *addr_data = addr->sa_data, *cplim; 1483 const struct ifnet_array *arr; 1484 int i; 1485 1486 /* 1487 * AF_LINK addresses can be looked up directly by their index number, 1488 * so do that if we can. 1489 */ 1490 if (af == AF_LINK) { 1491 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr; 1492 1493 if (sdl->sdl_index && sdl->sdl_index <= if_index) 1494 return (ifindex2ifnet[sdl->sdl_index]->if_lladdr); 1495 } 1496 1497 /* 1498 * Scan though each interface, looking for ones that have 1499 * addresses in this address family. 1500 */ 1501 arr = ifnet_array_get(); 1502 for (i = 0; i < arr->ifnet_count; ++i) { 1503 struct ifnet *ifp = arr->ifnet_arr[i]; 1504 struct ifaddr_container *ifac; 1505 1506 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1507 struct ifaddr *ifa = ifac->ifa; 1508 char *cp, *cp2, *cp3; 1509 1510 if (ifa->ifa_addr->sa_family != af) 1511 next: continue; 1512 if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) { 1513 /* 1514 * This is a bit broken as it doesn't 1515 * take into account that the remote end may 1516 * be a single node in the network we are 1517 * looking for. 1518 * The trouble is that we don't know the 1519 * netmask for the remote end. 1520 */ 1521 if (ifa->ifa_dstaddr != NULL && 1522 sa_equal(addr, ifa->ifa_dstaddr)) 1523 return (ifa); 1524 } else { 1525 /* 1526 * if we have a special address handler, 1527 * then use it instead of the generic one. 1528 */ 1529 if (ifa->ifa_claim_addr) { 1530 if ((*ifa->ifa_claim_addr)(ifa, addr)) { 1531 return (ifa); 1532 } else { 1533 continue; 1534 } 1535 } 1536 1537 /* 1538 * Scan all the bits in the ifa's address. 
1539 * If a bit dissagrees with what we are 1540 * looking for, mask it with the netmask 1541 * to see if it really matters. 1542 * (A byte at a time) 1543 */ 1544 if (ifa->ifa_netmask == 0) 1545 continue; 1546 cp = addr_data; 1547 cp2 = ifa->ifa_addr->sa_data; 1548 cp3 = ifa->ifa_netmask->sa_data; 1549 cplim = ifa->ifa_netmask->sa_len + 1550 (char *)ifa->ifa_netmask; 1551 while (cp3 < cplim) 1552 if ((*cp++ ^ *cp2++) & *cp3++) 1553 goto next; /* next address! */ 1554 /* 1555 * If the netmask of what we just found 1556 * is more specific than what we had before 1557 * (if we had one) then remember the new one 1558 * before continuing to search for an even 1559 * better one. If the netmasks are equal, 1560 * we prefer the this ifa based on the result 1561 * of ifa_prefer(). 1562 */ 1563 if (ifa_maybe == NULL || 1564 rn_refines((char *)ifa->ifa_netmask, 1565 (char *)ifa_maybe->ifa_netmask) || 1566 (sa_equal(ifa_maybe->ifa_netmask, 1567 ifa->ifa_netmask) && 1568 ifa_prefer(ifa, ifa_maybe))) 1569 ifa_maybe = ifa; 1570 } 1571 } 1572 } 1573 return (ifa_maybe); 1574 } 1575 1576 /* 1577 * Find an interface address specific to an interface best matching 1578 * a given address. 1579 */ 1580 struct ifaddr * 1581 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp) 1582 { 1583 struct ifaddr_container *ifac; 1584 char *cp, *cp2, *cp3; 1585 char *cplim; 1586 struct ifaddr *ifa_maybe = NULL; 1587 u_int af = addr->sa_family; 1588 1589 if (af >= AF_MAX) 1590 return (0); 1591 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1592 struct ifaddr *ifa = ifac->ifa; 1593 1594 if (ifa->ifa_addr->sa_family != af) 1595 continue; 1596 if (ifa_maybe == NULL) 1597 ifa_maybe = ifa; 1598 if (ifa->ifa_netmask == NULL) { 1599 if (sa_equal(addr, ifa->ifa_addr) || 1600 (ifa->ifa_dstaddr != NULL && 1601 sa_equal(addr, ifa->ifa_dstaddr))) 1602 return (ifa); 1603 continue; 1604 } 1605 if (ifp->if_flags & IFF_POINTOPOINT) { 1606 if (sa_equal(addr, ifa->ifa_dstaddr)) 1607 return (ifa); 1608 } else { 1609 cp = addr->sa_data; 1610 cp2 = ifa->ifa_addr->sa_data; 1611 cp3 = ifa->ifa_netmask->sa_data; 1612 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; 1613 for (; cp3 < cplim; cp3++) 1614 if ((*cp++ ^ *cp2++) & *cp3) 1615 break; 1616 if (cp3 == cplim) 1617 return (ifa); 1618 } 1619 } 1620 return (ifa_maybe); 1621 } 1622 1623 /* 1624 * Default action when installing a route with a Link Level gateway. 1625 * Lookup an appropriate real ifa to point to. 1626 * This should be moved to /sys/net/link.c eventually. 1627 */ 1628 static void 1629 link_rtrequest(int cmd, struct rtentry *rt) 1630 { 1631 struct ifaddr *ifa; 1632 struct sockaddr *dst; 1633 struct ifnet *ifp; 1634 1635 if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL || 1636 (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL) 1637 return; 1638 ifa = ifaof_ifpforaddr(dst, ifp); 1639 if (ifa != NULL) { 1640 IFAFREE(rt->rt_ifa); 1641 IFAREF(ifa); 1642 rt->rt_ifa = ifa; 1643 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) 1644 ifa->ifa_rtrequest(cmd, rt); 1645 } 1646 } 1647 1648 struct netmsg_ifroute { 1649 struct netmsg_base base; 1650 struct ifnet *ifp; 1651 int flag; 1652 int fam; 1653 }; 1654 1655 /* 1656 * Mark an interface down and notify protocols of the transition. 
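 * Runs in netisr0: the flag is cleared, the routing socket is told
 * via rt_ifmsg(), each non-marker ifaddr of the matching family is
 * reported to the protocols with kpfctlinput(PRC_IFDOWN), and the
 * send queues are purged.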
1657 */ 1658 static void 1659 if_unroute_dispatch(netmsg_t nmsg) 1660 { 1661 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1662 struct ifnet *ifp = msg->ifp; 1663 int flag = msg->flag, fam = msg->fam; 1664 struct ifaddr_container *ifac; 1665 1666 ASSERT_NETISR0; 1667 1668 ifp->if_flags &= ~flag; 1669 getmicrotime(&ifp->if_lastchange); 1670 rt_ifmsg(ifp); 1671 1672 /* 1673 * The ifaddr processing in the following loop will block, 1674 * however, this function is called in netisr0, in which 1675 * ifaddr list changes happen, so we don't care about the 1676 * blockness of the ifaddr processing here. 1677 */ 1678 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1679 struct ifaddr *ifa = ifac->ifa; 1680 1681 /* Ignore marker */ 1682 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1683 continue; 1684 1685 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1686 kpfctlinput(PRC_IFDOWN, ifa->ifa_addr); 1687 } 1688 1689 ifq_purge_all(&ifp->if_snd); 1690 netisr_replymsg(&nmsg->base, 0); 1691 } 1692 1693 static void 1694 if_unroute(struct ifnet *ifp, int flag, int fam) 1695 { 1696 struct netmsg_ifroute msg; 1697 1698 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1699 if_unroute_dispatch); 1700 msg.ifp = ifp; 1701 msg.flag = flag; 1702 msg.fam = fam; 1703 netisr_domsg(&msg.base, 0); 1704 } 1705 1706 /* 1707 * Mark an interface up and notify protocols of the transition. 1708 */ 1709 static void 1710 if_route_dispatch(netmsg_t nmsg) 1711 { 1712 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1713 struct ifnet *ifp = msg->ifp; 1714 int flag = msg->flag, fam = msg->fam; 1715 struct ifaddr_container *ifac; 1716 1717 ASSERT_NETISR0; 1718 1719 ifq_purge_all(&ifp->if_snd); 1720 ifp->if_flags |= flag; 1721 getmicrotime(&ifp->if_lastchange); 1722 rt_ifmsg(ifp); 1723 1724 /* 1725 * The ifaddr processing in the following loop will block, 1726 * however, this function is called in netisr0, in which 1727 * ifaddr list changes happen, so we don't care about the 1728 * blockness of the ifaddr processing here. 1729 */ 1730 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1731 struct ifaddr *ifa = ifac->ifa; 1732 1733 /* Ignore marker */ 1734 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1735 continue; 1736 1737 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1738 kpfctlinput(PRC_IFUP, ifa->ifa_addr); 1739 } 1740 #ifdef INET6 1741 in6_if_up(ifp); 1742 #endif 1743 1744 netisr_replymsg(&nmsg->base, 0); 1745 } 1746 1747 static void 1748 if_route(struct ifnet *ifp, int flag, int fam) 1749 { 1750 struct netmsg_ifroute msg; 1751 1752 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1753 if_route_dispatch); 1754 msg.ifp = ifp; 1755 msg.flag = flag; 1756 msg.fam = fam; 1757 netisr_domsg(&msg.base, 0); 1758 } 1759 1760 /* 1761 * Mark an interface down and notify protocols of the transition. An 1762 * interface going down is also considered to be a synchronizing event. 1763 * We must ensure that all packet processing related to the interface 1764 * has completed before we return so e.g. the caller can free the ifnet 1765 * structure that the mbufs may be referencing. 1766 * 1767 * NOTE: must be called at splnet or eqivalent. 1768 */ 1769 void 1770 if_down(struct ifnet *ifp) 1771 { 1772 EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_DOWN); 1773 if_unroute(ifp, IFF_UP, AF_UNSPEC); 1774 netmsg_service_sync(); 1775 } 1776 1777 /* 1778 * Mark an interface up and notify protocols of 1779 * the transition. 1780 * NOTE: must be called at splnet or eqivalent. 
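 * The actual work is dispatched to netisr0 by if_route(), mirroring
 * if_unroute()/if_down() above.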
1781 */ 1782 void 1783 if_up(struct ifnet *ifp) 1784 { 1785 if_route(ifp, IFF_UP, AF_UNSPEC); 1786 EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_UP); 1787 } 1788 1789 /* 1790 * Process a link state change. 1791 * NOTE: must be called at splsoftnet or equivalent. 1792 */ 1793 void 1794 if_link_state_change(struct ifnet *ifp) 1795 { 1796 int link_state = ifp->if_link_state; 1797 1798 rt_ifmsg(ifp); 1799 devctl_notify("IFNET", ifp->if_xname, 1800 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL); 1801 1802 EVENTHANDLER_INVOKE(ifnet_link_event, ifp, link_state); 1803 } 1804 1805 /* 1806 * Handle interface watchdog timer routines. Called 1807 * from softclock, we decrement timers (if set) and 1808 * call the appropriate interface routine on expiration. 1809 */ 1810 static void 1811 if_slowtimo_dispatch(netmsg_t nmsg) 1812 { 1813 struct globaldata *gd = mycpu; 1814 const struct ifnet_array *arr; 1815 int i; 1816 1817 ASSERT_NETISR0; 1818 1819 crit_enter_gd(gd); 1820 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */ 1821 crit_exit_gd(gd); 1822 1823 arr = ifnet_array_get(); 1824 for (i = 0; i < arr->ifnet_count; ++i) { 1825 struct ifnet *ifp = arr->ifnet_arr[i]; 1826 1827 crit_enter_gd(gd); 1828 1829 if (if_stats_compat) { 1830 IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets); 1831 IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors); 1832 IFNET_STAT_GET(ifp, opackets, ifp->if_opackets); 1833 IFNET_STAT_GET(ifp, oerrors, ifp->if_oerrors); 1834 IFNET_STAT_GET(ifp, collisions, ifp->if_collisions); 1835 IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes); 1836 IFNET_STAT_GET(ifp, obytes, ifp->if_obytes); 1837 IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts); 1838 IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts); 1839 IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops); 1840 IFNET_STAT_GET(ifp, noproto, ifp->if_noproto); 1841 IFNET_STAT_GET(ifp, oqdrops, ifp->if_oqdrops); 1842 } 1843 1844 if (ifp->if_timer == 0 || --ifp->if_timer) { 1845 crit_exit_gd(gd); 1846 continue; 1847 } 1848 if (ifp->if_watchdog) { 1849 if (ifnet_tryserialize_all(ifp)) { 1850 (*ifp->if_watchdog)(ifp); 1851 ifnet_deserialize_all(ifp); 1852 } else { 1853 /* try again next timeout */ 1854 ++ifp->if_timer; 1855 } 1856 } 1857 1858 crit_exit_gd(gd); 1859 } 1860 1861 callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL); 1862 } 1863 1864 static void 1865 if_slowtimo(void *arg __unused) 1866 { 1867 struct lwkt_msg *lmsg = &if_slowtimo_netmsg.lmsg; 1868 1869 KASSERT(mycpuid == 0, ("not on cpu0")); 1870 crit_enter(); 1871 if (lmsg->ms_flags & MSGF_DONE) 1872 lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg); 1873 crit_exit(); 1874 } 1875 1876 /* 1877 * Map interface name to 1878 * interface structure pointer. 1879 */ 1880 struct ifnet * 1881 ifunit(const char *name) 1882 { 1883 struct ifnet *ifp; 1884 1885 /* 1886 * Search all the interfaces for this name/number 1887 */ 1888 KASSERT(mtx_owned(&ifnet_mtx), ("ifnet is not locked")); 1889 1890 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 1891 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1892 break; 1893 } 1894 return (ifp); 1895 } 1896 1897 struct ifnet * 1898 ifunit_netisr(const char *name) 1899 { 1900 const struct ifnet_array *arr; 1901 int i; 1902 1903 /* 1904 * Search all the interfaces for this name/number 1905 */ 1906 1907 arr = ifnet_array_get(); 1908 for (i = 0; i < arr->ifnet_count; ++i) { 1909 struct ifnet *ifp = arr->ifnet_arr[i]; 1910 1911 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1912 return ifp; 1913 } 1914 return NULL; 1915 } 1916 1917 /* 1918 * Interface ioctls. 
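 *
 * ifioctl() dispatches in stages: SIOCGIFCONF first, then the
 * requests that do not name an existing interface (clone
 * create/destroy/list and group membership queries), and finally
 * the per-interface requests, for which the ifnet lock is held and
 * the target ifp is looked up with ifunit().  Requests that are not
 * understood here are handed to the protocol via
 * so_pru_control_direct(), and possibly on to the driver's
 * if_ioctl method.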
1919 */ 1920 int 1921 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred) 1922 { 1923 struct ifnet *ifp; 1924 struct ifgroupreq *ifgr; 1925 struct ifreq *ifr; 1926 struct ifstat *ifs; 1927 int error, do_ifup = 0; 1928 short oif_flags; 1929 int new_flags; 1930 size_t namelen, onamelen; 1931 char new_name[IFNAMSIZ]; 1932 struct ifaddr *ifa; 1933 struct sockaddr_dl *sdl; 1934 1935 switch (cmd) { 1936 case SIOCGIFCONF: 1937 return (ifconf(cmd, data, cred)); 1938 default: 1939 break; 1940 } 1941 1942 ifr = (struct ifreq *)data; 1943 1944 switch (cmd) { 1945 case SIOCIFCREATE: 1946 case SIOCIFCREATE2: 1947 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1948 return (error); 1949 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), 1950 cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL)); 1951 case SIOCIFDESTROY: 1952 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1953 return (error); 1954 return (if_clone_destroy(ifr->ifr_name)); 1955 case SIOCIFGCLONERS: 1956 return (if_clone_list((struct if_clonereq *)data)); 1957 case SIOCGIFGMEMB: 1958 return (if_getgroupmembers((struct ifgroupreq *)data)); 1959 default: 1960 break; 1961 } 1962 1963 /* 1964 * Nominal ioctl through interface, lookup the ifp and obtain a 1965 * lock to serialize the ifconfig ioctl operation. 1966 */ 1967 ifnet_lock(); 1968 1969 ifp = ifunit(ifr->ifr_name); 1970 if (ifp == NULL) { 1971 ifnet_unlock(); 1972 return (ENXIO); 1973 } 1974 error = 0; 1975 1976 switch (cmd) { 1977 case SIOCGIFINDEX: 1978 ifr->ifr_index = ifp->if_index; 1979 break; 1980 1981 case SIOCGIFFLAGS: 1982 ifr->ifr_flags = ifp->if_flags; 1983 ifr->ifr_flagshigh = ifp->if_flags >> 16; 1984 break; 1985 1986 case SIOCGIFCAP: 1987 ifr->ifr_reqcap = ifp->if_capabilities; 1988 ifr->ifr_curcap = ifp->if_capenable; 1989 break; 1990 1991 case SIOCGIFMETRIC: 1992 ifr->ifr_metric = ifp->if_metric; 1993 break; 1994 1995 case SIOCGIFMTU: 1996 ifr->ifr_mtu = ifp->if_mtu; 1997 break; 1998 1999 case SIOCGIFTSOLEN: 2000 ifr->ifr_tsolen = ifp->if_tsolen; 2001 break; 2002 2003 case SIOCGIFDATA: 2004 error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data, 2005 sizeof(ifp->if_data)); 2006 break; 2007 2008 case SIOCGIFPHYS: 2009 ifr->ifr_phys = ifp->if_physical; 2010 break; 2011 2012 case SIOCGIFPOLLCPU: 2013 ifr->ifr_pollcpu = -1; 2014 break; 2015 2016 case SIOCSIFPOLLCPU: 2017 break; 2018 2019 case SIOCSIFFLAGS: 2020 error = priv_check_cred(cred, PRIV_ROOT, 0); 2021 if (error) 2022 break; 2023 new_flags = (ifr->ifr_flags & 0xffff) | 2024 (ifr->ifr_flagshigh << 16); 2025 if (ifp->if_flags & IFF_SMART) { 2026 /* Smart drivers twiddle their own routes */ 2027 } else if (ifp->if_flags & IFF_UP && 2028 (new_flags & IFF_UP) == 0) { 2029 if_down(ifp); 2030 } else if (new_flags & IFF_UP && 2031 (ifp->if_flags & IFF_UP) == 0) { 2032 do_ifup = 1; 2033 } 2034 2035 #ifdef IFPOLL_ENABLE 2036 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) { 2037 if (new_flags & IFF_NPOLLING) 2038 ifpoll_register(ifp); 2039 else 2040 ifpoll_deregister(ifp); 2041 } 2042 #endif 2043 2044 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | 2045 (new_flags &~ IFF_CANTCHANGE); 2046 if (new_flags & IFF_PPROMISC) { 2047 /* Permanently promiscuous mode requested */ 2048 ifp->if_flags |= IFF_PROMISC; 2049 } else if (ifp->if_pcount == 0) { 2050 ifp->if_flags &= ~IFF_PROMISC; 2051 } 2052 if (ifp->if_ioctl) { 2053 ifnet_serialize_all(ifp); 2054 ifp->if_ioctl(ifp, cmd, data, cred); 2055 ifnet_deserialize_all(ifp); 2056 } 2057 if (do_ifup) 2058 if_up(ifp); 2059 
getmicrotime(&ifp->if_lastchange); 2060 break; 2061 2062 case SIOCSIFCAP: 2063 error = priv_check_cred(cred, PRIV_ROOT, 0); 2064 if (error) 2065 break; 2066 if (ifr->ifr_reqcap & ~ifp->if_capabilities) { 2067 error = EINVAL; 2068 break; 2069 } 2070 ifnet_serialize_all(ifp); 2071 ifp->if_ioctl(ifp, cmd, data, cred); 2072 ifnet_deserialize_all(ifp); 2073 break; 2074 2075 case SIOCSIFNAME: 2076 error = priv_check_cred(cred, PRIV_ROOT, 0); 2077 if (error) 2078 break; 2079 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL); 2080 if (error) 2081 break; 2082 if (new_name[0] == '\0') { 2083 error = EINVAL; 2084 break; 2085 } 2086 if (ifunit(new_name) != NULL) { 2087 error = EEXIST; 2088 break; 2089 } 2090 2091 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp); 2092 2093 /* Announce the departure of the interface. */ 2094 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 2095 2096 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname)); 2097 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa; 2098 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 2099 namelen = strlen(new_name); 2100 onamelen = sdl->sdl_nlen; 2101 /* 2102 * Move the address if needed. This is safe because we 2103 * allocate space for a name of length IFNAMSIZ when we 2104 * create this in if_attach(). 2105 */ 2106 if (namelen != onamelen) { 2107 bcopy(sdl->sdl_data + onamelen, 2108 sdl->sdl_data + namelen, sdl->sdl_alen); 2109 } 2110 bcopy(new_name, sdl->sdl_data, namelen); 2111 sdl->sdl_nlen = namelen; 2112 sdl = (struct sockaddr_dl *)ifa->ifa_netmask; 2113 bzero(sdl->sdl_data, onamelen); 2114 while (namelen != 0) 2115 sdl->sdl_data[--namelen] = 0xff; 2116 2117 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 2118 2119 /* Announce the return of the interface. */ 2120 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 2121 break; 2122 2123 case SIOCSIFMETRIC: 2124 error = priv_check_cred(cred, PRIV_ROOT, 0); 2125 if (error) 2126 break; 2127 ifp->if_metric = ifr->ifr_metric; 2128 getmicrotime(&ifp->if_lastchange); 2129 break; 2130 2131 case SIOCSIFPHYS: 2132 error = priv_check_cred(cred, PRIV_ROOT, 0); 2133 if (error) 2134 break; 2135 if (ifp->if_ioctl == NULL) { 2136 error = EOPNOTSUPP; 2137 break; 2138 } 2139 ifnet_serialize_all(ifp); 2140 error = ifp->if_ioctl(ifp, cmd, data, cred); 2141 ifnet_deserialize_all(ifp); 2142 if (error == 0) 2143 getmicrotime(&ifp->if_lastchange); 2144 break; 2145 2146 case SIOCSIFMTU: 2147 { 2148 u_long oldmtu = ifp->if_mtu; 2149 2150 error = priv_check_cred(cred, PRIV_ROOT, 0); 2151 if (error) 2152 break; 2153 if (ifp->if_ioctl == NULL) { 2154 error = EOPNOTSUPP; 2155 break; 2156 } 2157 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) { 2158 error = EINVAL; 2159 break; 2160 } 2161 ifnet_serialize_all(ifp); 2162 error = ifp->if_ioctl(ifp, cmd, data, cred); 2163 ifnet_deserialize_all(ifp); 2164 if (error == 0) { 2165 getmicrotime(&ifp->if_lastchange); 2166 rt_ifmsg(ifp); 2167 } 2168 /* 2169 * If the link MTU changed, do network layer specific procedure. 
2170 */ 2171 if (ifp->if_mtu != oldmtu) { 2172 #ifdef INET6 2173 nd6_setmtu(ifp); 2174 #endif 2175 } 2176 break; 2177 } 2178 2179 case SIOCSIFTSOLEN: 2180 error = priv_check_cred(cred, PRIV_ROOT, 0); 2181 if (error) 2182 break; 2183 2184 /* XXX need driver supplied upper limit */ 2185 if (ifr->ifr_tsolen <= 0) { 2186 error = EINVAL; 2187 break; 2188 } 2189 ifp->if_tsolen = ifr->ifr_tsolen; 2190 break; 2191 2192 case SIOCADDMULTI: 2193 case SIOCDELMULTI: 2194 error = priv_check_cred(cred, PRIV_ROOT, 0); 2195 if (error) 2196 break; 2197 2198 /* Don't allow group membership on non-multicast interfaces. */ 2199 if ((ifp->if_flags & IFF_MULTICAST) == 0) { 2200 error = EOPNOTSUPP; 2201 break; 2202 } 2203 2204 /* Don't let users screw up protocols' entries. */ 2205 if (ifr->ifr_addr.sa_family != AF_LINK) { 2206 error = EINVAL; 2207 break; 2208 } 2209 2210 if (cmd == SIOCADDMULTI) { 2211 struct ifmultiaddr *ifma; 2212 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); 2213 } else { 2214 error = if_delmulti(ifp, &ifr->ifr_addr); 2215 } 2216 if (error == 0) 2217 getmicrotime(&ifp->if_lastchange); 2218 break; 2219 2220 case SIOCSIFPHYADDR: 2221 case SIOCDIFPHYADDR: 2222 #ifdef INET6 2223 case SIOCSIFPHYADDR_IN6: 2224 #endif 2225 case SIOCSLIFPHYADDR: 2226 case SIOCSIFMEDIA: 2227 case SIOCSIFGENERIC: 2228 error = priv_check_cred(cred, PRIV_ROOT, 0); 2229 if (error) 2230 break; 2231 if (ifp->if_ioctl == NULL) { 2232 error = EOPNOTSUPP; 2233 break; 2234 } 2235 ifnet_serialize_all(ifp); 2236 error = ifp->if_ioctl(ifp, cmd, data, cred); 2237 ifnet_deserialize_all(ifp); 2238 if (error == 0) 2239 getmicrotime(&ifp->if_lastchange); 2240 break; 2241 2242 case SIOCGIFSTATUS: 2243 ifs = (struct ifstat *)data; 2244 ifs->ascii[0] = '\0'; 2245 /* fall through */ 2246 case SIOCGIFPSRCADDR: 2247 case SIOCGIFPDSTADDR: 2248 case SIOCGLIFPHYADDR: 2249 case SIOCGIFMEDIA: 2250 case SIOCGIFGENERIC: 2251 if (ifp->if_ioctl == NULL) { 2252 error = EOPNOTSUPP; 2253 break; 2254 } 2255 ifnet_serialize_all(ifp); 2256 error = ifp->if_ioctl(ifp, cmd, data, cred); 2257 ifnet_deserialize_all(ifp); 2258 break; 2259 2260 case SIOCSIFLLADDR: 2261 error = priv_check_cred(cred, PRIV_ROOT, 0); 2262 if (error) 2263 break; 2264 error = if_setlladdr(ifp, ifr->ifr_addr.sa_data, 2265 ifr->ifr_addr.sa_len); 2266 EVENTHANDLER_INVOKE(iflladdr_event, ifp); 2267 break; 2268 2269 case SIOCAIFGROUP: 2270 ifgr = (struct ifgroupreq *)ifr; 2271 if ((error = priv_check_cred(cred, PRIV_NET_ADDIFGROUP, 0))) 2272 return (error); 2273 if ((error = if_addgroup(ifp, ifgr->ifgr_group))) 2274 return (error); 2275 break; 2276 2277 case SIOCDIFGROUP: 2278 ifgr = (struct ifgroupreq *)ifr; 2279 if ((error = priv_check_cred(cred, PRIV_NET_DELIFGROUP, 0))) 2280 return (error); 2281 if ((error = if_delgroup(ifp, ifgr->ifgr_group))) 2282 return (error); 2283 break; 2284 2285 case SIOCGIFGROUP: 2286 ifgr = (struct ifgroupreq *)ifr; 2287 if ((error = if_getgroups(ifgr, ifp))) 2288 return (error); 2289 break; 2290 2291 default: 2292 oif_flags = ifp->if_flags; 2293 if (so->so_proto == 0) { 2294 error = EOPNOTSUPP; 2295 break; 2296 } 2297 error = so_pru_control_direct(so, cmd, data, ifp); 2298 2299 /* 2300 * If the socket control method returns EOPNOTSUPP, pass the 2301 * request directly to the interface. 2302 * 2303 * Exclude the SIOCSIF{ADDR,BRDADDR,DSTADDR,NETMASK} ioctls, 2304 * because drivers may trust these ioctls to come from an 2305 * already privileged layer and thus do not perform credentials 2306 * checks or input validation. 
2307 */ 2308 if (error == EOPNOTSUPP && 2309 ifp->if_ioctl != NULL && 2310 cmd != SIOCSIFADDR && 2311 cmd != SIOCSIFBRDADDR && 2312 cmd != SIOCSIFDSTADDR && 2313 cmd != SIOCSIFNETMASK) { 2314 ifnet_serialize_all(ifp); 2315 error = ifp->if_ioctl(ifp, cmd, data, cred); 2316 ifnet_deserialize_all(ifp); 2317 } 2318 2319 if ((oif_flags ^ ifp->if_flags) & IFF_UP) { 2320 #ifdef INET6 2321 DELAY(100);/* XXX: temporary workaround for fxp issue*/ 2322 if (ifp->if_flags & IFF_UP) { 2323 crit_enter(); 2324 in6_if_up(ifp); 2325 crit_exit(); 2326 } 2327 #endif 2328 } 2329 break; 2330 } 2331 2332 ifnet_unlock(); 2333 return (error); 2334 } 2335 2336 /* 2337 * Set/clear promiscuous mode on interface ifp based on the truth value 2338 * of pswitch. The calls are reference counted so that only the first 2339 * "on" request actually has an effect, as does the final "off" request. 2340 * Results are undefined if the "off" and "on" requests are not matched. 2341 */ 2342 int 2343 ifpromisc(struct ifnet *ifp, int pswitch) 2344 { 2345 struct ifreq ifr; 2346 int error; 2347 int oldflags; 2348 2349 oldflags = ifp->if_flags; 2350 if (ifp->if_flags & IFF_PPROMISC) { 2351 /* Do nothing if device is in permanently promiscuous mode */ 2352 ifp->if_pcount += pswitch ? 1 : -1; 2353 return (0); 2354 } 2355 if (pswitch) { 2356 /* 2357 * If the device is not configured up, we cannot put it in 2358 * promiscuous mode. 2359 */ 2360 if ((ifp->if_flags & IFF_UP) == 0) 2361 return (ENETDOWN); 2362 if (ifp->if_pcount++ != 0) 2363 return (0); 2364 ifp->if_flags |= IFF_PROMISC; 2365 log(LOG_INFO, "%s: promiscuous mode enabled\n", 2366 ifp->if_xname); 2367 } else { 2368 if (--ifp->if_pcount > 0) 2369 return (0); 2370 ifp->if_flags &= ~IFF_PROMISC; 2371 log(LOG_INFO, "%s: promiscuous mode disabled\n", 2372 ifp->if_xname); 2373 } 2374 ifr.ifr_flags = ifp->if_flags; 2375 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2376 ifnet_serialize_all(ifp); 2377 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL); 2378 ifnet_deserialize_all(ifp); 2379 if (error == 0) 2380 rt_ifmsg(ifp); 2381 else 2382 ifp->if_flags = oldflags; 2383 return error; 2384 } 2385 2386 /* 2387 * Return interface configuration 2388 * of system. List may be used 2389 * in later ioctl's (above) to get 2390 * other information. 2391 */ 2392 static int 2393 ifconf(u_long cmd, caddr_t data, struct ucred *cred) 2394 { 2395 struct ifconf *ifc = (struct ifconf *)data; 2396 struct ifnet *ifp; 2397 struct sockaddr *sa; 2398 struct ifreq ifr, *ifrp; 2399 int space = ifc->ifc_len, error = 0; 2400 2401 ifrp = ifc->ifc_req; 2402 2403 ifnet_lock(); 2404 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2405 struct ifaddr_container *ifac, *ifac_mark; 2406 struct ifaddr_marker mark; 2407 struct ifaddrhead *head; 2408 int addrs; 2409 2410 if (space <= sizeof ifr) 2411 break; 2412 2413 /* 2414 * Zero the stack declared structure first to prevent 2415 * memory disclosure. 2416 */ 2417 bzero(&ifr, sizeof(ifr)); 2418 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) 2419 >= sizeof(ifr.ifr_name)) { 2420 error = ENAMETOOLONG; 2421 break; 2422 } 2423 2424 /* 2425 * Add a marker, since copyout() could block and during that 2426 * period the list could be changed. Inserting the marker to 2427 * the header of the list will not cause trouble for the code 2428 * assuming that the first element of the list is AF_LINK; the 2429 * marker will be moved to the next position w/o blocking. 
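 *
 * In outline, the traversal below is:
 *
 *	insert the marker at the head of the per-CPU address list;
 *	while ((ifac = TAILQ_NEXT(marker)) != NULL) {
 *		move the marker to just after ifac;
 *		copyout() ifac's address (may block);
 *	}
 *	remove the marker;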
2430 */ 2431 ifa_marker_init(&mark, ifp); 2432 ifac_mark = &mark.ifac; 2433 head = &ifp->if_addrheads[mycpuid]; 2434 2435 addrs = 0; 2436 TAILQ_INSERT_HEAD(head, ifac_mark, ifa_link); 2437 while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) { 2438 struct ifaddr *ifa = ifac->ifa; 2439 2440 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2441 TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link); 2442 2443 /* Ignore marker */ 2444 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 2445 continue; 2446 2447 if (space <= sizeof ifr) 2448 break; 2449 sa = ifa->ifa_addr; 2450 if (cred->cr_prison && 2451 prison_if(cred, sa)) 2452 continue; 2453 addrs++; 2454 /* 2455 * Keep a reference on this ifaddr, so that it will 2456 * not be destroyed when its address is copied to 2457 * the userland, which could block. 2458 */ 2459 IFAREF(ifa); 2460 if (sa->sa_len <= sizeof(*sa)) { 2461 ifr.ifr_addr = *sa; 2462 error = copyout(&ifr, ifrp, sizeof ifr); 2463 ifrp++; 2464 } else { 2465 if (space < (sizeof ifr) + sa->sa_len - 2466 sizeof(*sa)) { 2467 IFAFREE(ifa); 2468 break; 2469 } 2470 space -= sa->sa_len - sizeof(*sa); 2471 error = copyout(&ifr, ifrp, 2472 sizeof ifr.ifr_name); 2473 if (error == 0) 2474 error = copyout(sa, &ifrp->ifr_addr, 2475 sa->sa_len); 2476 ifrp = (struct ifreq *) 2477 (sa->sa_len + (caddr_t)&ifrp->ifr_addr); 2478 } 2479 IFAFREE(ifa); 2480 if (error) 2481 break; 2482 space -= sizeof ifr; 2483 } 2484 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2485 if (error) 2486 break; 2487 if (!addrs) { 2488 bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr); 2489 error = copyout(&ifr, ifrp, sizeof ifr); 2490 if (error) 2491 break; 2492 space -= sizeof ifr; 2493 ifrp++; 2494 } 2495 } 2496 ifnet_unlock(); 2497 2498 ifc->ifc_len -= space; 2499 return (error); 2500 } 2501 2502 /* 2503 * Just like if_promisc(), but for all-multicast-reception mode. 2504 */ 2505 int 2506 if_allmulti(struct ifnet *ifp, int onswitch) 2507 { 2508 int error = 0; 2509 struct ifreq ifr; 2510 2511 crit_enter(); 2512 2513 if (onswitch) { 2514 if (ifp->if_amcount++ == 0) { 2515 ifp->if_flags |= IFF_ALLMULTI; 2516 ifr.ifr_flags = ifp->if_flags; 2517 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2518 ifnet_serialize_all(ifp); 2519 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2520 NULL); 2521 ifnet_deserialize_all(ifp); 2522 } 2523 } else { 2524 if (ifp->if_amcount > 1) { 2525 ifp->if_amcount--; 2526 } else { 2527 ifp->if_amcount = 0; 2528 ifp->if_flags &= ~IFF_ALLMULTI; 2529 ifr.ifr_flags = ifp->if_flags; 2530 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2531 ifnet_serialize_all(ifp); 2532 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2533 NULL); 2534 ifnet_deserialize_all(ifp); 2535 } 2536 } 2537 2538 crit_exit(); 2539 2540 if (error == 0) 2541 rt_ifmsg(ifp); 2542 return error; 2543 } 2544 2545 /* 2546 * Add a multicast listenership to the interface in question. 
2547 * The link layer provides a routine which converts 2548 */ 2549 int 2550 if_addmulti_serialized(struct ifnet *ifp, struct sockaddr *sa, 2551 struct ifmultiaddr **retifma) 2552 { 2553 struct sockaddr *llsa, *dupsa; 2554 int error; 2555 struct ifmultiaddr *ifma; 2556 2557 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2558 2559 /* 2560 * If the matching multicast address already exists 2561 * then don't add a new one, just add a reference 2562 */ 2563 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2564 if (sa_equal(sa, ifma->ifma_addr)) { 2565 ifma->ifma_refcount++; 2566 if (retifma) 2567 *retifma = ifma; 2568 return 0; 2569 } 2570 } 2571 2572 /* 2573 * Give the link layer a chance to accept/reject it, and also 2574 * find out which AF_LINK address this maps to, if it isn't one 2575 * already. 2576 */ 2577 if (ifp->if_resolvemulti) { 2578 error = ifp->if_resolvemulti(ifp, &llsa, sa); 2579 if (error) 2580 return error; 2581 } else { 2582 llsa = NULL; 2583 } 2584 2585 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2586 dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_INTWAIT); 2587 bcopy(sa, dupsa, sa->sa_len); 2588 2589 ifma->ifma_addr = dupsa; 2590 ifma->ifma_lladdr = llsa; 2591 ifma->ifma_ifp = ifp; 2592 ifma->ifma_refcount = 1; 2593 ifma->ifma_protospec = NULL; 2594 rt_newmaddrmsg(RTM_NEWMADDR, ifma); 2595 2596 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2597 if (retifma) 2598 *retifma = ifma; 2599 2600 if (llsa != NULL) { 2601 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2602 if (sa_equal(ifma->ifma_addr, llsa)) 2603 break; 2604 } 2605 if (ifma) { 2606 ifma->ifma_refcount++; 2607 } else { 2608 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2609 dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_INTWAIT); 2610 bcopy(llsa, dupsa, llsa->sa_len); 2611 ifma->ifma_addr = dupsa; 2612 ifma->ifma_ifp = ifp; 2613 ifma->ifma_refcount = 1; 2614 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2615 } 2616 } 2617 /* 2618 * We are certain we have added something, so call down to the 2619 * interface to let them know about it. 2620 */ 2621 if (ifp->if_ioctl) 2622 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL); 2623 2624 return 0; 2625 } 2626 2627 int 2628 if_addmulti(struct ifnet *ifp, struct sockaddr *sa, 2629 struct ifmultiaddr **retifma) 2630 { 2631 int error; 2632 2633 ifnet_serialize_all(ifp); 2634 error = if_addmulti_serialized(ifp, sa, retifma); 2635 ifnet_deserialize_all(ifp); 2636 2637 return error; 2638 } 2639 2640 /* 2641 * Remove a reference to a multicast address on this interface. Yell 2642 * if the request does not match an existing membership. 2643 */ 2644 static int 2645 if_delmulti_serialized(struct ifnet *ifp, struct sockaddr *sa) 2646 { 2647 struct ifmultiaddr *ifma; 2648 2649 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2650 2651 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2652 if (sa_equal(sa, ifma->ifma_addr)) 2653 break; 2654 if (ifma == NULL) 2655 return ENOENT; 2656 2657 if (ifma->ifma_refcount > 1) { 2658 ifma->ifma_refcount--; 2659 return 0; 2660 } 2661 2662 rt_newmaddrmsg(RTM_DELMADDR, ifma); 2663 sa = ifma->ifma_lladdr; 2664 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2665 /* 2666 * Make sure the interface driver is notified 2667 * in the case of a link layer mcast group being left. 
2668 */ 2669 if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) 2670 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2671 kfree(ifma->ifma_addr, M_IFMADDR); 2672 kfree(ifma, M_IFMADDR); 2673 if (sa == NULL) 2674 return 0; 2675 2676 /* 2677 * Now look for the link-layer address which corresponds to 2678 * this network address. It had been squirreled away in 2679 * ifma->ifma_lladdr for this purpose (so we don't have 2680 * to call ifp->if_resolvemulti() again), and we saved that 2681 * value in sa above. If some nasty deleted the 2682 * link-layer address out from underneath us, we can deal because 2683 * the address we stored was is not the same as the one which was 2684 * in the record for the link-layer address. (So we don't complain 2685 * in that case.) 2686 */ 2687 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2688 if (sa_equal(sa, ifma->ifma_addr)) 2689 break; 2690 if (ifma == NULL) 2691 return 0; 2692 2693 if (ifma->ifma_refcount > 1) { 2694 ifma->ifma_refcount--; 2695 return 0; 2696 } 2697 2698 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2699 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2700 kfree(ifma->ifma_addr, M_IFMADDR); 2701 kfree(sa, M_IFMADDR); 2702 kfree(ifma, M_IFMADDR); 2703 2704 return 0; 2705 } 2706 2707 int 2708 if_delmulti(struct ifnet *ifp, struct sockaddr *sa) 2709 { 2710 int error; 2711 2712 ifnet_serialize_all(ifp); 2713 error = if_delmulti_serialized(ifp, sa); 2714 ifnet_deserialize_all(ifp); 2715 2716 return error; 2717 } 2718 2719 /* 2720 * Delete all multicast group membership for an interface. 2721 * Should be used to quickly flush all multicast filters. 2722 */ 2723 void 2724 if_delallmulti_serialized(struct ifnet *ifp) 2725 { 2726 struct ifmultiaddr *ifma, mark; 2727 struct sockaddr sa; 2728 2729 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2730 2731 bzero(&sa, sizeof(sa)); 2732 sa.sa_family = AF_UNSPEC; 2733 sa.sa_len = sizeof(sa); 2734 2735 bzero(&mark, sizeof(mark)); 2736 mark.ifma_addr = &sa; 2737 2738 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, &mark, ifma_link); 2739 while ((ifma = TAILQ_NEXT(&mark, ifma_link)) != NULL) { 2740 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2741 TAILQ_INSERT_AFTER(&ifp->if_multiaddrs, ifma, &mark, 2742 ifma_link); 2743 2744 if (ifma->ifma_addr->sa_family == AF_UNSPEC) 2745 continue; 2746 2747 if_delmulti_serialized(ifp, ifma->ifma_addr); 2748 } 2749 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2750 } 2751 2752 2753 /* 2754 * Set the link layer address on an interface. 2755 * 2756 * At this time we only support certain types of interfaces, 2757 * and we don't allow the length of the address to change. 2758 */ 2759 int 2760 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) 2761 { 2762 struct sockaddr_dl *sdl; 2763 struct ifreq ifr; 2764 2765 sdl = IF_LLSOCKADDR(ifp); 2766 if (sdl == NULL) 2767 return (EINVAL); 2768 if (len != sdl->sdl_alen) /* don't allow length to change */ 2769 return (EINVAL); 2770 switch (ifp->if_type) { 2771 case IFT_ETHER: /* these types use struct arpcom */ 2772 case IFT_XETHER: 2773 case IFT_L2VLAN: 2774 case IFT_IEEE8023ADLAG: 2775 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len); 2776 bcopy(lladdr, LLADDR(sdl), len); 2777 break; 2778 default: 2779 return (ENODEV); 2780 } 2781 /* 2782 * If the interface is already up, we need 2783 * to re-init it in order to reprogram its 2784 * address filter. 
2785 */ 2786 ifnet_serialize_all(ifp); 2787 if ((ifp->if_flags & IFF_UP) != 0) { 2788 #ifdef INET 2789 struct ifaddr_container *ifac; 2790 #endif 2791 2792 ifp->if_flags &= ~IFF_UP; 2793 ifr.ifr_flags = ifp->if_flags; 2794 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2795 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2796 NULL); 2797 ifp->if_flags |= IFF_UP; 2798 ifr.ifr_flags = ifp->if_flags; 2799 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2800 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2801 NULL); 2802 #ifdef INET 2803 /* 2804 * Also send gratuitous ARPs to notify other nodes about 2805 * the address change. 2806 */ 2807 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 2808 struct ifaddr *ifa = ifac->ifa; 2809 2810 if (ifa->ifa_addr != NULL && 2811 ifa->ifa_addr->sa_family == AF_INET) 2812 arp_gratuitous(ifp, ifa); 2813 } 2814 #endif 2815 } 2816 ifnet_deserialize_all(ifp); 2817 return (0); 2818 } 2819 2820 2821 /* 2822 * Locate an interface based on a complete address. 2823 */ 2824 struct ifnet * 2825 if_bylla(const void *lla, unsigned char lla_len) 2826 { 2827 const struct ifnet_array *arr; 2828 struct ifnet *ifp; 2829 struct sockaddr_dl *sdl; 2830 int i; 2831 2832 arr = ifnet_array_get(); 2833 for (i = 0; i < arr->ifnet_count; ++i) { 2834 ifp = arr->ifnet_arr[i]; 2835 if (ifp->if_addrlen != lla_len) 2836 continue; 2837 2838 sdl = IF_LLSOCKADDR(ifp); 2839 if (memcmp(lla, LLADDR(sdl), lla_len) == 0) 2840 return (ifp); 2841 } 2842 return (NULL); 2843 } 2844 2845 struct ifmultiaddr * 2846 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp) 2847 { 2848 struct ifmultiaddr *ifma; 2849 2850 /* TODO: need ifnet_serialize_main */ 2851 ifnet_serialize_all(ifp); 2852 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2853 if (sa_equal(ifma->ifma_addr, sa)) 2854 break; 2855 ifnet_deserialize_all(ifp); 2856 2857 return ifma; 2858 } 2859 2860 /* 2861 * This function locates the first real ethernet MAC from a network 2862 * card and loads it into node, returning 0 on success or ENOENT if 2863 * no suitable interfaces were found. It is used by the uuid code to 2864 * generate a unique 6-byte number. 2865 */ 2866 int 2867 if_getanyethermac(uint16_t *node, int minlen) 2868 { 2869 struct ifnet *ifp; 2870 struct sockaddr_dl *sdl; 2871 2872 ifnet_lock(); 2873 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2874 if (ifp->if_type != IFT_ETHER) 2875 continue; 2876 sdl = IF_LLSOCKADDR(ifp); 2877 if (sdl->sdl_alen < minlen) 2878 continue; 2879 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node, 2880 minlen); 2881 ifnet_unlock(); 2882 return(0); 2883 } 2884 ifnet_unlock(); 2885 return (ENOENT); 2886 } 2887 2888 /* 2889 * The name argument must be a pointer to storage which will last as 2890 * long as the interface does. For physical devices, the result of 2891 * device_get_name(dev) is a good choice and for pseudo-devices a 2892 * static string works well. 2893 */ 2894 void 2895 if_initname(struct ifnet *ifp, const char *name, int unit) 2896 { 2897 ifp->if_dname = name; 2898 ifp->if_dunit = unit; 2899 if (unit != IF_DUNIT_NONE) 2900 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit); 2901 else 2902 strlcpy(ifp->if_xname, name, IFNAMSIZ); 2903 } 2904 2905 int 2906 if_printf(struct ifnet *ifp, const char *fmt, ...) 
2907 { 2908 __va_list ap; 2909 int retval; 2910 2911 retval = kprintf("%s: ", ifp->if_xname); 2912 __va_start(ap, fmt); 2913 retval += kvprintf(fmt, ap); 2914 __va_end(ap); 2915 return (retval); 2916 } 2917 2918 struct ifnet * 2919 if_alloc(uint8_t type) 2920 { 2921 struct ifnet *ifp; 2922 size_t size; 2923 2924 /* 2925 * XXX temporary hack until arpcom is setup in if_l2com 2926 */ 2927 if (type == IFT_ETHER) 2928 size = sizeof(struct arpcom); 2929 else 2930 size = sizeof(struct ifnet); 2931 2932 ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO); 2933 2934 ifp->if_type = type; 2935 2936 if (if_com_alloc[type] != NULL) { 2937 ifp->if_l2com = if_com_alloc[type](type, ifp); 2938 if (ifp->if_l2com == NULL) { 2939 kfree(ifp, M_IFNET); 2940 return (NULL); 2941 } 2942 } 2943 return (ifp); 2944 } 2945 2946 void 2947 if_free(struct ifnet *ifp) 2948 { 2949 kfree(ifp, M_IFNET); 2950 } 2951 2952 void 2953 ifq_set_classic(struct ifaltq *ifq) 2954 { 2955 ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq, 2956 ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request); 2957 } 2958 2959 void 2960 ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq, 2961 ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request) 2962 { 2963 int q; 2964 2965 KASSERT(mapsubq != NULL, ("mapsubq is not specified")); 2966 KASSERT(enqueue != NULL, ("enqueue is not specified")); 2967 KASSERT(dequeue != NULL, ("dequeue is not specified")); 2968 KASSERT(request != NULL, ("request is not specified")); 2969 2970 ifq->altq_mapsubq = mapsubq; 2971 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 2972 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 2973 2974 ifsq->ifsq_enqueue = enqueue; 2975 ifsq->ifsq_dequeue = dequeue; 2976 ifsq->ifsq_request = request; 2977 } 2978 } 2979 2980 static void 2981 ifsq_norm_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2982 { 2983 2984 classq_add(&ifsq->ifsq_norm, m); 2985 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2986 } 2987 2988 static void 2989 ifsq_prio_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2990 { 2991 2992 classq_add(&ifsq->ifsq_prio, m); 2993 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2994 ALTQ_SQ_PRIO_CNTR_INC(ifsq, m->m_pkthdr.len); 2995 } 2996 2997 static struct mbuf * 2998 ifsq_norm_dequeue(struct ifaltq_subque *ifsq) 2999 { 3000 struct mbuf *m; 3001 3002 m = classq_get(&ifsq->ifsq_norm); 3003 if (m != NULL) 3004 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 3005 return (m); 3006 } 3007 3008 static struct mbuf * 3009 ifsq_prio_dequeue(struct ifaltq_subque *ifsq) 3010 { 3011 struct mbuf *m; 3012 3013 m = classq_get(&ifsq->ifsq_prio); 3014 if (m != NULL) { 3015 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 3016 ALTQ_SQ_PRIO_CNTR_DEC(ifsq, m->m_pkthdr.len); 3017 } 3018 return (m); 3019 } 3020 3021 int 3022 ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m, 3023 struct altq_pktattr *pa __unused) 3024 { 3025 3026 M_ASSERTPKTHDR(m); 3027 again: 3028 if (ifsq->ifsq_len >= ifsq->ifsq_maxlen || 3029 ifsq->ifsq_bcnt >= ifsq->ifsq_maxbcnt) { 3030 struct mbuf *m_drop; 3031 3032 if (m->m_flags & M_PRIO) { 3033 m_drop = NULL; 3034 if (ifsq->ifsq_prio_len < (ifsq->ifsq_maxlen >> 1) && 3035 ifsq->ifsq_prio_bcnt < (ifsq->ifsq_maxbcnt >> 1)) { 3036 /* Try dropping some from normal queue. 
*/ 3037 m_drop = ifsq_norm_dequeue(ifsq); 3038 } 3039 if (m_drop == NULL) 3040 m_drop = ifsq_prio_dequeue(ifsq); 3041 } else { 3042 m_drop = ifsq_norm_dequeue(ifsq); 3043 } 3044 if (m_drop != NULL) { 3045 IFNET_STAT_INC(ifsq->ifsq_ifp, oqdrops, 1); 3046 m_freem(m_drop); 3047 goto again; 3048 } 3049 /* 3050 * No old packets could be dropped! 3051 * NOTE: Caller increases oqdrops. 3052 */ 3053 m_freem(m); 3054 return (ENOBUFS); 3055 } else { 3056 if (m->m_flags & M_PRIO) 3057 ifsq_prio_enqueue(ifsq, m); 3058 else 3059 ifsq_norm_enqueue(ifsq, m); 3060 return (0); 3061 } 3062 } 3063 3064 struct mbuf * 3065 ifsq_classic_dequeue(struct ifaltq_subque *ifsq, int op) 3066 { 3067 struct mbuf *m; 3068 3069 switch (op) { 3070 case ALTDQ_POLL: 3071 m = classq_head(&ifsq->ifsq_prio); 3072 if (m == NULL) 3073 m = classq_head(&ifsq->ifsq_norm); 3074 break; 3075 3076 case ALTDQ_REMOVE: 3077 m = ifsq_prio_dequeue(ifsq); 3078 if (m == NULL) 3079 m = ifsq_norm_dequeue(ifsq); 3080 break; 3081 3082 default: 3083 panic("unsupported ALTQ dequeue op: %d", op); 3084 } 3085 return m; 3086 } 3087 3088 int 3089 ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg) 3090 { 3091 switch (req) { 3092 case ALTRQ_PURGE: 3093 for (;;) { 3094 struct mbuf *m; 3095 3096 m = ifsq_classic_dequeue(ifsq, ALTDQ_REMOVE); 3097 if (m == NULL) 3098 break; 3099 m_freem(m); 3100 } 3101 break; 3102 3103 default: 3104 panic("unsupported ALTQ request: %d", req); 3105 } 3106 return 0; 3107 } 3108 3109 static void 3110 ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched) 3111 { 3112 struct ifnet *ifp = ifsq_get_ifp(ifsq); 3113 int running = 0, need_sched; 3114 3115 /* 3116 * Try to do direct ifnet.if_start on the subqueue first, if there is 3117 * contention on the subqueue hardware serializer, ifnet.if_start on 3118 * the subqueue will be scheduled on the subqueue owner CPU. 3119 */ 3120 if (!ifsq_tryserialize_hw(ifsq)) { 3121 /* 3122 * Subqueue hardware serializer contention happened, 3123 * ifnet.if_start on the subqueue is scheduled on 3124 * the subqueue owner CPU, and we keep going. 3125 */ 3126 ifsq_ifstart_schedule(ifsq, 1); 3127 return; 3128 } 3129 3130 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) { 3131 ifp->if_start(ifp, ifsq); 3132 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 3133 running = 1; 3134 } 3135 need_sched = ifsq_ifstart_need_schedule(ifsq, running); 3136 3137 ifsq_deserialize_hw(ifsq); 3138 3139 if (need_sched) { 3140 /* 3141 * More data need to be transmitted, ifnet.if_start on the 3142 * subqueue is scheduled on the subqueue owner CPU, and we 3143 * keep going. 3144 * NOTE: ifnet.if_start subqueue interlock is not released. 3145 */ 3146 ifsq_ifstart_schedule(ifsq, force_sched); 3147 } 3148 } 3149 3150 /* 3151 * Subqeue packets staging mechanism: 3152 * 3153 * The packets enqueued into the subqueue are staged to a certain amount 3154 * before the ifnet.if_start on the subqueue is called. In this way, the 3155 * driver could avoid writing to hardware registers upon every packet, 3156 * instead, hardware registers could be written when certain amount of 3157 * packets are put onto hardware TX ring. The measurement on several modern 3158 * NICs (emx(4), igb(4), bnx(4), bge(4), jme(4)) shows that the hardware 3159 * registers writing aggregation could save ~20% CPU time when 18bytes UDP 3160 * datagrams are transmitted at 1.48Mpps. 
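 * (Concretely: a netisr thread lets at most ifsq_stage_cntmax packets,
 * which defaults to 16, or if_mtu - max_protohdr bytes, i.e. a bit less
 * than 1500 bytes on a typical Ethernet, accumulate on the current CPU
 * before ifnet.if_start is run or scheduled; the exact stop conditions
 * are listed below.)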
The performance improvement by 3161 * hardware registers writing aggregation is also mentioned by Luigi Rizzo's 3162 * netmap paper (http://info.iet.unipi.it/~luigi/netmap/). 3163 * 3164 * Subqueue packet staging is performed for two entry points into drivers' 3165 * transmission function: 3166 * - Direct ifnet.if_start calling on the subqueue, i.e. ifsq_ifstart_try() 3167 * - ifnet.if_start scheduling on the subqueue, i.e. ifsq_ifstart_schedule() 3168 * 3169 * Subqueue packet staging will be stopped upon any of the following 3170 * conditions: 3171 * - If the count of packets enqueued on the current CPU is greater than or 3172 * equal to ifsq_stage_cntmax. (XXX this should be per-interface) 3173 * - If the total length of packets enqueued on the current CPU is greater 3174 * than or equal to the hardware's MTU - max_protohdr. max_protohdr is 3175 * cut from the hardware's MTU mainly because a full TCP segment's size 3176 * is usually less than hardware's MTU. 3177 * - ifsq_ifstart_schedule() is not pending on the current CPU and 3178 * ifnet.if_start subqueue interlock (ifaltq_subq.ifsq_started) is not 3179 * released. 3180 * - The if_start_rollup(), which is registered as low priority netisr 3181 * rollup function, is called; probably because no more work is pending 3182 * for netisr. 3183 * 3184 * NOTE: 3185 * Currently subqueue packet staging is only performed in netisr threads. 3186 */ 3187 int 3188 ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa) 3189 { 3190 struct ifaltq *ifq = &ifp->if_snd; 3191 struct ifaltq_subque *ifsq; 3192 int error, start = 0, len, mcast = 0, avoid_start = 0; 3193 struct ifsubq_stage_head *head = NULL; 3194 struct ifsubq_stage *stage = NULL; 3195 struct globaldata *gd = mycpu; 3196 struct thread *td = gd->gd_curthread; 3197 3198 crit_enter_quick(td); 3199 3200 ifsq = ifq_map_subq(ifq, gd->gd_cpuid); 3201 ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq); 3202 3203 len = m->m_pkthdr.len; 3204 if (m->m_flags & M_MCAST) 3205 mcast = 1; 3206 3207 if (td->td_type == TD_TYPE_NETISR) { 3208 head = &ifsubq_stage_heads[mycpuid]; 3209 stage = ifsq_get_stage(ifsq, mycpuid); 3210 3211 stage->stg_cnt++; 3212 stage->stg_len += len; 3213 if (stage->stg_cnt < ifsq_stage_cntmax && 3214 stage->stg_len < (ifp->if_mtu - max_protohdr)) 3215 avoid_start = 1; 3216 } 3217 3218 ALTQ_SQ_LOCK(ifsq); 3219 error = ifsq_enqueue_locked(ifsq, m, pa); 3220 if (error) { 3221 IFNET_STAT_INC(ifp, oqdrops, 1); 3222 if (!ifsq_data_ready(ifsq)) { 3223 ALTQ_SQ_UNLOCK(ifsq); 3224 crit_exit_quick(td); 3225 return error; 3226 } 3227 avoid_start = 0; 3228 } 3229 if (!ifsq_is_started(ifsq)) { 3230 if (avoid_start) { 3231 ALTQ_SQ_UNLOCK(ifsq); 3232 3233 KKASSERT(!error); 3234 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0) 3235 ifsq_stage_insert(head, stage); 3236 3237 IFNET_STAT_INC(ifp, obytes, len); 3238 if (mcast) 3239 IFNET_STAT_INC(ifp, omcasts, 1); 3240 crit_exit_quick(td); 3241 return error; 3242 } 3243 3244 /* 3245 * Hold the subqueue interlock of ifnet.if_start 3246 */ 3247 ifsq_set_started(ifsq); 3248 start = 1; 3249 } 3250 ALTQ_SQ_UNLOCK(ifsq); 3251 3252 if (!error) { 3253 IFNET_STAT_INC(ifp, obytes, len); 3254 if (mcast) 3255 IFNET_STAT_INC(ifp, omcasts, 1); 3256 } 3257 3258 if (stage != NULL) { 3259 if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) { 3260 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED); 3261 if (!avoid_start) { 3262 ifsq_stage_remove(head, stage); 3263 ifsq_ifstart_schedule(ifsq, 1); 3264 } 3265 crit_exit_quick(td); 3266 return error; 3267 } 3268
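		/*
		 * Staging does not continue for this packet: unlink the
		 * stage if it is still queued, otherwise just reset its
		 * counters.
		 */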
3269 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) { 3270 ifsq_stage_remove(head, stage); 3271 } else { 3272 stage->stg_cnt = 0; 3273 stage->stg_len = 0; 3274 } 3275 } 3276 3277 if (!start) { 3278 crit_exit_quick(td); 3279 return error; 3280 } 3281 3282 ifsq_ifstart_try(ifsq, 0); 3283 3284 crit_exit_quick(td); 3285 return error; 3286 } 3287 3288 void * 3289 ifa_create(int size) 3290 { 3291 struct ifaddr *ifa; 3292 int i; 3293 3294 KASSERT(size >= sizeof(*ifa), ("ifaddr size too small")); 3295 3296 ifa = kmalloc(size, M_IFADDR, M_INTWAIT | M_ZERO); 3297 3298 /* 3299 * Make ifa_container availabel on all CPUs, since they 3300 * could be accessed by any threads. 3301 */ 3302 ifa->ifa_containers = 3303 kmalloc(ncpus * sizeof(struct ifaddr_container), 3304 M_IFADDR, 3305 M_INTWAIT | M_ZERO | M_CACHEALIGN); 3306 3307 ifa->ifa_ncnt = ncpus; 3308 for (i = 0; i < ncpus; ++i) { 3309 struct ifaddr_container *ifac = &ifa->ifa_containers[i]; 3310 3311 ifac->ifa_magic = IFA_CONTAINER_MAGIC; 3312 ifac->ifa = ifa; 3313 ifac->ifa_refcnt = 1; 3314 } 3315 #ifdef IFADDR_DEBUG 3316 kprintf("alloc ifa %p %d\n", ifa, size); 3317 #endif 3318 return ifa; 3319 } 3320 3321 void 3322 ifac_free(struct ifaddr_container *ifac, int cpu_id) 3323 { 3324 struct ifaddr *ifa = ifac->ifa; 3325 3326 KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC); 3327 KKASSERT(ifac->ifa_refcnt == 0); 3328 KASSERT(ifac->ifa_listmask == 0, 3329 ("ifa is still on %#x lists", ifac->ifa_listmask)); 3330 3331 ifac->ifa_magic = IFA_CONTAINER_DEAD; 3332 3333 #ifdef IFADDR_DEBUG_VERBOSE 3334 kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id); 3335 #endif 3336 3337 KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus, 3338 ("invalid # of ifac, %d", ifa->ifa_ncnt)); 3339 if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) { 3340 #ifdef IFADDR_DEBUG 3341 kprintf("free ifa %p\n", ifa); 3342 #endif 3343 kfree(ifa->ifa_containers, M_IFADDR); 3344 kfree(ifa, M_IFADDR); 3345 } 3346 } 3347 3348 static void 3349 ifa_iflink_dispatch(netmsg_t nmsg) 3350 { 3351 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3352 struct ifaddr *ifa = msg->ifa; 3353 struct ifnet *ifp = msg->ifp; 3354 int cpu = mycpuid; 3355 struct ifaddr_container *ifac; 3356 3357 crit_enter(); 3358 3359 ifac = &ifa->ifa_containers[cpu]; 3360 ASSERT_IFAC_VALID(ifac); 3361 KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0, 3362 ("ifaddr is on if_addrheads")); 3363 3364 ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD; 3365 if (msg->tail) 3366 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link); 3367 else 3368 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link); 3369 3370 crit_exit(); 3371 3372 netisr_forwardmsg_all(&nmsg->base, cpu + 1); 3373 } 3374 3375 void 3376 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail) 3377 { 3378 struct netmsg_ifaddr msg; 3379 3380 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3381 0, ifa_iflink_dispatch); 3382 msg.ifa = ifa; 3383 msg.ifp = ifp; 3384 msg.tail = tail; 3385 3386 netisr_domsg(&msg.base, 0); 3387 } 3388 3389 static void 3390 ifa_ifunlink_dispatch(netmsg_t nmsg) 3391 { 3392 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3393 struct ifaddr *ifa = msg->ifa; 3394 struct ifnet *ifp = msg->ifp; 3395 int cpu = mycpuid; 3396 struct ifaddr_container *ifac; 3397 3398 crit_enter(); 3399 3400 ifac = &ifa->ifa_containers[cpu]; 3401 ASSERT_IFAC_VALID(ifac); 3402 KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD, 3403 ("ifaddr is not on if_addrhead")); 3404 3405 TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link); 3406 
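	/*
	 * This message is forwarded to each netisr CPU in turn (see
	 * netisr_forwardmsg_all() below), so the unlink above is repeated
	 * for every CPU's if_addrheads list.
	 */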
ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD; 3407 3408 crit_exit(); 3409 3410 netisr_forwardmsg_all(&nmsg->base, cpu + 1); 3411 } 3412 3413 void 3414 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp) 3415 { 3416 struct netmsg_ifaddr msg; 3417 3418 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3419 0, ifa_ifunlink_dispatch); 3420 msg.ifa = ifa; 3421 msg.ifp = ifp; 3422 3423 netisr_domsg(&msg.base, 0); 3424 } 3425 3426 static void 3427 ifa_destroy_dispatch(netmsg_t nmsg) 3428 { 3429 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3430 3431 IFAFREE(msg->ifa); 3432 netisr_forwardmsg_all(&nmsg->base, mycpuid + 1); 3433 } 3434 3435 void 3436 ifa_destroy(struct ifaddr *ifa) 3437 { 3438 struct netmsg_ifaddr msg; 3439 3440 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3441 0, ifa_destroy_dispatch); 3442 msg.ifa = ifa; 3443 3444 netisr_domsg(&msg.base, 0); 3445 } 3446 3447 static void 3448 if_start_rollup(void) 3449 { 3450 struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid]; 3451 struct ifsubq_stage *stage; 3452 3453 crit_enter(); 3454 3455 while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) { 3456 struct ifaltq_subque *ifsq = stage->stg_subq; 3457 int is_sched = 0; 3458 3459 if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED) 3460 is_sched = 1; 3461 ifsq_stage_remove(head, stage); 3462 3463 if (is_sched) { 3464 ifsq_ifstart_schedule(ifsq, 1); 3465 } else { 3466 int start = 0; 3467 3468 ALTQ_SQ_LOCK(ifsq); 3469 if (!ifsq_is_started(ifsq)) { 3470 /* 3471 * Hold the subqueue interlock of 3472 * ifnet.if_start 3473 */ 3474 ifsq_set_started(ifsq); 3475 start = 1; 3476 } 3477 ALTQ_SQ_UNLOCK(ifsq); 3478 3479 if (start) 3480 ifsq_ifstart_try(ifsq, 1); 3481 } 3482 KKASSERT((stage->stg_flags & 3483 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 3484 } 3485 3486 crit_exit(); 3487 } 3488 3489 static void 3490 ifnetinit(void *dummy __unused) 3491 { 3492 int i; 3493 3494 /* XXX netisr_ncpus */ 3495 for (i = 0; i < ncpus; ++i) 3496 TAILQ_INIT(&ifsubq_stage_heads[i].stg_head); 3497 netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART); 3498 } 3499 3500 void 3501 if_register_com_alloc(u_char type, 3502 if_com_alloc_t *a, if_com_free_t *f) 3503 { 3504 3505 KASSERT(if_com_alloc[type] == NULL, 3506 ("if_register_com_alloc: %d already registered", type)); 3507 KASSERT(if_com_free[type] == NULL, 3508 ("if_register_com_alloc: %d free already registered", type)); 3509 3510 if_com_alloc[type] = a; 3511 if_com_free[type] = f; 3512 } 3513 3514 void 3515 if_deregister_com_alloc(u_char type) 3516 { 3517 3518 KASSERT(if_com_alloc[type] != NULL, 3519 ("if_deregister_com_alloc: %d not registered", type)); 3520 KASSERT(if_com_free[type] != NULL, 3521 ("if_deregister_com_alloc: %d free not registered", type)); 3522 if_com_alloc[type] = NULL; 3523 if_com_free[type] = NULL; 3524 } 3525 3526 void 3527 ifq_set_maxlen(struct ifaltq *ifq, int len) 3528 { 3529 ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax); 3530 } 3531 3532 int 3533 ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused) 3534 { 3535 return ALTQ_SUBQ_INDEX_DEFAULT; 3536 } 3537 3538 int 3539 ifq_mapsubq_modulo(struct ifaltq *ifq, int cpuid) 3540 { 3541 3542 return (cpuid % ifq->altq_subq_mappriv); 3543 } 3544 3545 static void 3546 ifsq_watchdog(void *arg) 3547 { 3548 struct ifsubq_watchdog *wd = arg; 3549 struct ifnet *ifp; 3550 3551 if (__predict_true(wd->wd_timer == 0 || --wd->wd_timer)) 3552 goto done; 3553 3554 ifp = ifsq_get_ifp(wd->wd_subq); 3555 if (ifnet_tryserialize_all(ifp)) { 3556 
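		/*
		 * All of the ifnet's serializers are held, so it is safe
		 * to call the driver-supplied watchdog routine.
		 */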
wd->wd_watchdog(wd->wd_subq); 3557 ifnet_deserialize_all(ifp); 3558 } else { 3559 /* try again next timeout */ 3560 wd->wd_timer = 1; 3561 } 3562 done: 3563 ifsq_watchdog_reset(wd); 3564 } 3565 3566 static void 3567 ifsq_watchdog_reset(struct ifsubq_watchdog *wd) 3568 { 3569 callout_reset_bycpu(&wd->wd_callout, hz, ifsq_watchdog, wd, 3570 ifsq_get_cpuid(wd->wd_subq)); 3571 } 3572 3573 void 3574 ifsq_watchdog_init(struct ifsubq_watchdog *wd, struct ifaltq_subque *ifsq, 3575 ifsq_watchdog_t watchdog) 3576 { 3577 callout_init_mp(&wd->wd_callout); 3578 wd->wd_timer = 0; 3579 wd->wd_subq = ifsq; 3580 wd->wd_watchdog = watchdog; 3581 } 3582 3583 void 3584 ifsq_watchdog_start(struct ifsubq_watchdog *wd) 3585 { 3586 wd->wd_timer = 0; 3587 ifsq_watchdog_reset(wd); 3588 } 3589 3590 void 3591 ifsq_watchdog_stop(struct ifsubq_watchdog *wd) 3592 { 3593 wd->wd_timer = 0; 3594 callout_stop(&wd->wd_callout); 3595 } 3596 3597 void 3598 ifnet_lock(void) 3599 { 3600 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3601 ("try holding ifnet lock in netisr")); 3602 mtx_lock(&ifnet_mtx); 3603 } 3604 3605 void 3606 ifnet_unlock(void) 3607 { 3608 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3609 ("try holding ifnet lock in netisr")); 3610 mtx_unlock(&ifnet_mtx); 3611 } 3612 3613 static struct ifnet_array * 3614 ifnet_array_alloc(int count) 3615 { 3616 struct ifnet_array *arr; 3617 3618 arr = kmalloc(__offsetof(struct ifnet_array, ifnet_arr[count]), 3619 M_IFNET, M_WAITOK); 3620 arr->ifnet_count = count; 3621 3622 return arr; 3623 } 3624 3625 static void 3626 ifnet_array_free(struct ifnet_array *arr) 3627 { 3628 if (arr == &ifnet_array0) 3629 return; 3630 kfree(arr, M_IFNET); 3631 } 3632 3633 static struct ifnet_array * 3634 ifnet_array_add(struct ifnet *ifp, const struct ifnet_array *old_arr) 3635 { 3636 struct ifnet_array *arr; 3637 int count, i; 3638 3639 KASSERT(old_arr->ifnet_count >= 0, 3640 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3641 count = old_arr->ifnet_count + 1; 3642 arr = ifnet_array_alloc(count); 3643 3644 /* 3645 * Save the old ifnet array and append this ifp to the end of 3646 * the new ifnet array. 3647 */ 3648 for (i = 0; i < old_arr->ifnet_count; ++i) { 3649 KASSERT(old_arr->ifnet_arr[i] != ifp, 3650 ("%s is already in ifnet array", ifp->if_xname)); 3651 arr->ifnet_arr[i] = old_arr->ifnet_arr[i]; 3652 } 3653 KASSERT(i == count - 1, 3654 ("add %s, ifnet array index mismatch, should be %d, but got %d", 3655 ifp->if_xname, count - 1, i)); 3656 arr->ifnet_arr[i] = ifp; 3657 3658 return arr; 3659 } 3660 3661 static struct ifnet_array * 3662 ifnet_array_del(struct ifnet *ifp, const struct ifnet_array *old_arr) 3663 { 3664 struct ifnet_array *arr; 3665 int count, i, idx, found = 0; 3666 3667 KASSERT(old_arr->ifnet_count > 0, 3668 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3669 count = old_arr->ifnet_count - 1; 3670 arr = ifnet_array_alloc(count); 3671 3672 /* 3673 * Save the old ifnet array, but skip this ifp. 
3674 */ 3675 idx = 0; 3676 for (i = 0; i < old_arr->ifnet_count; ++i) { 3677 if (old_arr->ifnet_arr[i] == ifp) { 3678 KASSERT(!found, 3679 ("dup %s is in ifnet array", ifp->if_xname)); 3680 found = 1; 3681 continue; 3682 } 3683 KASSERT(idx < count, 3684 ("invalid ifnet array index %d, count %d", idx, count)); 3685 arr->ifnet_arr[idx] = old_arr->ifnet_arr[i]; 3686 ++idx; 3687 } 3688 KASSERT(found, ("%s is not in ifnet array", ifp->if_xname)); 3689 KASSERT(idx == count, 3690 ("del %s, ifnet array count mismatch, should be %d, but got %d ", 3691 ifp->if_xname, count, idx)); 3692 3693 return arr; 3694 } 3695 3696 const struct ifnet_array * 3697 ifnet_array_get(void) 3698 { 3699 const struct ifnet_array *ret; 3700 3701 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3702 ret = ifnet_array; 3703 /* Make sure 'ret' is really used. */ 3704 cpu_ccfence(); 3705 return (ret); 3706 } 3707 3708 int 3709 ifnet_array_isempty(void) 3710 { 3711 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3712 if (ifnet_array->ifnet_count == 0) 3713 return 1; 3714 else 3715 return 0; 3716 } 3717 3718 void 3719 ifa_marker_init(struct ifaddr_marker *mark, struct ifnet *ifp) 3720 { 3721 struct ifaddr *ifa; 3722 3723 memset(mark, 0, sizeof(*mark)); 3724 ifa = &mark->ifa; 3725 3726 mark->ifac.ifa = ifa; 3727 3728 ifa->ifa_addr = &mark->addr; 3729 ifa->ifa_dstaddr = &mark->dstaddr; 3730 ifa->ifa_netmask = &mark->netmask; 3731 ifa->ifa_ifp = ifp; 3732 } 3733 3734 static int 3735 if_ringcnt_fixup(int ring_cnt, int ring_cntmax) 3736 { 3737 3738 KASSERT(ring_cntmax > 0, ("invalid ring count max %d", ring_cntmax)); 3739 3740 if (ring_cnt <= 0 || ring_cnt > ring_cntmax) 3741 ring_cnt = ring_cntmax; 3742 if (ring_cnt > netisr_ncpus) 3743 ring_cnt = netisr_ncpus; 3744 return (ring_cnt); 3745 } 3746 3747 static void 3748 if_ringmap_set_grid(device_t dev, struct if_ringmap *rm, int grid) 3749 { 3750 int i, offset; 3751 3752 KASSERT(grid > 0, ("invalid if_ringmap grid %d", grid)); 3753 KASSERT(grid >= rm->rm_cnt, ("invalid if_ringmap grid %d, count %d", 3754 grid, rm->rm_cnt)); 3755 rm->rm_grid = grid; 3756 3757 offset = (rm->rm_grid * device_get_unit(dev)) % netisr_ncpus; 3758 for (i = 0; i < rm->rm_cnt; ++i) { 3759 rm->rm_cpumap[i] = offset + i; 3760 KASSERT(rm->rm_cpumap[i] < netisr_ncpus, 3761 ("invalid cpumap[%d] = %d, offset %d", i, 3762 rm->rm_cpumap[i], offset)); 3763 } 3764 } 3765 3766 static struct if_ringmap * 3767 if_ringmap_alloc_flags(device_t dev, int ring_cnt, int ring_cntmax, 3768 uint32_t flags) 3769 { 3770 struct if_ringmap *rm; 3771 int i, grid = 0, prev_grid; 3772 3773 ring_cnt = if_ringcnt_fixup(ring_cnt, ring_cntmax); 3774 rm = kmalloc(__offsetof(struct if_ringmap, rm_cpumap[ring_cnt]), 3775 M_DEVBUF, M_WAITOK | M_ZERO); 3776 3777 rm->rm_cnt = ring_cnt; 3778 if (flags & RINGMAP_FLAG_POWEROF2) 3779 rm->rm_cnt = 1 << (fls(rm->rm_cnt) - 1); 3780 3781 prev_grid = netisr_ncpus; 3782 for (i = 0; i < netisr_ncpus; ++i) { 3783 if (netisr_ncpus % (i + 1) != 0) 3784 continue; 3785 3786 grid = netisr_ncpus / (i + 1); 3787 if (rm->rm_cnt > grid) { 3788 grid = prev_grid; 3789 break; 3790 } 3791 3792 if (rm->rm_cnt > netisr_ncpus / (i + 2)) 3793 break; 3794 prev_grid = grid; 3795 } 3796 if_ringmap_set_grid(dev, rm, grid); 3797 3798 return (rm); 3799 } 3800 3801 struct if_ringmap * 3802 if_ringmap_alloc(device_t dev, int ring_cnt, int ring_cntmax) 3803 { 3804 3805 return (if_ringmap_alloc_flags(dev, ring_cnt, ring_cntmax, 3806 RINGMAP_FLAG_NONE)); 3807 } 3808 3809 struct if_ringmap * 3810 
if_ringmap_alloc2(device_t dev, int ring_cnt, int ring_cntmax) 3811 { 3812 3813 return (if_ringmap_alloc_flags(dev, ring_cnt, ring_cntmax, 3814 RINGMAP_FLAG_POWEROF2)); 3815 } 3816 3817 void 3818 if_ringmap_free(struct if_ringmap *rm) 3819 { 3820 3821 kfree(rm, M_DEVBUF); 3822 } 3823 3824 /* 3825 * Align the two ringmaps. 3826 * 3827 * e.g. 8 netisrs, rm0 contains 4 rings, rm1 contains 2 rings. 3828 * 3829 * Before: 3830 * 3831 * CPU 0 1 2 3 4 5 6 7 3832 * NIC_RX n0 n1 n2 n3 3833 * NIC_TX N0 N1 3834 * 3835 * After: 3836 * 3837 * CPU 0 1 2 3 4 5 6 7 3838 * NIC_RX n0 n1 n2 n3 3839 * NIC_TX N0 N1 3840 */ 3841 void 3842 if_ringmap_align(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1) 3843 { 3844 3845 if (rm0->rm_grid > rm1->rm_grid) 3846 if_ringmap_set_grid(dev, rm1, rm0->rm_grid); 3847 else if (rm0->rm_grid < rm1->rm_grid) 3848 if_ringmap_set_grid(dev, rm0, rm1->rm_grid); 3849 } 3850 3851 void 3852 if_ringmap_match(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1) 3853 { 3854 int subset_grid, cnt, divisor, mod, offset, i; 3855 struct if_ringmap *subset_rm, *rm; 3856 int old_rm0_grid, old_rm1_grid; 3857 3858 if (rm0->rm_grid == rm1->rm_grid) 3859 return; 3860 3861 /* Save grid for later use */ 3862 old_rm0_grid = rm0->rm_grid; 3863 old_rm1_grid = rm1->rm_grid; 3864 3865 if_ringmap_align(dev, rm0, rm1); 3866 3867 /* 3868 * Re-shuffle rings to get more even distribution. 3869 * 3870 * e.g. 12 netisrs, rm0 contains 4 rings, rm1 contains 2 rings. 3871 * 3872 * CPU 0 1 2 3 4 5 6 7 8 9 10 11 3873 * 3874 * NIC_RX a0 a1 a2 a3 b0 b1 b2 b3 c0 c1 c2 c3 3875 * NIC_TX A0 A1 B0 B1 C0 C1 3876 * 3877 * NIC_RX d0 d1 d2 d3 e0 e1 e2 e3 f0 f1 f2 f3 3878 * NIC_TX D0 D1 E0 E1 F0 F1 3879 */ 3880 3881 if (rm0->rm_cnt >= (2 * old_rm1_grid)) { 3882 cnt = rm0->rm_cnt; 3883 subset_grid = old_rm1_grid; 3884 subset_rm = rm1; 3885 rm = rm0; 3886 } else if (rm1->rm_cnt > (2 * old_rm0_grid)) { 3887 cnt = rm1->rm_cnt; 3888 subset_grid = old_rm0_grid; 3889 subset_rm = rm0; 3890 rm = rm1; 3891 } else { 3892 /* No space to shuffle. 
*/ 3893 return; 3894 } 3895 3896 mod = cnt / subset_grid; 3897 KKASSERT(mod >= 2); 3898 divisor = netisr_ncpus / rm->rm_grid; 3899 offset = ((device_get_unit(dev) / divisor) % mod) * subset_grid; 3900 3901 for (i = 0; i < subset_rm->rm_cnt; ++i) { 3902 subset_rm->rm_cpumap[i] += offset; 3903 KASSERT(subset_rm->rm_cpumap[i] < netisr_ncpus, 3904 ("match: invalid cpumap[%d] = %d, offset %d", 3905 i, subset_rm->rm_cpumap[i], offset)); 3906 } 3907 #ifdef INVARIANTS 3908 for (i = 0; i < subset_rm->rm_cnt; ++i) { 3909 int j; 3910 3911 for (j = 0; j < rm->rm_cnt; ++j) { 3912 if (rm->rm_cpumap[j] == subset_rm->rm_cpumap[i]) 3913 break; 3914 } 3915 KASSERT(j < rm->rm_cnt, 3916 ("subset cpumap[%d] = %d not found in superset", 3917 i, subset_rm->rm_cpumap[i])); 3918 } 3919 #endif 3920 } 3921 3922 int 3923 if_ringmap_count(const struct if_ringmap *rm) 3924 { 3925 3926 return (rm->rm_cnt); 3927 } 3928 3929 int 3930 if_ringmap_cpumap(const struct if_ringmap *rm, int ring) 3931 { 3932 3933 KASSERT(ring >= 0 && ring < rm->rm_cnt, ("invalid ring %d", ring)); 3934 return (rm->rm_cpumap[ring]); 3935 } 3936 3937 void 3938 if_ringmap_rdrtable(const struct if_ringmap *rm, int table[], int table_nent) 3939 { 3940 int i, grid_idx, grid_cnt, patch_off, patch_cnt, ncopy; 3941 3942 KASSERT(table_nent > 0 && (table_nent & NETISR_CPUMASK) == 0, 3943 ("invalid redirect table entries %d", table_nent)); 3944 3945 grid_idx = 0; 3946 for (i = 0; i < NETISR_CPUMAX; ++i) { 3947 table[i] = grid_idx++ % rm->rm_cnt; 3948 3949 if (grid_idx == rm->rm_grid) 3950 grid_idx = 0; 3951 } 3952 3953 /* 3954 * Make the ring distributed more evenly for the remainder 3955 * of each grid. 3956 * 3957 * e.g. 12 netisrs, rm contains 8 rings. 3958 * 3959 * Redirect table before: 3960 * 3961 * 0 1 2 3 4 5 6 7 0 1 2 3 0 1 2 3 3962 * 4 5 6 7 0 1 2 3 0 1 2 3 4 5 6 7 3963 * 0 1 2 3 0 1 2 3 4 5 6 7 0 1 2 3 3964 * .... 3965 * 3966 * Redirect table after being patched (pX, patched entries): 3967 * 3968 * 0 1 2 3 4 5 6 7 p0 p1 p2 p3 0 1 2 3 3969 * 4 5 6 7 p4 p5 p6 p7 0 1 2 3 4 5 6 7 3970 * p0 p1 p2 p3 0 1 2 3 4 5 6 7 p4 p5 p6 p7 3971 * .... 
3972 */ 3973 patch_cnt = rm->rm_grid % rm->rm_cnt; 3974 if (patch_cnt == 0) 3975 goto done; 3976 patch_off = rm->rm_grid - (rm->rm_grid % rm->rm_cnt); 3977 3978 grid_cnt = roundup(NETISR_CPUMAX, rm->rm_grid) / rm->rm_grid; 3979 grid_idx = 0; 3980 for (i = 0; i < grid_cnt; ++i) { 3981 int j; 3982 3983 for (j = 0; j < patch_cnt; ++j) { 3984 int fix_idx; 3985 3986 fix_idx = (i * rm->rm_grid) + patch_off + j; 3987 if (fix_idx >= NETISR_CPUMAX) 3988 goto done; 3989 table[fix_idx] = grid_idx++ % rm->rm_cnt; 3990 } 3991 } 3992 done: 3993 /* 3994 * If the device supports a larger redirect table, duplicate 3995 * the first NETISR_CPUMAX entries to the rest of the table, 3996 * so that it matches the upper layer's expectation: 3997 * (hash & NETISR_CPUMASK) % netisr_ncpus 3998 */ 3999 ncopy = table_nent / NETISR_CPUMAX; 4000 for (i = 1; i < ncopy; ++i) { 4001 memcpy(&table[i * NETISR_CPUMAX], table, 4002 NETISR_CPUMAX * sizeof(table[0])); 4003 } 4004 if (if_ringmap_dumprdr) { 4005 for (i = 0; i < table_nent; ++i) { 4006 if (i != 0 && i % 16 == 0) 4007 kprintf("\n"); 4008 kprintf("%03d ", table[i]); 4009 } 4010 kprintf("\n"); 4011 } 4012 } 4013 4014 int 4015 if_ringmap_cpumap_sysctl(SYSCTL_HANDLER_ARGS) 4016 { 4017 struct if_ringmap *rm = arg1; 4018 int i, error = 0; 4019 4020 for (i = 0; i < rm->rm_cnt; ++i) { 4021 int cpu = rm->rm_cpumap[i]; 4022 4023 error = SYSCTL_OUT(req, &cpu, sizeof(cpu)); 4024 if (error) 4025 break; 4026 } 4027 return (error); 4028 } 4029
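/*
 * Illustrative driver-side use of the if_ringmap API above.  This is a
 * minimal sketch and not part of this file: the mydev_* names, the softc
 * layout and MYDEV_RXRING_MAX/MYDEV_TXRING_MAX are hypothetical; only the
 * if_ringmap_*() calls, netisr_ncpus and NETISR_CPUMAX are real.
 */
#if 0
static void
mydev_setup_rings(device_t dev, struct mydev_softc *sc)
{
	int rdr[NETISR_CPUMAX];
	int i, nrx;

	/* Ask for one RX/TX ring per netisr CPU, capped by the hardware. */
	sc->mydev_rx_rmap = if_ringmap_alloc(dev, netisr_ncpus,
	    MYDEV_RXRING_MAX);
	sc->mydev_tx_rmap = if_ringmap_alloc(dev, netisr_ncpus,
	    MYDEV_TXRING_MAX);

	/* Keep the TX rings on the same CPUs as (a subset of) the RX rings. */
	if_ringmap_match(dev, sc->mydev_rx_rmap, sc->mydev_tx_rmap);

	/* Bind each RX ring's interrupt/polling work to its assigned CPU. */
	nrx = if_ringmap_count(sc->mydev_rx_rmap);
	for (i = 0; i < nrx; ++i)
		sc->mydev_rx_cpuid[i] = if_ringmap_cpumap(sc->mydev_rx_rmap, i);

	/* Build the RSS redirect table and program it into the NIC. */
	if_ringmap_rdrtable(sc->mydev_rx_rmap, rdr, NETISR_CPUMAX);
	/* ... write rdr[] into the hardware redirect registers ... */
}
#endif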