/*
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_inet.h"
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/thread.h>
#include <sys/serialize.h>
#include <sys/bus.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/mutex2.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/ifq_var.h>
#include <net/radix.h>
#include <net/route.h>
#include <net/if_clone.h>
#include <net/netisr2.h>
#include <net/netmsg2.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>
#include <machine/smp.h>

#if defined(INET) || defined(INET6)
/*XXX*/
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif
#endif

#if defined(COMPAT_43)
#include <emulation/43bsd/43bsd_socket.h>
#endif /* COMPAT_43 */

struct netmsg_ifaddr {
	struct netmsg_base base;
	struct ifaddr	*ifa;
	struct ifnet	*ifp;
	int		tail;
};
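/*
 * Per-CPU list head for staged ifaltq subqueues.  When a netisr thread
 * requests transmission, the subqueue's per-CPU ifsubq_stage entry may
 * be parked here (packets accumulate, up to net.link.stage_cntmax)
 * instead of scheduling ifnet.if_start right away; see
 * ifsq_ifstart_schedule() below.
 */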
struct ifsubq_stage_head {
	TAILQ_HEAD(, ifsubq_stage)	stg_head;
} __cachealign;

/*
 * System initialization
 */
static void	if_attachdomain(void *);
static void	if_attachdomain1(struct ifnet *);
static int	ifconf(u_long, caddr_t, struct ucred *);
static void	ifinit(void *);
static void	ifnetinit(void *);
static void	if_slowtimo(void *);
static void	link_rtrequest(int, struct rtentry *);
static int	if_rtdel(struct radix_node *, void *);

/* Helper functions */
static void	ifsq_watchdog_reset(struct ifsubq_watchdog *);
static int	if_delmulti_serialized(struct ifnet *, struct sockaddr *);

#ifdef INET6
/*
 * XXX: declared here to avoid including many inet6 related files;
 * should be more generalized?
 */
extern void	nd6_setmtu(struct ifnet *);
#endif

SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");

static int ifsq_stage_cntmax = 4;
TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax);
SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW,
    &ifsq_stage_cntmax, 0, "ifq staging packet count max");

static int if_stats_compat = 0;
SYSCTL_INT(_net_link, OID_AUTO, stats_compat, CTLFLAG_RW,
    &if_stats_compat, 0, "Maintain the old ifnet stats for compatibility");

SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
/* Must be after netisr_init */
SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL)

static if_com_alloc_t *if_com_alloc[256];
static if_com_free_t *if_com_free[256];

MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");

int			ifqmaxlen = IFQ_MAXLEN;
struct ifnethead	ifnet = TAILQ_HEAD_INITIALIZER(ifnet);

struct callout		if_slowtimo_timer;

int			if_index = 0;
struct ifnet		**ifindex2ifnet = NULL;
static struct thread	ifnet_threads[MAXCPU];

static struct ifsubq_stage_head	ifsubq_stage_heads[MAXCPU];

#ifdef notyet
#define IFQ_KTR_STRING		"ifq=%p"
#define IFQ_KTR_ARGS		struct ifaltq *ifq
#ifndef KTR_IFQ
#define KTR_IFQ			KTR_ALL
#endif
KTR_INFO_MASTER(ifq);
KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS);
KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS);
#define logifq(name, arg)	KTR_LOG(ifq_ ## name, arg)

#define IF_START_KTR_STRING	"ifp=%p"
#define IF_START_KTR_ARGS	struct ifnet *ifp
#ifndef KTR_IF_START
#define KTR_IF_START		KTR_ALL
#endif
KTR_INFO_MASTER(if_start);
KTR_INFO(KTR_IF_START, if_start, run, 0,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, sched, 1,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, avoid, 2,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
#define logifstart(name, arg)	KTR_LOG(if_start_ ## name, arg)
#endif

TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head);

/*
 * Network interface utility routines.
 *
 * Routines with ifa_ifwith* names take sockaddr *'s as
 * parameters.
 */
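/*
 * Run once at SI_SUB_PROTO_IF time: initialize the slow timeout
 * callout, give every already-attached interface a sane send queue
 * length, and kick off the first if_slowtimo() scan.
 */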
/* ARGSUSED*/
void
ifinit(void *dummy)
{
	struct ifnet *ifp;

	callout_init(&if_slowtimo_timer);

	crit_enter();
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (ifp->if_snd.altq_maxlen == 0) {
			if_printf(ifp, "XXX: driver didn't set altq_maxlen\n");
			ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
		}
	}
	crit_exit();

	if_slowtimo(0);
}

static void
ifsq_ifstart_ipifunc(void *arg)
{
	struct ifaltq_subque *ifsq = arg;
	struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid);

	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), lmsg);
	crit_exit();
}

static __inline void
ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
{
	KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
	TAILQ_REMOVE(&head->stg_head, stage, stg_link);
	stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED);
	stage->stg_cnt = 0;
	stage->stg_len = 0;
}

static __inline void
ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
{
	KKASSERT((stage->stg_flags &
	    (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
	stage->stg_flags |= IFSQ_STAGE_FLAG_QUED;
	TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link);
}

/*
 * Schedule ifnet.if_start on the subqueue owner CPU
 */
static void
ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force)
{
	int cpu;

	if (!force && curthread->td_type == TD_TYPE_NETISR &&
	    ifsq_stage_cntmax > 0) {
		struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);

		stage->stg_cnt = 0;
		stage->stg_len = 0;
		if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
			ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage);
		stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED;
		return;
	}

	cpu = ifsq_get_cpuid(ifsq);
	if (cpu != mycpuid)
		lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq);
	else
		ifsq_ifstart_ipifunc(ifsq);
}

/*
 * NOTE:
 * This function will release ifnet.if_start subqueue interlock,
 * if ifnet.if_start for the subqueue does not need to be scheduled
 */
static __inline int
ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running)
{
	if (!running || ifsq_is_empty(ifsq)
#ifdef ALTQ
	    || ifsq->ifsq_altq->altq_tbr != NULL
#endif
	) {
		ALTQ_SQ_LOCK(ifsq);
		/*
		 * ifnet.if_start subqueue interlock is released, if:
		 * 1) Hardware can not take any packets, due to
		 *    o  interface is marked down
		 *    o  hardware queue is full (ifsq_is_oactive)
		 *    Under the second situation, hardware interrupt
		 *    or polling(4) will call/schedule ifnet.if_start
		 *    on the subqueue when hardware queue is ready
		 * 2) There is no packet in the subqueue.
		 *    Further ifq_dispatch or ifq_handoff will call/
		 *    schedule ifnet.if_start on the subqueue.
		 * 3) TBR is used and it does not allow further
		 *    dequeueing.
		 *    TBR callout will call ifnet.if_start on the
		 *    subqueue.
		 */
		if (!running || !ifsq_data_ready(ifsq)) {
			ifsq_clr_started(ifsq);
			ALTQ_SQ_UNLOCK(ifsq);
			return 0;
		}
		ALTQ_SQ_UNLOCK(ifsq);
	}
	return 1;
}

static void
ifsq_ifstart_dispatch(netmsg_t msg)
{
	struct lwkt_msg *lmsg = &msg->base.lmsg;
	struct ifaltq_subque *ifsq = lmsg->u.ms_resultp;
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct globaldata *gd = mycpu;
	int running = 0, need_sched;

	crit_enter_gd(gd);

	lwkt_replymsg(lmsg, 0);	/* reply ASAP */

	if (gd->gd_cpuid != ifsq_get_cpuid(ifsq)) {
		/*
		 * We need to chase the subqueue owner CPU change.
		 */
		ifsq_ifstart_schedule(ifsq, 1);
		crit_exit_gd(gd);
		return;
	}

	ifsq_serialize_hw(ifsq);
	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
		ifp->if_start(ifp, ifsq);
		if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
			running = 1;
	}
	need_sched = ifsq_ifstart_need_schedule(ifsq, running);
	ifsq_deserialize_hw(ifsq);

	if (need_sched) {
		/*
		 * More data need to be transmitted, ifnet.if_start is
		 * scheduled on the subqueue owner CPU, and we keep going.
		 * NOTE: ifnet.if_start subqueue interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, 0);
	}

	crit_exit_gd(gd);
}

/* Device driver ifnet.if_start helper function */
void
ifsq_devstart(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	int running = 0;

	ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq);

	ALTQ_SQ_LOCK(ifsq);
	if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) {
		ALTQ_SQ_UNLOCK(ifsq);
		return;
	}
	ifsq_set_started(ifsq);
	ALTQ_SQ_UNLOCK(ifsq);

	ifp->if_start(ifp, ifsq);

	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
		running = 1;

	if (ifsq_ifstart_need_schedule(ifsq, running)) {
		/*
		 * More data need to be transmitted, ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: ifnet.if_start interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, 0);
	}
}

void
if_devstart(struct ifnet *ifp)
{
	ifsq_devstart(ifq_get_subq_default(&ifp->if_snd));
}

/* Device driver ifnet.if_start schedule helper function */
void
ifsq_devstart_sched(struct ifaltq_subque *ifsq)
{
	ifsq_ifstart_schedule(ifsq, 1);
}

void
if_devstart_sched(struct ifnet *ifp)
{
	ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd));
}

static void
if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_enter(ifp->if_serializer);
}

static void
if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	return lwkt_serialize_try(ifp->if_serializer);
}

#ifdef INVARIANTS
static void
if_default_serialize_assert(struct ifnet *ifp,
			    enum ifnet_serialize slz __unused,
			    boolean_t serialized)
{
	if (serialized)
		ASSERT_SERIALIZED(ifp->if_serializer);
	else
		ASSERT_NOT_SERIALIZED(ifp->if_serializer);
}
#endif
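/*
 * Illustrative sketch (not part of the build): a typical network
 * driver's attach path fills in the ifnet and then calls into this
 * file, e.g.
 *
 *	ifp->if_start = foo_start;
 *	ifq_set_maxlen(&ifp->if_snd, FOO_TX_NDESC - 1);
 *	ether_ifattach(ifp, sc->foo_macaddr, NULL);
 *
 * where ether_ifattach() invokes if_attach() below with an optional
 * serializer; the "foo" names are hypothetical.
 */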
/*
 * Attach an interface to the list of "active" interfaces.
 *
 * The serializer is optional.
 */
void
if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
{
	unsigned socksize, ifasize;
	int namelen, masklen;
	struct sockaddr_dl *sdl;
	struct ifaddr *ifa;
	struct ifaltq *ifq;
	int i, q;

	static int if_indexlim = 8;

	if (ifp->if_serialize != NULL) {
		KASSERT(ifp->if_deserialize != NULL &&
			ifp->if_tryserialize != NULL &&
			ifp->if_serialize_assert != NULL,
			("serialize functions are partially setup"));

		/*
		 * If the device supplies serialize functions,
		 * then clear if_serializer to catch any invalid
		 * usage of this field.
		 */
		KASSERT(serializer == NULL,
			("both serialize functions and default serializer "
			 "are supplied"));
		ifp->if_serializer = NULL;
	} else {
		KASSERT(ifp->if_deserialize == NULL &&
			ifp->if_tryserialize == NULL &&
			ifp->if_serialize_assert == NULL,
			("serialize functions are partially setup"));
		ifp->if_serialize = if_default_serialize;
		ifp->if_deserialize = if_default_deserialize;
		ifp->if_tryserialize = if_default_tryserialize;
#ifdef INVARIANTS
		ifp->if_serialize_assert = if_default_serialize_assert;
#endif

		/*
		 * The serializer can be passed in from the device,
		 * allowing the same serializer to be used for both
		 * the interrupt interlock and the device queue.
		 * If not specified, the netif structure will use an
		 * embedded serializer.
		 */
		if (serializer == NULL) {
			serializer = &ifp->if_default_serializer;
			lwkt_serialize_init(serializer);
		}
		ifp->if_serializer = serializer;
	}

	mtx_init(&ifp->if_ioctl_mtx);
	mtx_lock(&ifp->if_ioctl_mtx);

	lwkt_gettoken(&ifnet_token);	/* protect if_index and ifnet tailq */
	ifp->if_index = ++if_index;

	/*
	 * XXX -
	 * The old code would work if the interface passed a pre-existing
	 * chain of ifaddrs to this code.  We don't trust our callers to
	 * properly initialize the tailq, however, so we no longer allow
	 * this unlikely case.
	 */
	ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
				    M_IFADDR, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpus; ++i)
		TAILQ_INIT(&ifp->if_addrheads[i]);

	TAILQ_INIT(&ifp->if_multiaddrs);
	TAILQ_INIT(&ifp->if_groups);
	getmicrotime(&ifp->if_lastchange);
	if (ifindex2ifnet == NULL || if_index >= if_indexlim) {
		unsigned int n;
		struct ifnet **q;

		if_indexlim <<= 1;

		/* grow ifindex2ifnet */
		n = if_indexlim * sizeof(*q);
		q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
		if (ifindex2ifnet) {
			bcopy(ifindex2ifnet, q, n/2);
			kfree(ifindex2ifnet, M_IFADDR);
		}
		ifindex2ifnet = q;
	}

	ifindex2ifnet[if_index] = ifp;

	/*
	 * create a Link Level name for this device
	 */
	namelen = strlen(ifp->if_xname);
	masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
	socksize = masklen + ifp->if_addrlen;
	if (socksize < sizeof(*sdl))
		socksize = sizeof(*sdl);
	socksize = RT_ROUNDUP(socksize);
	ifasize = sizeof(struct ifaddr) + 2 * socksize;
	ifa = ifa_create(ifasize, M_WAITOK);
	sdl = (struct sockaddr_dl *)(ifa + 1);
	sdl->sdl_len = socksize;
	sdl->sdl_family = AF_LINK;
	bcopy(ifp->if_xname, sdl->sdl_data, namelen);
	sdl->sdl_nlen = namelen;
	sdl->sdl_index = ifp->if_index;
	sdl->sdl_type = ifp->if_type;
	ifp->if_lladdr = ifa;
	ifa->ifa_ifp = ifp;
	ifa->ifa_rtrequest = link_rtrequest;
	ifa->ifa_addr = (struct sockaddr *)sdl;
	sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
	ifa->ifa_netmask = (struct sockaddr *)sdl;
	sdl->sdl_len = masklen;
	while (namelen != 0)
		sdl->sdl_data[--namelen] = 0xff;
	ifa_iflink(ifa, ifp, 0 /* Insert head */);

	ifp->if_data_pcpu = kmalloc_cachealign(
	    ncpus * sizeof(struct ifdata_pcpu), M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifp->if_mapsubq == NULL)
		ifp->if_mapsubq = ifq_mapsubq_default;

	ifq = &ifp->if_snd;
	ifq->altq_type = 0;
	ifq->altq_disc = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	ifq->altq_tbr = NULL;
	ifq->altq_ifp = ifp;

	if (ifq->altq_subq_cnt <= 0)
		ifq->altq_subq_cnt = 1;
	ifq->altq_subq = kmalloc_cachealign(
	    ifq->altq_subq_cnt * sizeof(struct ifaltq_subque),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifq->altq_maxlen == 0) {
		if_printf(ifp, "driver didn't set altq_maxlen\n");
		ifq_set_maxlen(ifq, ifqmaxlen);
	}

	for (q = 0; q < ifq->altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifq->altq_subq[q];

		ALTQ_SQ_LOCK_INIT(ifsq);
		ifsq->ifsq_index = q;

		ifsq->ifsq_altq = ifq;
		ifsq->ifsq_ifp = ifp;

		ifsq->ifsq_maxlen = ifq->altq_maxlen;
		ifsq->ifsq_maxbcnt = ifsq->ifsq_maxlen * MCLBYTES;
		ifsq->ifsq_prepended = NULL;
		ifsq->ifsq_started = 0;
		ifsq->ifsq_hw_oactive = 0;
		ifsq_set_cpuid(ifsq, 0);
		if (ifp->if_serializer != NULL)
			ifsq_set_hw_serialize(ifsq, ifp->if_serializer);

		ifsq->ifsq_stage =
		    kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage),
		    M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < ncpus; ++i)
			ifsq->ifsq_stage[i].stg_subq = ifsq;

		ifsq->ifsq_ifstart_nmsg =
		    kmalloc(ncpus * sizeof(struct netmsg_base),
		    M_LWKTMSG, M_WAITOK);
		for (i = 0; i < ncpus; ++i) {
			netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL,
			    &netisr_adone_rport, 0, ifsq_ifstart_dispatch);
			ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq;
		}
	}
	ifq_set_classic(ifq);

	if (!SLIST_EMPTY(&domains))
		if_attachdomain1(ifp);

	TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
	lwkt_reltoken(&ifnet_token);

	/* Announce the interface. */
	EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
	devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
	rt_ifannouncemsg(ifp, IFAN_ARRIVAL);

	mtx_unlock(&ifp->if_ioctl_mtx);
}
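/*
 * Attach protocol domains to every interface that was created before
 * the domains themselves were registered; runs once via the SYSINIT
 * below.
 */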
static void
if_attachdomain(void *dummy)
{
	struct ifnet *ifp;

	crit_enter();
	TAILQ_FOREACH(ifp, &ifnet, if_list)
		if_attachdomain1(ifp);
	crit_exit();
}
SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
	if_attachdomain, NULL);

static void
if_attachdomain1(struct ifnet *ifp)
{
	struct domain *dp;

	crit_enter();

	/* address family dependent data region */
	bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
	SLIST_FOREACH(dp, &domains, dom_next)
		if (dp->dom_ifattach)
			ifp->if_afdata[dp->dom_family] =
				(*dp->dom_ifattach)(ifp);
	crit_exit();
}

/*
 * Purge all addresses whose type is _not_ AF_LINK
 */
void
if_purgeaddrs_nolink(struct ifnet *ifp)
{
	struct ifaddr_container *ifac, *next;

	TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
			      ifa_link, next) {
		struct ifaddr *ifa = ifac->ifa;

		/* Leave link ifaddr as it is */
		if (ifa->ifa_addr->sa_family == AF_LINK)
			continue;
#ifdef INET
		/* XXX: Ugly!! ad hoc just for INET */
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
			struct ifaliasreq ifr;
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in4 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			bzero(&ifr, sizeof ifr);
			ifr.ifra_addr = *ifa->ifa_addr;
			if (ifa->ifa_dstaddr)
				ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
			if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
				       NULL) == 0)
				continue;
		}
#endif /* INET */
#ifdef INET6
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in6 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			in6_purgeaddr(ifa);
			/* ifp_addrhead is already updated */
			continue;
		}
#endif /* INET6 */
		ifa_ifunlink(ifa, ifp);
		ifa_destroy(ifa);
	}
}

static void
ifq_stage_detach_handler(netmsg_t nmsg)
{
	struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp;
	int q;

	for (q = 0; q < ifq->altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
		struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);

		if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED)
			ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage);
	}
	lwkt_replymsg(&nmsg->lmsg, 0);
}

static void
ifq_stage_detach(struct ifaltq *ifq)
{
	struct netmsg_base base;
	int cpu;

	netmsg_init(&base, NULL, &curthread->td_msgport, 0,
	    ifq_stage_detach_handler);
	base.lmsg.u.ms_resultp = ifq;

	for (cpu = 0; cpu < ncpus; ++cpu)
		lwkt_domsg(netisr_cpuport(cpu), &base.lmsg, 0);
}

struct netmsg_if_rtdel {
	struct netmsg_base	base;
	struct ifnet		*ifp;
};
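/*
 * The route-deletion message below visits each CPU in turn: every
 * netisr walks its own per-CPU routing tables, then forwards the
 * message to the next CPU; the last CPU replies to the originator.
 */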
static void
if_rtdel_dispatch(netmsg_t msg)
{
	struct netmsg_if_rtdel *rmsg = (void *)msg;
	int i, nextcpu, cpu;

	cpu = mycpuid;
	for (i = 1; i <= AF_MAX; i++) {
		struct radix_node_head *rnh;

		if ((rnh = rt_tables[cpu][i]) == NULL)
			continue;
		rnh->rnh_walktree(rnh, if_rtdel, rmsg->ifp);
	}

	nextcpu = cpu + 1;
	if (nextcpu < ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
	else
		lwkt_replymsg(&rmsg->base.lmsg, 0);
}

/*
 * Detach an interface, removing it from the
 * list of "active" interfaces.
 */
void
if_detach(struct ifnet *ifp)
{
	struct netmsg_if_rtdel msg;
	struct domain *dp;
	int q;

	EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

	/*
	 * Remove routes and flush queues.
	 */
	crit_enter();
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		ifpoll_deregister(ifp);
#endif
	if_down(ifp);

#ifdef ALTQ
	if (ifq_is_enabled(&ifp->if_snd))
		altq_disable(&ifp->if_snd);
	if (ifq_is_attached(&ifp->if_snd))
		altq_detach(&ifp->if_snd);
#endif

	/*
	 * Clean up all addresses.
	 */
	ifp->if_lladdr = NULL;

	if_purgeaddrs_nolink(ifp);
	if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
		struct ifaddr *ifa;

		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
			("non-link ifaddr is left on if_addrheads"));

		ifa_ifunlink(ifa, ifp);
		ifa_destroy(ifa);
		KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
			("there are still ifaddrs left on if_addrheads"));
	}

#ifdef INET
	/*
	 * Remove all IPv4 kernel structures related to ifp.
	 */
	in_ifdetach(ifp);
#endif

#ifdef INET6
	/*
	 * Remove all IPv6 kernel structs related to ifp.  This should be done
	 * before removing routing entries below, since IPv6 interface direct
	 * routes are expected to be removed by the IPv6-specific kernel API.
	 * Otherwise, the kernel will detect some inconsistency and complain.
	 */
	in6_ifdetach(ifp);
#endif

	/*
	 * Delete all remaining routes using this interface
	 */
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    if_rtdel_dispatch);
	msg.ifp = ifp;
	rt_domsg_global(&msg.base);

	/* Announce that the interface is gone. */
	rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
	devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);

	SLIST_FOREACH(dp, &domains, dom_next)
		if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
			(*dp->dom_ifdetach)(ifp,
				ifp->if_afdata[dp->dom_family]);

	/*
	 * Remove interface from ifindex2ifp[] and maybe decrement if_index.
	 */
	lwkt_gettoken(&ifnet_token);
	ifindex2ifnet[ifp->if_index] = NULL;
	while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
		if_index--;
	TAILQ_REMOVE(&ifnet, ifp, if_link);
	lwkt_reltoken(&ifnet_token);

	kfree(ifp->if_addrheads, M_IFADDR);

	lwkt_synchronize_ipiqs("if_detach");
	ifq_stage_detach(&ifp->if_snd);

	for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q];

		kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG);
		kfree(ifsq->ifsq_stage, M_DEVBUF);
	}
	kfree(ifp->if_snd.altq_subq, M_DEVBUF);

	kfree(ifp->if_data_pcpu, M_DEVBUF);

	crit_exit();
}

/*
 * Create interface group without members
 */
struct ifg_group *
if_creategroup(const char *groupname)
{
	struct ifg_group *ifg = NULL;

	if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group),
	    M_TEMP, M_NOWAIT)) == NULL)
		return (NULL);

	strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
	ifg->ifg_refcnt = 0;
	ifg->ifg_carp_demoted = 0;
	TAILQ_INIT(&ifg->ifg_members);
#if NPF > 0
	pfi_attach_ifgroup(ifg);
#endif
	TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next);

	return (ifg);
}

/*
 * Add a group to an interface
 */
int
if_addgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list *ifgl;
	struct ifg_group *ifg = NULL;
	struct ifg_member *ifgm;

	if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
	    groupname[strlen(groupname) - 1] <= '9')
		return (EINVAL);

	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
			return (EEXIST);

	if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL)
		return (ENOMEM);

	if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) {
		kfree(ifgl, M_TEMP);
		return (ENOMEM);
	}

	TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, groupname))
			break;

	if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) {
		kfree(ifgl, M_TEMP);
		kfree(ifgm, M_TEMP);
		return (ENOMEM);
	}

	ifg->ifg_refcnt++;
	ifgl->ifgl_group = ifg;
	ifgm->ifgm_ifp = ifp;

	TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
	TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);

#if NPF > 0
	pfi_group_change(groupname);
#endif

	return (0);
}

/*
 * Remove a group from an interface
 */
int
if_delgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list *ifgl;
	struct ifg_member *ifgm;

	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
			break;
	if (ifgl == NULL)
		return (ENOENT);

	TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);

	TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
		if (ifgm->ifgm_ifp == ifp)
			break;

	if (ifgm != NULL) {
		TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
		kfree(ifgm, M_TEMP);
	}

	if (--ifgl->ifgl_group->ifg_refcnt == 0) {
		TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next);
#if NPF > 0
		pfi_detach_ifgroup(ifgl->ifgl_group);
#endif
		kfree(ifgl->ifgl_group, M_TEMP);
	}

	kfree(ifgl, M_TEMP);

#if NPF > 0
	pfi_group_change(groupname);
#endif

	return (0);
}
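/*
 * The two handlers below follow the usual ioctl sizing convention:
 * when the caller passes ifgr_len == 0 it is only asking how much
 * buffer space it needs, so we report the required size and copy
 * nothing out.
 */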
/*
 * Stores all groups from an interface in memory pointed
 * to by data
 */
int
if_getgroup(caddr_t data, struct ifnet *ifp)
{
	int len, error;
	struct ifg_list *ifgl;
	struct ifg_req ifgrq, *ifgp;
	struct ifgroupreq *ifgr = (struct ifgroupreq *)data;

	if (ifgr->ifgr_len == 0) {
		TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
			ifgr->ifgr_len += sizeof(struct ifg_req);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
		if (len < sizeof(ifgrq))
			return (EINVAL);
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
		    sizeof(ifgrq.ifgrq_group));
		if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
		    sizeof(struct ifg_req))))
			return (error);
		len -= sizeof(ifgrq);
		ifgp++;
	}

	return (0);
}

/*
 * Stores all members of a group in memory pointed to by data
 */
int
if_getgroupmembers(caddr_t data)
{
	struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
	struct ifg_group *ifg;
	struct ifg_member *ifgm;
	struct ifg_req ifgrq, *ifgp;
	int len, error;

	TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
			break;
	if (ifg == NULL)
		return (ENOENT);

	if (ifgr->ifgr_len == 0) {
		TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
			ifgr->ifgr_len += sizeof(ifgrq);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
		if (len < sizeof(ifgrq))
			return (EINVAL);
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
		    sizeof(ifgrq.ifgrq_member));
		if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
		    sizeof(struct ifg_req))))
			return (error);
		len -= sizeof(ifgrq);
		ifgp++;
	}

	return (0);
}

/*
 * Delete Routes for a Network Interface
 *
 * Called for each routing entry via the rnh->rnh_walktree() call above
 * to delete all route entries referencing a detaching network interface.
 *
 * Arguments:
 *	rn	pointer to node in the routing table
 *	arg	argument passed to rnh->rnh_walktree() - detaching interface
 *
 * Returns:
 *	0	successful
 *	errno	failed - reason indicated
 *
 */
static int
if_rtdel(struct radix_node *rn, void *arg)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct ifnet *ifp = arg;
	int err;

	if (rt->rt_ifp == ifp) {

		/*
		 * Protect (sorta) against walktree recursion problems
		 * with cloned routes
		 */
		if (!(rt->rt_flags & RTF_UP))
			return (0);

		err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
				rt_mask(rt), rt->rt_flags,
				NULL);
		if (err) {
			log(LOG_WARNING, "if_rtdel: error %d\n", err);
		}
	}

	return (0);
}
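/*
 * The ifa_ifwith* lookups below walk the per-CPU ifaddr lists
 * (if_addrheads[mycpuid]); they are meant to be called from a network
 * thread so the list stays stable for the duration of the walk.
 */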
/*
 * Locate an interface based on a complete address.
 */
struct ifaddr *
ifa_ifwithaddr(struct sockaddr *addr)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (sa_equal(addr, ifa->ifa_addr))
				return (ifa);
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    /* IPv6 doesn't have broadcast */
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr))
				return (ifa);
		}
	}
	return (NULL);
}

/*
 * Locate the point to point interface with a given destination address.
 */
struct ifaddr *
ifa_ifwithdstaddr(struct sockaddr *addr)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		if (!(ifp->if_flags & IFF_POINTOPOINT))
			continue;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (ifa->ifa_dstaddr &&
			    sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		}
	}
	return (NULL);
}

/*
 * Find an interface on a specific network.  If several match, the
 * choice is the most specific match found.
 */
struct ifaddr *
ifa_ifwithnet(struct sockaddr *addr)
{
	struct ifnet *ifp;
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;
	char *addr_data = addr->sa_data, *cplim;

	/*
	 * AF_LINK addresses can be looked up directly by their index number,
	 * so do that if we can.
	 */
	if (af == AF_LINK) {
		struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;

		if (sdl->sdl_index && sdl->sdl_index <= if_index)
			return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
	}

	/*
	 * Scan through each interface, looking for ones that have
	 * addresses in this address family.
	 */
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;
			char *cp, *cp2, *cp3;

			if (ifa->ifa_addr->sa_family != af)
next:				continue;
			if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
				/*
				 * This is a bit broken as it doesn't
				 * take into account that the remote end may
				 * be a single node in the network we are
				 * looking for.
				 * The trouble is that we don't know the
				 * netmask for the remote end.
				 */
				if (ifa->ifa_dstaddr != NULL &&
				    sa_equal(addr, ifa->ifa_dstaddr))
					return (ifa);
			} else {
				/*
				 * if we have a special address handler,
				 * then use it instead of the generic one.
				 */
				if (ifa->ifa_claim_addr) {
					if ((*ifa->ifa_claim_addr)(ifa, addr)) {
						return (ifa);
					} else {
						continue;
					}
				}

				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit disagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
				 * (A byte at a time)
				 */
				if (ifa->ifa_netmask == NULL)
					continue;
				cp = addr_data;
				cp2 = ifa->ifa_addr->sa_data;
				cp3 = ifa->ifa_netmask->sa_data;
				cplim = ifa->ifa_netmask->sa_len +
					(char *)ifa->ifa_netmask;
				while (cp3 < cplim)
					if ((*cp++ ^ *cp2++) & *cp3++)
						goto next; /* next address! */
				/*
				 * If the netmask of what we just found
				 * is more specific than what we had before
				 * (if we had one) then remember the new one
				 * before continuing to search
				 * for an even better one.
				 */
				if (ifa_maybe == NULL ||
				    rn_refines((char *)ifa->ifa_netmask,
					       (char *)ifa_maybe->ifa_netmask))
					ifa_maybe = ifa;
			}
		}
	}
	return (ifa_maybe);
}

/*
 * Find an interface address specific to an interface best matching
 * a given address.
 */
struct ifaddr *
ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
{
	struct ifaddr_container *ifac;
	char *cp, *cp2, *cp3;
	char *cplim;
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;

	if (af >= AF_MAX)
		return (NULL);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr->sa_family != af)
			continue;
		if (ifa_maybe == NULL)
			ifa_maybe = ifa;
		if (ifa->ifa_netmask == NULL) {
			if (sa_equal(addr, ifa->ifa_addr) ||
			    (ifa->ifa_dstaddr != NULL &&
			     sa_equal(addr, ifa->ifa_dstaddr)))
				return (ifa);
			continue;
		}
		if (ifp->if_flags & IFF_POINTOPOINT) {
			if (sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		} else {
			cp = addr->sa_data;
			cp2 = ifa->ifa_addr->sa_data;
			cp3 = ifa->ifa_netmask->sa_data;
			cplim = ifa->ifa_netmask->sa_len +
				(char *)ifa->ifa_netmask;
			for (; cp3 < cplim; cp3++)
				if ((*cp++ ^ *cp2++) & *cp3)
					break;
			if (cp3 == cplim)
				return (ifa);
		}
	}
	return (ifa_maybe);
}

/*
 * Default action when installing a route with a Link Level gateway.
 * Lookup an appropriate real ifa to point to.
 * This should be moved to /sys/net/link.c eventually.
 */
static void
link_rtrequest(int cmd, struct rtentry *rt)
{
	struct ifaddr *ifa;
	struct sockaddr *dst;
	struct ifnet *ifp;

	if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
	    (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
		return;
	ifa = ifaof_ifpforaddr(dst, ifp);
	if (ifa != NULL) {
		IFAFREE(rt->rt_ifa);
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
			ifa->ifa_rtrequest(cmd, rt);
	}
}

/*
 * Mark an interface down and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_unroute(struct ifnet *ifp, int flag, int fam)
{
	struct ifaddr_container *ifac;

	ifp->if_flags &= ~flag;
	getmicrotime(&ifp->if_lastchange);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
	}
	ifq_purge_all(&ifp->if_snd);
	rt_ifmsg(ifp);
}

/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_route(struct ifnet *ifp, int flag, int fam)
{
	struct ifaddr_container *ifac;

	ifq_purge_all(&ifp->if_snd);
	ifp->if_flags |= flag;
	getmicrotime(&ifp->if_lastchange);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFUP, ifa->ifa_addr);
	}
	rt_ifmsg(ifp);
#ifdef INET6
	in6_if_up(ifp);
#endif
}

/*
 * Mark an interface down and notify protocols of the transition.  An
 * interface going down is also considered to be a synchronizing event.
 * We must ensure that all packet processing related to the interface
 * has completed before we return so e.g. the caller can free the ifnet
 * structure that the mbufs may be referencing.
 *
 * NOTE: must be called at splnet or equivalent.
 */
void
if_down(struct ifnet *ifp)
{
	if_unroute(ifp, IFF_UP, AF_UNSPEC);
	netmsg_service_sync();
}

/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_up(struct ifnet *ifp)
{
	if_route(ifp, IFF_UP, AF_UNSPEC);
}

/*
 * Process a link state change.
 * NOTE: must be called at splsoftnet or equivalent.
 */
void
if_link_state_change(struct ifnet *ifp)
{
	int link_state = ifp->if_link_state;

	rt_ifmsg(ifp);
	devctl_notify("IFNET", ifp->if_xname,
	    (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
}

/*
 * Handle interface watchdog timer routines.  Called
 * from softclock, we decrement timers (if set) and
 * call the appropriate interface routine on expiration.
 */
static void
if_slowtimo(void *arg)
{
	struct ifnet *ifp;

	crit_enter();

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (if_stats_compat) {
			IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets);
			IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors);
			IFNET_STAT_GET(ifp, opackets, ifp->if_opackets);
			IFNET_STAT_GET(ifp, oerrors, ifp->if_oerrors);
			IFNET_STAT_GET(ifp, collisions, ifp->if_collisions);
			IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes);
			IFNET_STAT_GET(ifp, obytes, ifp->if_obytes);
			IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts);
			IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts);
			IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops);
			IFNET_STAT_GET(ifp, noproto, ifp->if_noproto);
		}

		if (ifp->if_timer == 0 || --ifp->if_timer)
			continue;
		if (ifp->if_watchdog) {
			if (ifnet_tryserialize_all(ifp)) {
				(*ifp->if_watchdog)(ifp);
				ifnet_deserialize_all(ifp);
			} else {
				/* try again next timeout */
				++ifp->if_timer;
			}
		}
	}

	crit_exit();

	callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
}

/*
 * Map interface name to
 * interface structure pointer.
 */
struct ifnet *
ifunit(const char *name)
{
	struct ifnet *ifp;

	/*
	 * Search all the interfaces for this name/number
	 */

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
			break;
	}
	return (ifp);
}
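/*
 * Illustrative use of ifunit() above: ifunit("em0") yields the ifnet
 * whose if_xname matches exactly, or NULL when no interface by that
 * name exists ("em0" is just a hypothetical name).
 */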
/*
 * Map interface name in a sockaddr_dl to
 * interface structure pointer.
 */
struct ifnet *
if_withname(struct sockaddr *sa)
{
	char ifname[IFNAMSIZ+1];
	struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;

	if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
	     (sdl->sdl_nlen > IFNAMSIZ) )
		return NULL;

	/*
	 * ifunit wants a null-terminated name.  It may not be null-terminated
	 * in the sockaddr.  We don't want to change the caller's sockaddr,
	 * and there might not be room to put the trailing null anyway, so we
	 * make a local copy that we know we can null terminate safely.
	 */

	bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
	ifname[sdl->sdl_nlen] = '\0';
	return ifunit(ifname);
}
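/*
 * ifioctl() below handles the generic SIOC* requests itself and, as a
 * last resort, hands anything it does not recognize to the socket's
 * protocol via so_pru_control_direct().
 */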
/*
 * Interface ioctls.
 */
int
ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifnet *ifp;
	struct ifreq *ifr;
	struct ifstat *ifs;
	int error;
	short oif_flags;
	int new_flags;
#ifdef COMPAT_43
	int ocmd;
#endif
	size_t namelen, onamelen;
	char new_name[IFNAMSIZ];
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;

	switch (cmd) {
	case SIOCGIFCONF:
	case OSIOCGIFCONF:
		return (ifconf(cmd, data, cred));
	default:
		break;
	}

	ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCIFCREATE:
	case SIOCIFCREATE2:
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			return (error);
		return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
			cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
	case SIOCIFDESTROY:
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			return (error);
		return (if_clone_destroy(ifr->ifr_name));
	case SIOCIFGCLONERS:
		return (if_clone_list((struct if_clonereq *)data));
	default:
		break;
	}

	/*
	 * Nominal ioctl through interface, lookup the ifp and obtain a
	 * lock to serialize the ifconfig ioctl operation.
	 */
	ifp = ifunit(ifr->ifr_name);
	if (ifp == NULL)
		return (ENXIO);
	error = 0;
	mtx_lock(&ifp->if_ioctl_mtx);

	switch (cmd) {
	case SIOCGIFINDEX:
		ifr->ifr_index = ifp->if_index;
		break;

	case SIOCGIFFLAGS:
		ifr->ifr_flags = ifp->if_flags;
		ifr->ifr_flagshigh = ifp->if_flags >> 16;
		break;

	case SIOCGIFCAP:
		ifr->ifr_reqcap = ifp->if_capabilities;
		ifr->ifr_curcap = ifp->if_capenable;
		break;

	case SIOCGIFMETRIC:
		ifr->ifr_metric = ifp->if_metric;
		break;

	case SIOCGIFMTU:
		ifr->ifr_mtu = ifp->if_mtu;
		break;

	case SIOCGIFTSOLEN:
		ifr->ifr_tsolen = ifp->if_tsolen;
		break;

	case SIOCGIFDATA:
		error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data,
				sizeof(ifp->if_data));
		break;

	case SIOCGIFPHYS:
		ifr->ifr_phys = ifp->if_physical;
		break;

	case SIOCGIFPOLLCPU:
		ifr->ifr_pollcpu = -1;
		break;

	case SIOCSIFPOLLCPU:
		break;

	case SIOCSIFFLAGS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		new_flags = (ifr->ifr_flags & 0xffff) |
		    (ifr->ifr_flagshigh << 16);
		if (ifp->if_flags & IFF_SMART) {
			/* Smart drivers twiddle their own routes */
		} else if (ifp->if_flags & IFF_UP &&
		    (new_flags & IFF_UP) == 0) {
			crit_enter();
			if_down(ifp);
			crit_exit();
		} else if (new_flags & IFF_UP &&
		    (ifp->if_flags & IFF_UP) == 0) {
			crit_enter();
			if_up(ifp);
			crit_exit();
		}

#ifdef IFPOLL_ENABLE
		if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
			if (new_flags & IFF_NPOLLING)
				ifpoll_register(ifp);
			else
				ifpoll_deregister(ifp);
		}
#endif

		ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
			(new_flags &~ IFF_CANTCHANGE);
		if (new_flags & IFF_PPROMISC) {
			/* Permanently promiscuous mode requested */
			ifp->if_flags |= IFF_PROMISC;
		} else if (ifp->if_pcount == 0) {
			ifp->if_flags &= ~IFF_PROMISC;
		}
		if (ifp->if_ioctl) {
			ifnet_serialize_all(ifp);
			ifp->if_ioctl(ifp, cmd, data, cred);
			ifnet_deserialize_all(ifp);
		}
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFCAP:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifr->ifr_reqcap & ~ifp->if_capabilities) {
			error = EINVAL;
			break;
		}
		ifnet_serialize_all(ifp);
		ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;

	case SIOCSIFNAME:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
		if (error)
			break;
		if (new_name[0] == '\0') {
			error = EINVAL;
			break;
		}
		if (ifunit(new_name) != NULL) {
			error = EEXIST;
			break;
		}

		EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

		/* Announce the departure of the interface. */
		rt_ifannouncemsg(ifp, IFAN_DEPARTURE);

		strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		/* XXX IFA_LOCK(ifa); */
		sdl = (struct sockaddr_dl *)ifa->ifa_addr;
		namelen = strlen(new_name);
		onamelen = sdl->sdl_nlen;
		/*
		 * Move the address if needed.  This is safe because we
		 * allocate space for a name of length IFNAMSIZ when we
		 * create this in if_attach().
		 */
		if (namelen != onamelen) {
			bcopy(sdl->sdl_data + onamelen,
			      sdl->sdl_data + namelen, sdl->sdl_alen);
		}
		bcopy(new_name, sdl->sdl_data, namelen);
		sdl->sdl_nlen = namelen;
		sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
		bzero(sdl->sdl_data, onamelen);
		while (namelen != 0)
			sdl->sdl_data[--namelen] = 0xff;
		/* XXX IFA_UNLOCK(ifa) */

		EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);

		/* Announce the return of the interface. */
		rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
		break;

	case SIOCSIFMETRIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		ifp->if_metric = ifr->ifr_metric;
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFPHYS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFMTU:
	{
		u_long oldmtu = ifp->if_mtu;

		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) {
			error = EINVAL;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0) {
			getmicrotime(&ifp->if_lastchange);
			rt_ifmsg(ifp);
		}
		/*
		 * If the link MTU changed, do network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu) {
#ifdef INET6
			nd6_setmtu(ifp);
#endif
		}
		break;
	}

	case SIOCSIFTSOLEN:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;

		/* XXX need driver supplied upper limit */
		if (ifr->ifr_tsolen <= 0) {
			error = EINVAL;
			break;
		}
		ifp->if_tsolen = ifr->ifr_tsolen;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;

		/* Don't allow group membership on non-multicast interfaces. */
		if ((ifp->if_flags & IFF_MULTICAST) == 0) {
			error = EOPNOTSUPP;
			break;
		}

		/* Don't let users screw up protocols' entries. */
		if (ifr->ifr_addr.sa_family != AF_LINK) {
			error = EINVAL;
			break;
		}

		if (cmd == SIOCADDMULTI) {
			struct ifmultiaddr *ifma;

			error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
		} else {
			error = if_delmulti(ifp, &ifr->ifr_addr);
		}
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFPHYADDR:
	case SIOCDIFPHYADDR:
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
#endif
	case SIOCSLIFPHYADDR:
	case SIOCSIFMEDIA:
	case SIOCSIFGENERIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		ifs->ascii[0] = '\0';
		/* fall through */
	case SIOCGIFPSRCADDR:
	case SIOCGIFPDSTADDR:
	case SIOCGLIFPHYADDR:
	case SIOCGIFMEDIA:
	case SIOCGIFGENERIC:
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;

	case SIOCSIFLLADDR:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		error = if_setlladdr(ifp, ifr->ifr_addr.sa_data,
				     ifr->ifr_addr.sa_len);
		EVENTHANDLER_INVOKE(iflladdr_event, ifp);
		break;

	default:
		oif_flags = ifp->if_flags;
		if (so->so_proto == NULL) {
			error = EOPNOTSUPP;
			break;
		}
#ifndef COMPAT_43
		error = so_pru_control_direct(so, cmd, data, ifp);
#else
		ocmd = cmd;

		switch (cmd) {
		case SIOCSIFDSTADDR:
		case SIOCSIFADDR:
		case SIOCSIFBRDADDR:
		case SIOCSIFNETMASK:
#if BYTE_ORDER != BIG_ENDIAN
			if (ifr->ifr_addr.sa_family == 0 &&
			    ifr->ifr_addr.sa_len < 16) {
				ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
				ifr->ifr_addr.sa_len = 16;
			}
#else
			if (ifr->ifr_addr.sa_len == 0)
				ifr->ifr_addr.sa_len = 16;
#endif
			break;
		case OSIOCGIFADDR:
			cmd = SIOCGIFADDR;
			break;
		case OSIOCGIFDSTADDR:
			cmd = SIOCGIFDSTADDR;
			break;
		case OSIOCGIFBRDADDR:
			cmd = SIOCGIFBRDADDR;
			break;
		case OSIOCGIFNETMASK:
			cmd = SIOCGIFNETMASK;
			break;
		default:
			break;
		}

		error = so_pru_control_direct(so, cmd, data, ifp);

		switch (ocmd) {
		case OSIOCGIFADDR:
		case OSIOCGIFDSTADDR:
		case OSIOCGIFBRDADDR:
		case OSIOCGIFNETMASK:
			*(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
			break;
		}
#endif /* COMPAT_43 */

		if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
#ifdef INET6
			DELAY(100);/* XXX: temporary workaround for fxp issue*/
			if (ifp->if_flags & IFF_UP) {
				crit_enter();
				in6_if_up(ifp);
				crit_exit();
			}
#endif
		}
		break;
	}

	mtx_unlock(&ifp->if_ioctl_mtx);
	return (error);
}
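/*
 * Illustrative example for ifpromisc() below: bpf(4) is a typical
 * consumer, calling ifpromisc(ifp, 1) when a listener asks for
 * promiscuous reception and ifpromisc(ifp, 0) when it detaches.
 */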
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(struct ifnet *ifp, int pswitch)
{
	struct ifreq ifr;
	int error;
	int oldflags;

	oldflags = ifp->if_flags;
	if (ifp->if_flags & IFF_PPROMISC) {
		/* Do nothing if device is in permanently promiscuous mode */
		ifp->if_pcount += pswitch ? 1 : -1;
		return (0);
	}
	if (pswitch) {
		/*
		 * If the device is not configured up, we cannot put it in
		 * promiscuous mode.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
		log(LOG_INFO, "%s: promiscuous mode enabled\n",
		    ifp->if_xname);
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
		log(LOG_INFO, "%s: promiscuous mode disabled\n",
		    ifp->if_xname);
	}
	ifr.ifr_flags = ifp->if_flags;
	ifr.ifr_flagshigh = ifp->if_flags >> 16;
	ifnet_serialize_all(ifp);
	error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
	ifnet_deserialize_all(ifp);
	if (error == 0)
		rt_ifmsg(ifp);
	else
		ifp->if_flags = oldflags;
	return error;
}

/*
 * Return the interface configuration of the system.  The returned
 * list may be used in later ioctls (above) to get other information.
 */
static int
ifconf(u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifconf *ifc = (struct ifconf *)data;
	struct ifnet *ifp;
	struct sockaddr *sa;
	struct ifreq ifr, *ifrp;
	int space = ifc->ifc_len, error = 0;

	ifrp = ifc->ifc_req;
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;
		int addrs;

		if (space <= sizeof ifr)
			break;

		/*
		 * Zero the stack declared structure first to prevent
		 * memory disclosure.
		 */
		bzero(&ifr, sizeof(ifr));
		if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
		    >= sizeof(ifr.ifr_name)) {
			error = ENAMETOOLONG;
			break;
		}

		addrs = 0;
		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (space <= sizeof ifr)
				break;
			sa = ifa->ifa_addr;
			if (cred->cr_prison &&
			    prison_if(cred, sa))
				continue;
			addrs++;
#ifdef COMPAT_43
			if (cmd == OSIOCGIFCONF) {
				struct osockaddr *osa =
					 (struct osockaddr *)&ifr.ifr_addr;
				ifr.ifr_addr = *sa;
				osa->sa_family = sa->sa_family;
				error = copyout(&ifr, ifrp, sizeof ifr);
				ifrp++;
			} else
#endif
			if (sa->sa_len <= sizeof(*sa)) {
				ifr.ifr_addr = *sa;
				error = copyout(&ifr, ifrp, sizeof ifr);
				ifrp++;
			} else {
				if (space < (sizeof ifr) + sa->sa_len -
					    sizeof(*sa))
					break;
				space -= sa->sa_len - sizeof(*sa);
				error = copyout(&ifr, ifrp,
						sizeof ifr.ifr_name);
				if (error == 0)
					error = copyout(sa, &ifrp->ifr_addr,
							sa->sa_len);
				ifrp = (struct ifreq *)
					(sa->sa_len + (caddr_t)&ifrp->ifr_addr);
			}
			if (error)
				break;
			space -= sizeof ifr;
		}
		if (error)
			break;
		if (!addrs) {
			bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr);
			error = copyout(&ifr, ifrp, sizeof ifr);
			if (error)
				break;
			space -= sizeof ifr;
			ifrp++;
		}
	}
	ifc->ifc_len -= space;
	return (error);
}

/*
 * Just like ifpromisc(), but for all-multicast-reception mode.
 */

/*
 * Just like ifpromisc(), but for all-multicast-reception mode.
 */
int
if_allmulti(struct ifnet *ifp, int onswitch)
{
	int error = 0;
	struct ifreq ifr;

	crit_enter();

	if (onswitch) {
		if (ifp->if_amcount++ == 0) {
			ifp->if_flags |= IFF_ALLMULTI;
			ifr.ifr_flags = ifp->if_flags;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			ifnet_serialize_all(ifp);
			error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
			    NULL);
			ifnet_deserialize_all(ifp);
		}
	} else {
		if (ifp->if_amcount > 1) {
			ifp->if_amcount--;
		} else {
			ifp->if_amcount = 0;
			ifp->if_flags &= ~IFF_ALLMULTI;
			ifr.ifr_flags = ifp->if_flags;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			ifnet_serialize_all(ifp);
			error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
			    NULL);
			ifnet_deserialize_all(ifp);
		}
	}

	crit_exit();

	if (error == 0)
		rt_ifmsg(ifp);
	return error;
}

/*
 * Add a multicast listenership to the interface in question.
 * The link layer provides a routine (if_resolvemulti) which converts
 * a network-layer multicast address into the corresponding link-layer
 * address, and which may also veto unsupported addresses.
 */
int
if_addmulti_serialized(struct ifnet *ifp, struct sockaddr *sa,
    struct ifmultiaddr **retifma)
{
	struct sockaddr *llsa, *dupsa;
	int error;
	struct ifmultiaddr *ifma;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If the matching multicast address already exists
	 * then don't add a new one, just add a reference.
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (sa_equal(sa, ifma->ifma_addr)) {
			ifma->ifma_refcount++;
			if (retifma)
				*retifma = ifma;
			return 0;
		}
	}

	/*
	 * Give the link layer a chance to accept/reject it, and also
	 * find out which AF_LINK address this maps to, if it isn't one
	 * already.
	 */
	if (ifp->if_resolvemulti) {
		error = ifp->if_resolvemulti(ifp, &llsa, sa);
		if (error)
			return error;
	} else {
		llsa = NULL;
	}

	ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_WAITOK);
	dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_WAITOK);
	bcopy(sa, dupsa, sa->sa_len);

	ifma->ifma_addr = dupsa;
	ifma->ifma_lladdr = llsa;
	ifma->ifma_ifp = ifp;
	ifma->ifma_refcount = 1;
	ifma->ifma_protospec = NULL;
	rt_newmaddrmsg(RTM_NEWMADDR, ifma);

	TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
	if (retifma)
		*retifma = ifma;

	if (llsa != NULL) {
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (sa_equal(ifma->ifma_addr, llsa))
				break;
		}
		if (ifma) {
			ifma->ifma_refcount++;
		} else {
			ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_WAITOK);
			dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_WAITOK);
			bcopy(llsa, dupsa, llsa->sa_len);
			ifma->ifma_addr = dupsa;
			ifma->ifma_ifp = ifp;
			ifma->ifma_refcount = 1;
			TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma,
			    ifma_link);
		}
	}
	/*
	 * We are certain we have added something, so call down to the
	 * interface to let it know about it.
	 */
	if (ifp->if_ioctl)
		ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);

	return 0;
}

int
if_addmulti(struct ifnet *ifp, struct sockaddr *sa,
    struct ifmultiaddr **retifma)
{
	int error;

	ifnet_serialize_all(ifp);
	error = if_addmulti_serialized(ifp, sa, retifma);
	ifnet_deserialize_all(ifp);

	return error;
}

/*
 * Remove a reference to a multicast address on this interface.  Yell
 * if the request does not match an existing membership.
 */
static int
if_delmulti_serialized(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifmultiaddr *ifma;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == NULL)
		return ENOENT;

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return 0;
	}

	rt_newmaddrmsg(RTM_DELMADDR, ifma);
	sa = ifma->ifma_lladdr;
	TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
	/*
	 * Make sure the interface driver is notified
	 * in the case of a link layer mcast group being left.
	 */
	if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL)
		ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(ifma, M_IFMADDR);
	if (sa == NULL)
		return 0;

	/*
	 * Now look for the link-layer address which corresponds to
	 * this network address.  It had been squirreled away in
	 * ifma->ifma_lladdr for this purpose (so we don't have
	 * to call ifp->if_resolvemulti() again), and we saved that
	 * value in sa above.  If some nasty deleted the
	 * link-layer address out from underneath us, we can deal because
	 * the address we stored is not the same as the one which was
	 * in the record for the link-layer address.  (So we don't complain
	 * in that case.)
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == NULL)
		return 0;

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return 0;
	}

	TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
	ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(sa, M_IFMADDR);
	kfree(ifma, M_IFMADDR);

	return 0;
}

int
if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
{
	int error;

	ifnet_serialize_all(ifp);
	error = if_delmulti_serialized(ifp, sa);
	ifnet_deserialize_all(ifp);

	return error;
}
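
/*
 * Illustrative sketch (not compiled): a protocol joining and later
 * leaving a multicast group through the reference-counted pair
 * if_addmulti()/if_delmulti() defined above.  The sockaddr setup is
 * schematic; a real caller passes a fully formed multicast address.
 */
#if 0
static void
example_join_leave(struct ifnet *ifp, struct sockaddr *group)
{
	struct ifmultiaddr *ifma;

	/* A second join of the same group only bumps ifma_refcount. */
	if (if_addmulti(ifp, group, &ifma) == 0) {
		/* ... receive traffic for the group ... */
		if_delmulti(ifp, group);	/* drops the reference */
	}
}
#endif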

/*
 * Delete all multicast group membership for an interface.
 * Should be used to quickly flush all multicast filters.
 */
void
if_delallmulti_serialized(struct ifnet *ifp)
{
	struct ifmultiaddr *ifma, mark;
	struct sockaddr sa;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	bzero(&sa, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	sa.sa_len = sizeof(sa);

	bzero(&mark, sizeof(mark));
	mark.ifma_addr = &sa;

	TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, &mark, ifma_link);
	while ((ifma = TAILQ_NEXT(&mark, ifma_link)) != NULL) {
		TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link);
		TAILQ_INSERT_AFTER(&ifp->if_multiaddrs, ifma, &mark,
		    ifma_link);

		if (ifma->ifma_addr->sa_family == AF_UNSPEC)
			continue;

		if_delmulti_serialized(ifp, ifma->ifma_addr);
	}
	TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link);
}

/*
 * Set the link layer address on an interface.
 *
 * At this time we only support certain types of interfaces,
 * and we don't allow the length of the address to change.
 */
int
if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
{
	struct sockaddr_dl *sdl;
	struct ifreq ifr;

	sdl = IF_LLSOCKADDR(ifp);
	if (sdl == NULL)
		return (EINVAL);
	if (len != sdl->sdl_alen)	/* don't allow length to change */
		return (EINVAL);
	switch (ifp->if_type) {
	case IFT_ETHER:			/* these types use struct arpcom */
	case IFT_XETHER:
	case IFT_L2VLAN:
	case IFT_IEEE8023ADLAG:
		bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
		bcopy(lladdr, LLADDR(sdl), len);
		break;
	default:
		return (ENODEV);
	}
	/*
	 * If the interface is already up, we need
	 * to re-init it in order to reprogram its
	 * address filter.
	 */
	ifnet_serialize_all(ifp);
	if ((ifp->if_flags & IFF_UP) != 0) {
#ifdef INET
		struct ifaddr_container *ifac;
#endif

		ifp->if_flags &= ~IFF_UP;
		ifr.ifr_flags = ifp->if_flags;
		ifr.ifr_flagshigh = ifp->if_flags >> 16;
		ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
		ifp->if_flags |= IFF_UP;
		ifr.ifr_flags = ifp->if_flags;
		ifr.ifr_flagshigh = ifp->if_flags >> 16;
		ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
#ifdef INET
		/*
		 * Also send gratuitous ARPs to notify other nodes about
		 * the address change.
		 */
		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr != NULL &&
			    ifa->ifa_addr->sa_family == AF_INET)
				arp_gratuitous(ifp, ifa);
		}
#endif
	}
	ifnet_deserialize_all(ifp);
	return (0);
}

struct ifmultiaddr *
ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
{
	struct ifmultiaddr *ifma;

	/* TODO: need ifnet_serialize_main */
	ifnet_serialize_all(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(ifma->ifma_addr, sa))
			break;
	ifnet_deserialize_all(ifp);

	return ifma;
}
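
/*
 * Illustrative sketch (not compiled): changing an ethernet interface's
 * MAC with if_setlladdr() above.  The length must equal the current
 * sdl_alen (ETHER_ADDR_LEN for ethernet); other lengths are rejected
 * with EINVAL and unsupported interface types with ENODEV.
 */
#if 0
static int
example_set_mac(struct ifnet *ifp)
{
	/* Hypothetical locally administered address. */
	static const u_char new_mac[ETHER_ADDR_LEN] =
	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	return (if_setlladdr(ifp, new_mac, ETHER_ADDR_LEN));
}
#endif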

/*
 * This function locates the first real ethernet MAC from a network
 * card and loads it into node, returning 0 on success or ENOENT if
 * no suitable interfaces were found.  It is used by the uuid code to
 * generate a unique 6-byte number.
 */
int
if_getanyethermac(uint16_t *node, int minlen)
{
	struct ifnet *ifp;
	struct sockaddr_dl *sdl;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (ifp->if_type != IFT_ETHER)
			continue;
		sdl = IF_LLSOCKADDR(ifp);
		if (sdl->sdl_alen < minlen)
			continue;
		bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
		    minlen);
		return (0);
	}
	return (ENOENT);
}

/*
 * The name argument must be a pointer to storage which will last as
 * long as the interface does.  For physical devices, the result of
 * device_get_name(dev) is a good choice and for pseudo-devices a
 * static string works well.
 */
void
if_initname(struct ifnet *ifp, const char *name, int unit)
{
	ifp->if_dname = name;
	ifp->if_dunit = unit;
	if (unit != IF_DUNIT_NONE)
		ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
	else
		strlcpy(ifp->if_xname, name, IFNAMSIZ);
}

int
if_printf(struct ifnet *ifp, const char *fmt, ...)
{
	__va_list ap;
	int retval;

	retval = kprintf("%s: ", ifp->if_xname);
	__va_start(ap, fmt);
	retval += kvprintf(fmt, ap);
	__va_end(ap);
	return (retval);
}

struct ifnet *
if_alloc(uint8_t type)
{
	struct ifnet *ifp;
	size_t size;

	/*
	 * XXX temporary hack until arpcom is setup in if_l2com
	 */
	if (type == IFT_ETHER)
		size = sizeof(struct arpcom);
	else
		size = sizeof(struct ifnet);

	ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO);

	ifp->if_type = type;

	if (if_com_alloc[type] != NULL) {
		ifp->if_l2com = if_com_alloc[type](type, ifp);
		if (ifp->if_l2com == NULL) {
			kfree(ifp, M_IFNET);
			return (NULL);
		}
	}
	return (ifp);
}

void
if_free(struct ifnet *ifp)
{
	kfree(ifp, M_IFNET);
}

void
ifq_set_classic(struct ifaltq *ifq)
{
	ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq,
	    ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request);
}

void
ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq,
    ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request)
{
	int q;

	KASSERT(mapsubq != NULL, ("mapsubq is not specified"));
	KASSERT(enqueue != NULL, ("enqueue is not specified"));
	KASSERT(dequeue != NULL, ("dequeue is not specified"));
	KASSERT(request != NULL, ("request is not specified"));

	ifq->altq_mapsubq = mapsubq;
	for (q = 0; q < ifq->altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifq->altq_subq[q];

		ifsq->ifsq_enqueue = enqueue;
		ifsq->ifsq_dequeue = dequeue;
		ifsq->ifsq_request = request;
	}
}

static void
ifsq_norm_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m)
{
	m->m_nextpkt = NULL;
	if (ifsq->ifsq_norm_tail == NULL)
		ifsq->ifsq_norm_head = m;
	else
		ifsq->ifsq_norm_tail->m_nextpkt = m;
	ifsq->ifsq_norm_tail = m;
	ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len);
}

static void
ifsq_prio_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m)
{
	m->m_nextpkt = NULL;
	if (ifsq->ifsq_prio_tail == NULL)
		ifsq->ifsq_prio_head = m;
	else
		ifsq->ifsq_prio_tail->m_nextpkt = m;
	ifsq->ifsq_prio_tail = m;
	ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len);
	ALTQ_SQ_PRIO_CNTR_INC(ifsq, m->m_pkthdr.len);
}
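
/*
 * Illustrative sketch (not compiled): a driver substituting its own
 * subqueue methods via ifq_set_methods() above.  The mydrv_* callbacks
 * are hypothetical; they must match the ifsq_*_t signatures used by
 * ifsq_classic_enqueue/dequeue/request.  ifq_mapsubq_default() is the
 * real single-subqueue mapper defined later in this file.
 */
#if 0
static int  mydrv_enqueue(struct ifaltq_subque *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *mydrv_dequeue(struct ifaltq_subque *, int);
static int  mydrv_request(struct ifaltq_subque *, int, void *);

static void
mydrv_setup_ifq(struct ifnet *ifp)
{
	ifq_set_methods(&ifp->if_snd, ifq_mapsubq_default,
	    mydrv_enqueue, mydrv_dequeue, mydrv_request);
}
#endif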

static struct mbuf *
ifsq_norm_dequeue(struct ifaltq_subque *ifsq)
{
	struct mbuf *m;

	m = ifsq->ifsq_norm_head;
	if (m != NULL) {
		if ((ifsq->ifsq_norm_head = m->m_nextpkt) == NULL)
			ifsq->ifsq_norm_tail = NULL;
		m->m_nextpkt = NULL;
		ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len);
	}
	return m;
}

static struct mbuf *
ifsq_prio_dequeue(struct ifaltq_subque *ifsq)
{
	struct mbuf *m;

	m = ifsq->ifsq_prio_head;
	if (m != NULL) {
		if ((ifsq->ifsq_prio_head = m->m_nextpkt) == NULL)
			ifsq->ifsq_prio_tail = NULL;
		m->m_nextpkt = NULL;
		ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len);
		ALTQ_SQ_PRIO_CNTR_DEC(ifsq, m->m_pkthdr.len);
	}
	return m;
}

int
ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pa __unused)
{
	M_ASSERTPKTHDR(m);

	if (ifsq->ifsq_len >= ifsq->ifsq_maxlen ||
	    ifsq->ifsq_bcnt >= ifsq->ifsq_maxbcnt) {
		if ((m->m_flags & M_PRIO) &&
		    ifsq->ifsq_prio_len < (ifsq->ifsq_maxlen / 2) &&
		    ifsq->ifsq_prio_bcnt < (ifsq->ifsq_maxbcnt / 2)) {
			struct mbuf *m_drop;

			/*
			 * Perform drop-head on normal queue
			 */
			m_drop = ifsq_norm_dequeue(ifsq);
			if (m_drop != NULL) {
				m_freem(m_drop);
				ifsq_prio_enqueue(ifsq, m);
				return 0;
			}
			/* XXX nothing could be dropped? */
		}
		m_freem(m);
		return ENOBUFS;
	} else {
		if (m->m_flags & M_PRIO)
			ifsq_prio_enqueue(ifsq, m);
		else
			ifsq_norm_enqueue(ifsq, m);
		return 0;
	}
}

struct mbuf *
ifsq_classic_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct mbuf *m;

	switch (op) {
	case ALTDQ_POLL:
		m = ifsq->ifsq_prio_head;
		if (m == NULL)
			m = ifsq->ifsq_norm_head;
		break;

	case ALTDQ_REMOVE:
		m = ifsq_prio_dequeue(ifsq);
		if (m == NULL)
			m = ifsq_norm_dequeue(ifsq);
		break;

	default:
		panic("unsupported ALTQ dequeue op: %d", op);
	}
	return m;
}

int
ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	switch (req) {
	case ALTRQ_PURGE:
		for (;;) {
			struct mbuf *m;

			m = ifsq_classic_dequeue(ifsq, ALTDQ_REMOVE);
			if (m == NULL)
				break;
			m_freem(m);
		}
		break;

	default:
		panic("unsupported ALTQ request: %d", req);
	}
	return 0;
}
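
/*
 * Illustrative sketch (not compiled): how a driver's stop routine might
 * flush any packets still sitting on a subqueue, using the classic
 * request op above.  ifq_get_subq_default() is assumed here to be the
 * default-subqueue accessor from ifq_var.h.
 */
#if 0
static void
mydrv_stop(struct ifnet *ifp)
{
	struct ifaltq_subque *ifsq = ifq_get_subq_default(&ifp->if_snd);

	ifsq->ifsq_request(ifsq, ALTRQ_PURGE, NULL);
}
#endif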

static void
ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	int running = 0, need_sched;

	/*
	 * Try to do direct ifnet.if_start on the subqueue first; if there is
	 * contention on the subqueue hardware serializer, ifnet.if_start on
	 * the subqueue will be scheduled on the subqueue owner CPU.
	 */
	if (!ifsq_tryserialize_hw(ifsq)) {
		/*
		 * Subqueue hardware serializer contention happened,
		 * ifnet.if_start on the subqueue is scheduled on
		 * the subqueue owner CPU, and we keep going.
		 */
		ifsq_ifstart_schedule(ifsq, 1);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
		ifp->if_start(ifp, ifsq);
		if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
			running = 1;
	}
	need_sched = ifsq_ifstart_need_schedule(ifsq, running);

	ifsq_deserialize_hw(ifsq);

	if (need_sched) {
		/*
		 * More data needs to be transmitted, ifnet.if_start on the
		 * subqueue is scheduled on the subqueue owner CPU, and we
		 * keep going.
		 * NOTE: ifnet.if_start subqueue interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, force_sched);
	}
}

/*
 * Subqueue packet staging mechanism:
 *
 * Packets enqueued into a subqueue are staged until a certain amount has
 * accumulated before ifnet.if_start is called on the subqueue.  In this
 * way the driver can avoid writing to hardware registers upon every
 * packet; instead, hardware registers are written once a batch of packets
 * has been put onto the hardware TX ring.  Measurements on several modern
 * NICs (emx(4), igb(4), bnx(4), bge(4), jme(4)) show that this aggregation
 * of hardware register writes can save ~20% CPU time when 18-byte UDP
 * datagrams are transmitted at 1.48 Mpps.  The performance improvement
 * from aggregating hardware register writes is also mentioned in Luigi
 * Rizzo's netmap paper (http://info.iet.unipi.it/~luigi/netmap/).
 *
 * Subqueue packet staging is performed for two entry points into the
 * drivers' transmission function:
 * - Direct ifnet.if_start calling on the subqueue,
 *   i.e. ifsq_ifstart_try()
 * - ifnet.if_start scheduling on the subqueue,
 *   i.e. ifsq_ifstart_schedule()
 *
 * Subqueue packet staging will be stopped upon any of the following
 * conditions:
 * - If the count of packets enqueued on the current CPU is greater than
 *   or equal to ifsq_stage_cntmax. (XXX this should be per-interface)
 * - If the total length of packets enqueued on the current CPU is greater
 *   than or equal to the hardware's MTU - max_protohdr.  max_protohdr is
 *   cut from the hardware's MTU mainly because a full TCP segment's size
 *   is usually less than the hardware's MTU.
 * - ifsq_ifstart_schedule() is not pending on the current CPU and the
 *   ifnet.if_start subqueue interlock (ifaltq_subq.ifsq_started) is not
 *   released.
 * - if_start_rollup(), which is registered as a low priority netisr
 *   rollup function, is called; probably because no more work is pending
 *   for the netisr.
 *
 * NOTE:
 * Currently subqueue packet staging is only performed in netisr threads.
 */
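
/*
 * Worked example of the staging cut-offs above, assuming the default
 * ifsq_stage_cntmax of 4, a 1500-byte MTU, and a max_protohdr of 60
 * (max_protohdr is computed at domain init time, so 60 is only an
 * assumption for illustration): a netisr thread stages at most 4
 * packets, and flushes early once the staged bytes reach
 * 1500 - 60 = 1440, i.e. as soon as roughly one full-sized frame's
 * worth of data is buffered.
 */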

int
ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
{
	struct ifaltq *ifq = &ifp->if_snd;
	struct ifaltq_subque *ifsq;
	int error, start = 0, len, mcast = 0, avoid_start = 0;
	struct ifsubq_stage_head *head = NULL;
	struct ifsubq_stage *stage = NULL;
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;

	crit_enter_quick(td);

	ifsq = ifq_map_subq(ifq, gd->gd_cpuid);
	ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq);

	len = m->m_pkthdr.len;
	if (m->m_flags & M_MCAST)
		mcast = 1;

	if (td->td_type == TD_TYPE_NETISR) {
		head = &ifsubq_stage_heads[mycpuid];
		stage = ifsq_get_stage(ifsq, mycpuid);

		stage->stg_cnt++;
		stage->stg_len += len;
		if (stage->stg_cnt < ifsq_stage_cntmax &&
		    stage->stg_len < (ifp->if_mtu - max_protohdr))
			avoid_start = 1;
	}

	ALTQ_SQ_LOCK(ifsq);
	error = ifsq_enqueue_locked(ifsq, m, pa);
	if (error) {
		if (!ifsq_data_ready(ifsq)) {
			ALTQ_SQ_UNLOCK(ifsq);
			crit_exit_quick(td);
			return error;
		}
		avoid_start = 0;
	}
	if (!ifsq_is_started(ifsq)) {
		if (avoid_start) {
			ALTQ_SQ_UNLOCK(ifsq);

			KKASSERT(!error);
			if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
				ifsq_stage_insert(head, stage);

			IFNET_STAT_INC(ifp, obytes, len);
			if (mcast)
				IFNET_STAT_INC(ifp, omcasts, 1);
			crit_exit_quick(td);
			return error;
		}

		/*
		 * Hold the subqueue interlock of ifnet.if_start
		 */
		ifsq_set_started(ifsq);
		start = 1;
	}
	ALTQ_SQ_UNLOCK(ifsq);

	if (!error) {
		IFNET_STAT_INC(ifp, obytes, len);
		if (mcast)
			IFNET_STAT_INC(ifp, omcasts, 1);
	}

	if (stage != NULL) {
		if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) {
			KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
			if (!avoid_start) {
				ifsq_stage_remove(head, stage);
				ifsq_ifstart_schedule(ifsq, 1);
			}
			crit_exit_quick(td);
			return error;
		}

		if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) {
			ifsq_stage_remove(head, stage);
		} else {
			stage->stg_cnt = 0;
			stage->stg_len = 0;
		}
	}

	if (!start) {
		crit_exit_quick(td);
		return error;
	}

	ifsq_ifstart_try(ifsq, 0);

	crit_exit_quick(td);
	return error;
}

void *
ifa_create(int size, int flags)
{
	struct ifaddr *ifa;
	int i;

	KASSERT(size >= sizeof(*ifa), ("ifaddr size too small"));

	ifa = kmalloc(size, M_IFADDR, flags | M_ZERO);
	if (ifa == NULL)
		return NULL;

	ifa->ifa_containers =
	    kmalloc_cachealign(ncpus * sizeof(struct ifaddr_container),
		M_IFADDR, M_WAITOK | M_ZERO);
	ifa->ifa_ncnt = ncpus;
	for (i = 0; i < ncpus; ++i) {
		struct ifaddr_container *ifac = &ifa->ifa_containers[i];

		ifac->ifa_magic = IFA_CONTAINER_MAGIC;
		ifac->ifa = ifa;
		ifac->ifa_refcnt = 1;
	}
#ifdef IFADDR_DEBUG
	kprintf("alloc ifa %p %d\n", ifa, size);
#endif
	return ifa;
}

void
ifac_free(struct ifaddr_container *ifac, int cpu_id)
{
	struct ifaddr *ifa = ifac->ifa;

	KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
	KKASSERT(ifac->ifa_refcnt == 0);
	KASSERT(ifac->ifa_listmask == 0,
	    ("ifa is still on %#x lists", ifac->ifa_listmask));

	ifac->ifa_magic = IFA_CONTAINER_DEAD;

#ifdef IFADDR_DEBUG_VERBOSE
	kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
#endif

	KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
	    ("invalid # of ifac, %d", ifa->ifa_ncnt));
	if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
#ifdef IFADDR_DEBUG
		kprintf("free ifa %p\n", ifa);
#endif
		kfree(ifa->ifa_containers, M_IFADDR);
		kfree(ifa, M_IFADDR);
	}
}

static void
ifa_iflink_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
	struct ifaddr *ifa = msg->ifa;
	struct ifnet *ifp = msg->ifp;
	int cpu = mycpuid;
	struct ifaddr_container *ifac;

	crit_enter();

	ifac = &ifa->ifa_containers[cpu];
	ASSERT_IFAC_VALID(ifac);
	KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
	    ("ifaddr is on if_addrheads"));

	ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
	if (msg->tail)
		TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
	else
		TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);

	crit_exit();

	ifa_forwardmsg(&nmsg->lmsg, cpu + 1);
}

void
ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, ifa_iflink_dispatch);
	msg.ifa = ifa;
	msg.ifp = ifp;
	msg.tail = tail;

	ifa_domsg(&msg.base.lmsg, 0);
}

static void
ifa_ifunlink_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
	struct ifaddr *ifa = msg->ifa;
	struct ifnet *ifp = msg->ifp;
	int cpu = mycpuid;
	struct ifaddr_container *ifac;

	crit_enter();

	ifac = &ifa->ifa_containers[cpu];
	ASSERT_IFAC_VALID(ifac);
	KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
	    ("ifaddr is not on if_addrhead"));

	TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
	ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;

	crit_exit();

	ifa_forwardmsg(&nmsg->lmsg, cpu + 1);
}

void
ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, ifa_ifunlink_dispatch);
	msg.ifa = ifa;
	msg.ifp = ifp;

	ifa_domsg(&msg.base.lmsg, 0);
}

static void
ifa_destroy_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;

	IFAFREE(msg->ifa);
	ifa_forwardmsg(&nmsg->lmsg, mycpuid + 1);
}

void
ifa_destroy(struct ifaddr *ifa)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, ifa_destroy_dispatch);
	msg.ifa = ifa;

	ifa_domsg(&msg.base.lmsg, 0);
}

struct lwkt_port *
ifnet_portfn(int cpu)
{
	return &ifnet_threads[cpu].td_msgport;
}

void
ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu)
{
	KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus);

	if (next_cpu < ncpus)
		lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg);
	else
		lwkt_replymsg(lmsg, 0);
}

int
ifnet_domsg(struct lwkt_msg *lmsg, int cpu)
{
	KKASSERT(cpu < ncpus);
	return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0);
}

void
ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu)
{
	KKASSERT(cpu < ncpus);
	lwkt_sendmsg(ifnet_portfn(cpu), lmsg);
}

/*
 * Generic netmsg service loop.  Some protocols may roll their own,
 * but all must perform the basic command dispatch done here.
 */
static void
ifnet_service_loop(void *arg __unused)
{
	netmsg_t msg;

	while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) {
		KASSERT(msg->base.nm_dispatch, ("ifnet_service: badmsg"));
		msg->base.nm_dispatch(msg);
	}
}

static void
if_start_rollup(void)
{
	struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid];
	struct ifsubq_stage *stage;

	crit_enter();

	while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) {
		struct ifaltq_subque *ifsq = stage->stg_subq;
		int is_sched = 0;

		if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)
			is_sched = 1;
		ifsq_stage_remove(head, stage);

		if (is_sched) {
			ifsq_ifstart_schedule(ifsq, 1);
		} else {
			int start = 0;

			ALTQ_SQ_LOCK(ifsq);
			if (!ifsq_is_started(ifsq)) {
				/*
				 * Hold the subqueue interlock of
				 * ifnet.if_start
				 */
				ifsq_set_started(ifsq);
				start = 1;
			}
			ALTQ_SQ_UNLOCK(ifsq);

			if (start)
				ifsq_ifstart_try(ifsq, 1);
		}
		KKASSERT((stage->stg_flags &
		    (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
	}

	crit_exit();
}

static void
ifnetinit(void *dummy __unused)
{
	int i;

	for (i = 0; i < ncpus; ++i) {
		struct thread *thr = &ifnet_threads[i];

		lwkt_create(ifnet_service_loop, NULL, NULL,
		    thr, TDF_NOSTART|TDF_FORCE_SPINPORT|TDF_FIXEDCPU,
		    i, "ifnet %d", i);
		netmsg_service_port_init(&thr->td_msgport);
		lwkt_schedule(thr);
	}

	for (i = 0; i < ncpus; ++i)
		TAILQ_INIT(&ifsubq_stage_heads[i].stg_head);
	netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART);
}

void
if_register_com_alloc(u_char type,
    if_com_alloc_t *a, if_com_free_t *f)
{
	KASSERT(if_com_alloc[type] == NULL,
	    ("if_register_com_alloc: %d already registered", type));
	KASSERT(if_com_free[type] == NULL,
	    ("if_register_com_alloc: %d free already registered", type));

	if_com_alloc[type] = a;
	if_com_free[type] = f;
}

void
if_deregister_com_alloc(u_char type)
{
	KASSERT(if_com_alloc[type] != NULL,
	    ("if_deregister_com_alloc: %d not registered", type));
	KASSERT(if_com_free[type] != NULL,
	    ("if_deregister_com_alloc: %d free not registered", type));
	if_com_alloc[type] = NULL;
	if_com_free[type] = NULL;
}

int
if_ring_count2(int cnt, int cnt_max)
{
	int shift = 0;

	KASSERT(cnt_max >= 1 && powerof2(cnt_max),
	    ("invalid ring count max %d", cnt_max));

	if (cnt <= 0)
		cnt = cnt_max;
	if (cnt > ncpus2)
		cnt = ncpus2;
	if (cnt > cnt_max)
		cnt = cnt_max;

	while ((1 << (shift + 1)) <= cnt)
		++shift;
	cnt = 1 << shift;

	KASSERT(cnt >= 1 && cnt <= ncpus2 && cnt <= cnt_max,
	    ("calculated cnt %d, ncpus2 %d, cnt max %d",
	     cnt, ncpus2, cnt_max));
	return cnt;
}

void
ifq_set_maxlen(struct ifaltq *ifq, int len)
{
	ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax);
}

int
ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused)
{
	return ALTQ_SUBQ_INDEX_DEFAULT;
}
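
/*
 * Worked example for if_ring_count2() above: with cnt = 0 (autoselect),
 * cnt_max = 16 and ncpus2 = 8, cnt first becomes 16, is clamped to 8,
 * and is then rounded down to the largest power of 2 not exceeding it,
 * so 8 rings are used.  With cnt = 5 under the same limits the result
 * would be 4.
 */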

int
ifq_mapsubq_mask(struct ifaltq *ifq, int cpuid)
{
	return (cpuid & ifq->altq_subq_mask);
}

static void
ifsq_watchdog(void *arg)
{
	struct ifsubq_watchdog *wd = arg;
	struct ifnet *ifp;

	if (__predict_true(wd->wd_timer == 0 || --wd->wd_timer))
		goto done;

	ifp = ifsq_get_ifp(wd->wd_subq);
	if (ifnet_tryserialize_all(ifp)) {
		wd->wd_watchdog(wd->wd_subq);
		ifnet_deserialize_all(ifp);
	} else {
		/* try again next timeout */
		wd->wd_timer = 1;
	}
done:
	ifsq_watchdog_reset(wd);
}

static void
ifsq_watchdog_reset(struct ifsubq_watchdog *wd)
{
	callout_reset_bycpu(&wd->wd_callout, hz, ifsq_watchdog, wd,
	    ifsq_get_cpuid(wd->wd_subq));
}

void
ifsq_watchdog_init(struct ifsubq_watchdog *wd, struct ifaltq_subque *ifsq,
    ifsq_watchdog_t watchdog)
{
	callout_init_mp(&wd->wd_callout);
	wd->wd_timer = 0;
	wd->wd_subq = ifsq;
	wd->wd_watchdog = watchdog;
}

void
ifsq_watchdog_start(struct ifsubq_watchdog *wd)
{
	wd->wd_timer = 0;
	ifsq_watchdog_reset(wd);
}

void
ifsq_watchdog_stop(struct ifsubq_watchdog *wd)
{
	wd->wd_timer = 0;
	callout_stop(&wd->wd_callout);
}
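
/*
 * Illustrative sketch (not compiled): a driver using the subqueue
 * watchdog helpers above.  The mydrv_* names and softc layout are
 * hypothetical; ifsq_watchdog_init/start and ifq_get_subq_default()
 * (assumed from ifq_var.h) are the real APIs.  The callback fires once
 * wd_timer, which the driver's transmit path arms, counts down to zero.
 */
#if 0
struct mydrv_softc {
	struct ifnet		*mydrv_ifp;
	struct ifsubq_watchdog	 mydrv_wd;
};

static void
mydrv_watchdog(struct ifaltq_subque *ifsq)
{
	if_printf(ifsq_get_ifp(ifsq), "transmit timeout; resetting\n");
	/* ... reset hardware and restart transmission ... */
}

static void
mydrv_init(struct mydrv_softc *sc)
{
	ifsq_watchdog_init(&sc->mydrv_wd,
	    ifq_get_subq_default(&sc->mydrv_ifp->if_snd), mydrv_watchdog);
	ifsq_watchdog_start(&sc->mydrv_wd);
}
#endif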