1 /* $NetBSD: if_vlan.c,v 1.167 2021/12/24 04:50:40 yamaguchi Exp $ */ 2 3 /* 4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Copyright 1998 Massachusetts Institute of Technology 34 * 35 * Permission to use, copy, modify, and distribute this software and 36 * its documentation for any purpose and without fee is hereby 37 * granted, provided that both the above copyright notice and this 38 * permission notice appear in all copies, that both the above 39 * copyright notice and this permission notice appear in all 40 * supporting documentation, and that the name of M.I.T. not be used 41 * in advertising or publicity pertaining to distribution of the 42 * software without specific, written prior permission. M.I.T. makes 43 * no representations about the suitability of this software for any 44 * purpose. It is provided "as is" without express or implied 45 * warranty. 46 * 47 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS 48 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, 49 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 50 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT 51 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 54 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 55 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 56 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 57 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 58 * SUCH DAMAGE. 59 * 60 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp 61 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp 62 */ 63 64 /* 65 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be 66 * extended some day to also handle IEEE 802.1P priority tagging. 
This is
 * sort of sneaky in the implementation, since we need to pretend to be
 * enough of an Ethernet implementation to make ARP work.  The way we do
 * this is by telling everyone that we are an Ethernet interface, then
 * catching the packets that ether_output() left on our output queue when
 * it calls if_start(), rewriting them for use by the real outgoing
 * interface, and asking it to send them.
 *
 * TODO:
 *
 *	- Need some way to notify vlan interfaces when the parent
 *	  interface changes MTU.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.167 2021/12/24 04:50:40 yamaguchi Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/pserialize.h>
#include <sys/psref.h>
#include <sys/pslist.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/module.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_ether.h>
#include <net/if_vlanvar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif
#ifdef INET6
#include <netinet6/in6_ifattach.h>
#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>
#endif

#include "ioconf.h"

struct vlan_mc_entry {
	LIST_ENTRY(vlan_mc_entry)	mc_entries;
	/*
	 * A key to identify this entry.  The mc_addr below can't be
	 * used since multiple sockaddr may be mapped into the same
	 * ether_multi (e.g., AF_UNSPEC).
133 */ 134 struct ether_multi *mc_enm; 135 struct sockaddr_storage mc_addr; 136 }; 137 138 struct ifvlan_linkmib { 139 struct ifvlan *ifvm_ifvlan; 140 const struct vlan_multisw *ifvm_msw; 141 int ifvm_encaplen; /* encapsulation length */ 142 int ifvm_mtufudge; /* MTU fudged by this much */ 143 int ifvm_mintu; /* min transmission unit */ 144 uint16_t ifvm_proto; /* encapsulation ethertype */ 145 uint16_t ifvm_tag; /* tag to apply on packets */ 146 struct ifnet *ifvm_p; /* parent interface of this vlan */ 147 148 struct psref_target ifvm_psref; 149 }; 150 151 struct ifvlan { 152 struct ethercom ifv_ec; 153 struct ifvlan_linkmib *ifv_mib; /* 154 * reader must use vlan_getref_linkmib() 155 * instead of direct dereference 156 */ 157 kmutex_t ifv_lock; /* writer lock for ifv_mib */ 158 pserialize_t ifv_psz; 159 void *ifv_linkstate_hook; 160 void *ifv_ifdetach_hook; 161 162 LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead; 163 struct pslist_entry ifv_hash; 164 int ifv_flags; 165 bool ifv_stopping; 166 }; 167 168 #define IFVF_PROMISC 0x01 /* promiscuous mode enabled */ 169 170 #define ifv_if ifv_ec.ec_if 171 172 #define ifv_msw ifv_mib.ifvm_msw 173 #define ifv_encaplen ifv_mib.ifvm_encaplen 174 #define ifv_mtufudge ifv_mib.ifvm_mtufudge 175 #define ifv_mintu ifv_mib.ifvm_mintu 176 #define ifv_tag ifv_mib.ifvm_tag 177 178 struct vlan_multisw { 179 int (*vmsw_addmulti)(struct ifvlan *, struct ifreq *); 180 int (*vmsw_delmulti)(struct ifvlan *, struct ifreq *); 181 void (*vmsw_purgemulti)(struct ifvlan *); 182 }; 183 184 static int vlan_ether_addmulti(struct ifvlan *, struct ifreq *); 185 static int vlan_ether_delmulti(struct ifvlan *, struct ifreq *); 186 static void vlan_ether_purgemulti(struct ifvlan *); 187 188 const struct vlan_multisw vlan_ether_multisw = { 189 .vmsw_addmulti = vlan_ether_addmulti, 190 .vmsw_delmulti = vlan_ether_delmulti, 191 .vmsw_purgemulti = vlan_ether_purgemulti, 192 }; 193 194 static int vlan_clone_create(struct if_clone *, int); 195 static int vlan_clone_destroy(struct ifnet *); 196 static int vlan_config(struct ifvlan *, struct ifnet *, uint16_t); 197 static int vlan_ioctl(struct ifnet *, u_long, void *); 198 static void vlan_start(struct ifnet *); 199 static int vlan_transmit(struct ifnet *, struct mbuf *); 200 static void vlan_link_state_changed(void *); 201 static void vlan_ifdetach(void *); 202 static void vlan_unconfig(struct ifnet *); 203 static int vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *); 204 static void vlan_hash_init(void); 205 static int vlan_hash_fini(void); 206 static int vlan_tag_hash(uint16_t, u_long); 207 static struct ifvlan_linkmib* vlan_getref_linkmib(struct ifvlan *, 208 struct psref *); 209 static void vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *); 210 static void vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *); 211 static struct ifvlan_linkmib* vlan_lookup_tag_psref(struct ifnet *, 212 uint16_t, struct psref *); 213 214 #if !defined(VLAN_TAG_HASH_SIZE) 215 #define VLAN_TAG_HASH_SIZE 32 216 #endif 217 static struct { 218 kmutex_t lock; 219 struct pslist_head *lists; 220 u_long mask; 221 } ifv_hash __cacheline_aligned = { 222 .lists = NULL, 223 .mask = 0, 224 }; 225 226 pserialize_t vlan_psz __read_mostly; 227 static struct psref_class *ifvm_psref_class __read_mostly; 228 229 struct if_clone vlan_cloner = 230 IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy); 231 232 /* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */ 233 static char 
vlan_zero_pad_buff[ETHER_MIN_LEN]; 234 235 static uint32_t nvlanifs; 236 237 static inline int 238 vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch) 239 { 240 int e; 241 242 KERNEL_LOCK_UNLESS_NET_MPSAFE(); 243 e = ifpromisc(ifp, pswitch); 244 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); 245 246 return e; 247 } 248 249 __unused static inline int 250 vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch) 251 { 252 int e; 253 254 KERNEL_LOCK_UNLESS_NET_MPSAFE(); 255 e = ifpromisc_locked(ifp, pswitch); 256 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); 257 258 return e; 259 } 260 261 void 262 vlanattach(int n) 263 { 264 265 /* 266 * Nothing to do here, initialization is handled by the 267 * module initialization code in vlaninit() below. 268 */ 269 } 270 271 static void 272 vlaninit(void) 273 { 274 nvlanifs = 0; 275 276 mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE); 277 vlan_psz = pserialize_create(); 278 ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET); 279 if_clone_attach(&vlan_cloner); 280 281 vlan_hash_init(); 282 MODULE_HOOK_SET(if_vlan_vlan_input_hook, vlan_input); 283 } 284 285 static int 286 vlandetach(void) 287 { 288 int error; 289 290 if (nvlanifs > 0) 291 return EBUSY; 292 293 error = vlan_hash_fini(); 294 if (error != 0) 295 return error; 296 297 if_clone_detach(&vlan_cloner); 298 psref_class_destroy(ifvm_psref_class); 299 pserialize_destroy(vlan_psz); 300 mutex_destroy(&ifv_hash.lock); 301 302 MODULE_HOOK_UNSET(if_vlan_vlan_input_hook); 303 return 0; 304 } 305 306 static void 307 vlan_reset_linkname(struct ifnet *ifp) 308 { 309 310 /* 311 * We start out with a "802.1Q VLAN" type and zero-length 312 * addresses. When we attach to a parent interface, we 313 * inherit its type, address length, address, and data link 314 * type. 315 */ 316 317 ifp->if_type = IFT_L2VLAN; 318 ifp->if_addrlen = 0; 319 ifp->if_dlt = DLT_NULL; 320 if_alloc_sadl(ifp); 321 } 322 323 static int 324 vlan_clone_create(struct if_clone *ifc, int unit) 325 { 326 struct ifvlan *ifv; 327 struct ifnet *ifp; 328 struct ifvlan_linkmib *mib; 329 330 ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO); 331 mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP); 332 ifp = &ifv->ifv_if; 333 LIST_INIT(&ifv->ifv_mc_listhead); 334 335 mib->ifvm_ifvlan = ifv; 336 mib->ifvm_p = NULL; 337 psref_target_init(&mib->ifvm_psref, ifvm_psref_class); 338 339 mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE); 340 ifv->ifv_psz = pserialize_create(); 341 ifv->ifv_mib = mib; 342 343 atomic_inc_uint(&nvlanifs); 344 345 if_initname(ifp, ifc->ifc_name, unit); 346 ifp->if_softc = ifv; 347 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 348 #ifdef NET_MPSAFE 349 ifp->if_extflags = IFEF_MPSAFE; 350 #endif 351 ifp->if_start = vlan_start; 352 ifp->if_transmit = vlan_transmit; 353 ifp->if_ioctl = vlan_ioctl; 354 IFQ_SET_READY(&ifp->if_snd); 355 if_initialize(ifp); 356 /* 357 * Set the link state to down. 358 * When the parent interface attaches we will use that link state. 359 * When the parent interface link state changes, so will ours. 360 * When the parent interface detaches, set the link state to down. 
361 */ 362 ifp->if_link_state = LINK_STATE_DOWN; 363 364 vlan_reset_linkname(ifp); 365 if_register(ifp); 366 return 0; 367 } 368 369 static int 370 vlan_clone_destroy(struct ifnet *ifp) 371 { 372 struct ifvlan *ifv = ifp->if_softc; 373 374 atomic_dec_uint(&nvlanifs); 375 376 IFNET_LOCK(ifp); 377 vlan_unconfig(ifp); 378 IFNET_UNLOCK(ifp); 379 if_detach(ifp); 380 381 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class); 382 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib)); 383 pserialize_destroy(ifv->ifv_psz); 384 mutex_destroy(&ifv->ifv_lock); 385 free(ifv, M_DEVBUF); 386 387 return 0; 388 } 389 390 /* 391 * Configure a VLAN interface. 392 */ 393 static int 394 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag) 395 { 396 struct ifnet *ifp = &ifv->ifv_if; 397 struct ifvlan_linkmib *nmib = NULL; 398 struct ifvlan_linkmib *omib = NULL; 399 struct ifvlan_linkmib *checkmib; 400 struct psref_target *nmib_psref = NULL; 401 const uint16_t vid = EVL_VLANOFTAG(tag); 402 int error = 0; 403 int idx; 404 bool omib_cleanup = false; 405 struct psref psref; 406 407 /* VLAN ID 0 and 4095 are reserved in the spec */ 408 if ((vid == 0) || (vid == 0xfff)) 409 return EINVAL; 410 411 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP); 412 mutex_enter(&ifv->ifv_lock); 413 omib = ifv->ifv_mib; 414 415 if (omib->ifvm_p != NULL) { 416 error = EBUSY; 417 goto done; 418 } 419 420 /* Duplicate check */ 421 checkmib = vlan_lookup_tag_psref(p, vid, &psref); 422 if (checkmib != NULL) { 423 vlan_putref_linkmib(checkmib, &psref); 424 error = EEXIST; 425 goto done; 426 } 427 428 *nmib = *omib; 429 nmib_psref = &nmib->ifvm_psref; 430 431 psref_target_init(nmib_psref, ifvm_psref_class); 432 433 switch (p->if_type) { 434 case IFT_ETHER: 435 { 436 struct ethercom *ec = (void *)p; 437 438 nmib->ifvm_msw = &vlan_ether_multisw; 439 nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN; 440 nmib->ifvm_mintu = ETHERMIN; 441 442 error = ether_add_vlantag(p, tag, NULL); 443 if (error != 0) 444 goto done; 445 446 if (ec->ec_capenable & ETHERCAP_VLAN_MTU) { 447 nmib->ifvm_mtufudge = 0; 448 } else { 449 /* 450 * Fudge the MTU by the encapsulation size. This 451 * makes us incompatible with strictly compliant 452 * 802.1Q implementations, but allows us to use 453 * the feature with other NetBSD 454 * implementations, which might still be useful. 455 */ 456 nmib->ifvm_mtufudge = nmib->ifvm_encaplen; 457 } 458 459 /* 460 * If the parent interface can do hardware-assisted 461 * VLAN encapsulation, then propagate its hardware- 462 * assisted checksumming flags and tcp segmentation 463 * offload. 464 */ 465 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) { 466 ifp->if_capabilities = p->if_capabilities & 467 (IFCAP_TSOv4 | IFCAP_TSOv6 | 468 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 469 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 470 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 471 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx | 472 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx); 473 } 474 475 /* 476 * We inherit the parent's Ethernet address. 477 */ 478 ether_ifattach(ifp, CLLADDR(p->if_sadl)); 479 ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */ 480 break; 481 } 482 483 default: 484 error = EPROTONOSUPPORT; 485 goto done; 486 } 487 488 nmib->ifvm_p = p; 489 nmib->ifvm_tag = vid; 490 ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge; 491 ifv->ifv_if.if_flags = p->if_flags & 492 (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 493 494 /* 495 * Inherit the if_type from the parent. 
This allows us
	 * to participate in bridges of that type.
	 */
	ifv->ifv_if.if_type = p->if_type;

	PSLIST_ENTRY_INIT(ifv, ifv_hash);
	idx = vlan_tag_hash(vid, ifv_hash.mask);

	mutex_enter(&ifv_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
	mutex_exit(&ifv_hash.lock);

	vlan_linkmib_update(ifv, nmib);
	nmib = NULL;
	nmib_psref = NULL;
	omib_cleanup = true;

	ifv->ifv_ifdetach_hook = ether_ifdetachhook_establish(p,
	    vlan_ifdetach, ifp);

	/*
	 * We inherit the parent's link state.
	 */
	ifv->ifv_linkstate_hook = if_linkstate_change_establish(p,
	    vlan_link_state_changed, ifv);
	if_link_state_change(&ifv->ifv_if, p->if_link_state);

done:
	mutex_exit(&ifv->ifv_lock);

	if (nmib_psref)
		psref_target_destroy(nmib_psref, ifvm_psref_class);
	if (nmib)
		kmem_free(nmib, sizeof(*nmib));
	if (omib_cleanup)
		kmem_free(omib, sizeof(*omib));

	return error;
}

/*
 * Unconfigure a VLAN interface.
 */
static void
vlan_unconfig(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifvlan_linkmib *nmib = NULL;
	int error;

	KASSERT(IFNET_LOCKED(ifp));

	nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);

	mutex_enter(&ifv->ifv_lock);
	error = vlan_unconfig_locked(ifv, nmib);
	mutex_exit(&ifv->ifv_lock);

	if (error)
		kmem_free(nmib, sizeof(*nmib));
}

static int
vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifnet *p;
	struct ifnet *ifp = &ifv->ifv_if;
	struct psref_target *nmib_psref = NULL;
	struct ifvlan_linkmib *omib;
	int error = 0;

	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(mutex_owned(&ifv->ifv_lock));

	if (ifv->ifv_stopping) {
		error = -1;
		goto done;
	}

	ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);

	omib = ifv->ifv_mib;
	p = omib->ifvm_p;

	if (p == NULL) {
		error = -1;
		goto done;
	}

	*nmib = *omib;
	nmib_psref = &nmib->ifvm_psref;
	psref_target_init(nmib_psref, ifvm_psref_class);

	/*
	 * Since the interface is being unconfigured, we need to empty the
	 * list of multicast groups that we may have joined while we were
	 * alive and remove them from the parent's list as well.
	 */
	(*nmib->ifvm_msw->vmsw_purgemulti)(ifv);

	/* Disconnect from parent. */
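	/*
	 * For an Ethernet parent, ether_ifdetach() below must run without
	 * IFNET_LOCK and ifv_lock held, so both are dropped and re-taken
	 * around it; ifv_stopping (checked at the top of this function)
	 * keeps a concurrent unconfigure from running while the locks are
	 * released.
	 */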
	switch (p->if_type) {
	case IFT_ETHER:
	    {
		(void)ether_del_vlantag(p, nmib->ifvm_tag);

		/* XXX ether_ifdetach must not be called with IFNET_LOCK */
		ifv->ifv_stopping = true;
		mutex_exit(&ifv->ifv_lock);
		IFNET_UNLOCK(ifp);
		ether_ifdetach(ifp);
		IFNET_LOCK(ifp);
		mutex_enter(&ifv->ifv_lock);
		ifv->ifv_stopping = false;

		/* if_free_sadl must be called with IFNET_LOCK */
		if_free_sadl(ifp, 1);

		/* Restore vlan_ioctl overwritten by ether_ifdetach */
		ifp->if_ioctl = vlan_ioctl;
		vlan_reset_linkname(ifp);
		break;
	    }

	default:
		panic("%s: impossible", __func__);
	}

	nmib->ifvm_p = NULL;
	ifv->ifv_if.if_mtu = 0;
	ifv->ifv_flags = 0;

	mutex_enter(&ifv_hash.lock);
	PSLIST_WRITER_REMOVE(ifv, ifv_hash);
	pserialize_perform(vlan_psz);
	mutex_exit(&ifv_hash.lock);
	PSLIST_ENTRY_DESTROY(ifv, ifv_hash);
	if_linkstate_change_disestablish(p,
	    ifv->ifv_linkstate_hook, NULL);

	vlan_linkmib_update(ifv, nmib);
	if_link_state_change(ifp, LINK_STATE_DOWN);

	/* XXX ether_ifdetachhook_disestablish must not be called with IFNET_LOCK */
	IFNET_UNLOCK(ifp);
	ether_ifdetachhook_disestablish(p, ifv->ifv_ifdetach_hook,
	    &ifv->ifv_lock);
	mutex_exit(&ifv->ifv_lock);
	IFNET_LOCK(ifp);

	nmib_psref = NULL;
	kmem_free(omib, sizeof(*omib));

#ifdef INET6
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	/* To delete IPv6 link-local addresses */
	if (in6_present)
		in6_ifdetach(ifp);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#endif

	if_down_locked(ifp);
	ifp->if_capabilities = 0;
	mutex_enter(&ifv->ifv_lock);
done:
	if (nmib_psref)
		psref_target_destroy(nmib_psref, ifvm_psref_class);

	return error;
}

static void
vlan_hash_init(void)
{

	ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
	    &ifv_hash.mask);
}

static int
vlan_hash_fini(void)
{
	int i;

	mutex_enter(&ifv_hash.lock);

	for (i = 0; i < ifv_hash.mask + 1; i++) {
		if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
		    ifv_hash) != NULL) {
			mutex_exit(&ifv_hash.lock);
			return EBUSY;
		}
	}

	for (i = 0; i < ifv_hash.mask + 1; i++)
		PSLIST_DESTROY(&ifv_hash.lists[i]);

	mutex_exit(&ifv_hash.lock);

	hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);

	ifv_hash.lists = NULL;
	ifv_hash.mask = 0;

	return 0;
}

static int
vlan_tag_hash(uint16_t tag, u_long mask)
{
	uint32_t hash;

	hash = (tag >> 8) ^ tag;
	hash = (hash >> 2) ^ hash;

	return hash & mask;
}

static struct ifvlan_linkmib *
vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
{
	struct ifvlan_linkmib *mib;
	int s;

	s = pserialize_read_enter();
	mib = atomic_load_consume(&sc->ifv_mib);
	if (mib == NULL) {
		pserialize_read_exit(s);
		return NULL;
	}
	psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
	pserialize_read_exit(s);

	return mib;
}

static void
vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
{
	if (mib == NULL)
		return;
	psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
}

static struct ifvlan_linkmib *
vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
{
	int idx;
	int s;
	struct ifvlan *sc;

	idx = vlan_tag_hash(tag, ifv_hash.mask);

	s =
pserialize_read_enter(); 748 PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan, 749 ifv_hash) { 750 struct ifvlan_linkmib *mib = atomic_load_consume(&sc->ifv_mib); 751 if (mib == NULL) 752 continue; 753 if (mib->ifvm_tag != tag) 754 continue; 755 if (mib->ifvm_p != ifp) 756 continue; 757 758 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class); 759 pserialize_read_exit(s); 760 return mib; 761 } 762 pserialize_read_exit(s); 763 return NULL; 764 } 765 766 static void 767 vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib) 768 { 769 struct ifvlan_linkmib *omib = ifv->ifv_mib; 770 771 KASSERT(mutex_owned(&ifv->ifv_lock)); 772 773 atomic_store_release(&ifv->ifv_mib, nmib); 774 775 pserialize_perform(ifv->ifv_psz); 776 psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class); 777 } 778 779 /* 780 * Called when a parent interface is detaching; destroy any VLAN 781 * configuration for the parent interface. 782 */ 783 static void 784 vlan_ifdetach(void *xifp) 785 { 786 struct ifnet *ifp; 787 788 ifp = (struct ifnet *)xifp; 789 790 /* IFNET_LOCK must be held before ifv_lock. */ 791 IFNET_LOCK(ifp); 792 vlan_unconfig(ifp); 793 IFNET_UNLOCK(ifp); 794 } 795 796 static int 797 vlan_set_promisc(struct ifnet *ifp) 798 { 799 struct ifvlan *ifv = ifp->if_softc; 800 struct ifvlan_linkmib *mib; 801 struct psref psref; 802 int error = 0; 803 int bound; 804 805 bound = curlwp_bind(); 806 mib = vlan_getref_linkmib(ifv, &psref); 807 if (mib == NULL) { 808 curlwp_bindx(bound); 809 return EBUSY; 810 } 811 812 if ((ifp->if_flags & IFF_PROMISC) != 0) { 813 if ((ifv->ifv_flags & IFVF_PROMISC) == 0) { 814 error = vlan_safe_ifpromisc(mib->ifvm_p, 1); 815 if (error == 0) 816 ifv->ifv_flags |= IFVF_PROMISC; 817 } 818 } else { 819 if ((ifv->ifv_flags & IFVF_PROMISC) != 0) { 820 error = vlan_safe_ifpromisc(mib->ifvm_p, 0); 821 if (error == 0) 822 ifv->ifv_flags &= ~IFVF_PROMISC; 823 } 824 } 825 vlan_putref_linkmib(mib, &psref); 826 curlwp_bindx(bound); 827 828 return error; 829 } 830 831 static int 832 vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data) 833 { 834 struct lwp *l = curlwp; 835 struct ifvlan *ifv = ifp->if_softc; 836 struct ifaddr *ifa = (struct ifaddr *) data; 837 struct ifreq *ifr = (struct ifreq *) data; 838 struct ifnet *pr; 839 struct ifcapreq *ifcr; 840 struct vlanreq vlr; 841 struct ifvlan_linkmib *mib; 842 struct psref psref; 843 int error = 0; 844 int bound; 845 846 switch (cmd) { 847 case SIOCSIFMTU: 848 bound = curlwp_bind(); 849 mib = vlan_getref_linkmib(ifv, &psref); 850 if (mib == NULL) { 851 curlwp_bindx(bound); 852 error = EBUSY; 853 break; 854 } 855 856 if (mib->ifvm_p == NULL) { 857 vlan_putref_linkmib(mib, &psref); 858 curlwp_bindx(bound); 859 error = EINVAL; 860 } else if ( 861 ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) || 862 ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) { 863 vlan_putref_linkmib(mib, &psref); 864 curlwp_bindx(bound); 865 error = EINVAL; 866 } else { 867 vlan_putref_linkmib(mib, &psref); 868 curlwp_bindx(bound); 869 870 error = ifioctl_common(ifp, cmd, data); 871 if (error == ENETRESET) 872 error = 0; 873 } 874 875 break; 876 877 case SIOCSETVLAN: 878 if ((error = kauth_authorize_network(l->l_cred, 879 KAUTH_NETWORK_INTERFACE, 880 KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd, 881 NULL)) != 0) 882 break; 883 if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0) 884 break; 885 886 if (vlr.vlr_parent[0] == '\0') { 887 bound = curlwp_bind(); 888 mib = vlan_getref_linkmib(ifv, &psref); 889 if (mib == 
NULL) { 890 curlwp_bindx(bound); 891 error = EBUSY; 892 break; 893 } 894 895 if (mib->ifvm_p != NULL && 896 (ifp->if_flags & IFF_PROMISC) != 0) 897 error = vlan_safe_ifpromisc(mib->ifvm_p, 0); 898 899 vlan_putref_linkmib(mib, &psref); 900 curlwp_bindx(bound); 901 902 vlan_unconfig(ifp); 903 break; 904 } 905 if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) { 906 error = EINVAL; /* check for valid tag */ 907 break; 908 } 909 if ((pr = ifunit(vlr.vlr_parent)) == NULL) { 910 error = ENOENT; 911 break; 912 } 913 914 error = vlan_config(ifv, pr, vlr.vlr_tag); 915 if (error != 0) 916 break; 917 918 /* Update promiscuous mode, if necessary. */ 919 vlan_set_promisc(ifp); 920 921 ifp->if_flags |= IFF_RUNNING; 922 break; 923 924 case SIOCGETVLAN: 925 memset(&vlr, 0, sizeof(vlr)); 926 bound = curlwp_bind(); 927 mib = vlan_getref_linkmib(ifv, &psref); 928 if (mib == NULL) { 929 curlwp_bindx(bound); 930 error = EBUSY; 931 break; 932 } 933 if (mib->ifvm_p != NULL) { 934 snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s", 935 mib->ifvm_p->if_xname); 936 vlr.vlr_tag = mib->ifvm_tag; 937 } 938 vlan_putref_linkmib(mib, &psref); 939 curlwp_bindx(bound); 940 error = copyout(&vlr, ifr->ifr_data, sizeof(vlr)); 941 break; 942 943 case SIOCSIFFLAGS: 944 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 945 break; 946 /* 947 * For promiscuous mode, we enable promiscuous mode on 948 * the parent if we need promiscuous on the VLAN interface. 949 */ 950 bound = curlwp_bind(); 951 mib = vlan_getref_linkmib(ifv, &psref); 952 if (mib == NULL) { 953 curlwp_bindx(bound); 954 error = EBUSY; 955 break; 956 } 957 958 if (mib->ifvm_p != NULL) 959 error = vlan_set_promisc(ifp); 960 vlan_putref_linkmib(mib, &psref); 961 curlwp_bindx(bound); 962 break; 963 964 case SIOCADDMULTI: 965 mutex_enter(&ifv->ifv_lock); 966 mib = ifv->ifv_mib; 967 if (mib == NULL) { 968 error = EBUSY; 969 mutex_exit(&ifv->ifv_lock); 970 break; 971 } 972 973 error = (mib->ifvm_p != NULL) ? 974 (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL; 975 mib = NULL; 976 mutex_exit(&ifv->ifv_lock); 977 break; 978 979 case SIOCDELMULTI: 980 mutex_enter(&ifv->ifv_lock); 981 mib = ifv->ifv_mib; 982 if (mib == NULL) { 983 error = EBUSY; 984 mutex_exit(&ifv->ifv_lock); 985 break; 986 } 987 error = (mib->ifvm_p != NULL) ? 
988 (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL; 989 mib = NULL; 990 mutex_exit(&ifv->ifv_lock); 991 break; 992 993 case SIOCSIFCAP: 994 ifcr = data; 995 /* make sure caps are enabled on parent */ 996 bound = curlwp_bind(); 997 mib = vlan_getref_linkmib(ifv, &psref); 998 if (mib == NULL) { 999 curlwp_bindx(bound); 1000 error = EBUSY; 1001 break; 1002 } 1003 1004 if (mib->ifvm_p == NULL) { 1005 vlan_putref_linkmib(mib, &psref); 1006 curlwp_bindx(bound); 1007 error = EINVAL; 1008 break; 1009 } 1010 if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) != 1011 ifcr->ifcr_capenable) { 1012 vlan_putref_linkmib(mib, &psref); 1013 curlwp_bindx(bound); 1014 error = EINVAL; 1015 break; 1016 } 1017 1018 vlan_putref_linkmib(mib, &psref); 1019 curlwp_bindx(bound); 1020 1021 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET) 1022 error = 0; 1023 break; 1024 case SIOCINITIFADDR: 1025 bound = curlwp_bind(); 1026 mib = vlan_getref_linkmib(ifv, &psref); 1027 if (mib == NULL) { 1028 curlwp_bindx(bound); 1029 error = EBUSY; 1030 break; 1031 } 1032 1033 if (mib->ifvm_p == NULL) { 1034 error = EINVAL; 1035 vlan_putref_linkmib(mib, &psref); 1036 curlwp_bindx(bound); 1037 break; 1038 } 1039 vlan_putref_linkmib(mib, &psref); 1040 curlwp_bindx(bound); 1041 1042 ifp->if_flags |= IFF_UP; 1043 #ifdef INET 1044 if (ifa->ifa_addr->sa_family == AF_INET) 1045 arp_ifinit(ifp, ifa); 1046 #endif 1047 break; 1048 1049 default: 1050 error = ether_ioctl(ifp, cmd, data); 1051 } 1052 1053 return error; 1054 } 1055 1056 static int 1057 vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr) 1058 { 1059 const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr); 1060 struct vlan_mc_entry *mc; 1061 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN]; 1062 struct ifvlan_linkmib *mib; 1063 int error; 1064 1065 KASSERT(mutex_owned(&ifv->ifv_lock)); 1066 1067 if (sa->sa_len > sizeof(struct sockaddr_storage)) 1068 return EINVAL; 1069 1070 error = ether_addmulti(sa, &ifv->ifv_ec); 1071 if (error != ENETRESET) 1072 return error; 1073 1074 /* 1075 * This is a new multicast address. We have to tell parent 1076 * about it. Also, remember this multicast address so that 1077 * we can delete it on unconfigure. 1078 */ 1079 mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT); 1080 if (mc == NULL) { 1081 error = ENOMEM; 1082 goto alloc_failed; 1083 } 1084 1085 /* 1086 * Since ether_addmulti() returned ENETRESET, the following two 1087 * statements shouldn't fail. Here ifv_ec is implicitly protected 1088 * by the ifv_lock lock. 
 */
	error = ether_multiaddr(sa, addrlo, addrhi);
	KASSERT(error == 0);

	ETHER_LOCK(&ifv->ifv_ec);
	mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
	ETHER_UNLOCK(&ifv->ifv_ec);

	KASSERT(mc->mc_enm != NULL);

	memcpy(&mc->mc_addr, sa, sa->sa_len);
	LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);

	mib = ifv->ifv_mib;

	KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
	error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
	KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);

	if (error != 0)
		goto ioctl_failed;
	return error;

ioctl_failed:
	LIST_REMOVE(mc, mc_entries);
	free(mc, M_DEVBUF);

alloc_failed:
	(void)ether_delmulti(sa, &ifv->ifv_ec);
	return error;
}

static int
vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
{
	const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
	struct ether_multi *enm;
	struct vlan_mc_entry *mc;
	struct ifvlan_linkmib *mib;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	int error;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	/*
	 * Find a key to look up the vlan_mc_entry.  We have to do this
	 * before calling ether_delmulti for obvious reasons.
	 */
	if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
		return error;

	ETHER_LOCK(&ifv->ifv_ec);
	enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
	ETHER_UNLOCK(&ifv->ifv_ec);
	if (enm == NULL)
		return EINVAL;

	LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) {
		if (mc->mc_enm == enm)
			break;
	}

	/* We won't delete entries we didn't add */
	if (mc == NULL)
		return EINVAL;

	error = ether_delmulti(sa, &ifv->ifv_ec);
	if (error != ENETRESET)
		return error;

	/* We no longer use this multicast address.  Tell the parent so. */
	mib = ifv->ifv_mib;
	error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);

	if (error == 0) {
		/* And forget about this address. */
		LIST_REMOVE(mc, mc_entries);
		free(mc, M_DEVBUF);
	} else {
		(void)ether_addmulti(sa, &ifv->ifv_ec);
	}

	return error;
}

/*
 * Delete any multicast address we have asked to add from the parent
 * interface.  Called when the vlan is being unconfigured.
1177 */ 1178 static void 1179 vlan_ether_purgemulti(struct ifvlan *ifv) 1180 { 1181 struct vlan_mc_entry *mc; 1182 struct ifvlan_linkmib *mib; 1183 1184 KASSERT(mutex_owned(&ifv->ifv_lock)); 1185 mib = ifv->ifv_mib; 1186 if (mib == NULL) { 1187 return; 1188 } 1189 1190 while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) { 1191 (void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI, 1192 sstocsa(&mc->mc_addr)); 1193 LIST_REMOVE(mc, mc_entries); 1194 free(mc, M_DEVBUF); 1195 } 1196 } 1197 1198 static void 1199 vlan_start(struct ifnet *ifp) 1200 { 1201 struct ifvlan *ifv = ifp->if_softc; 1202 struct ifnet *p; 1203 struct ethercom *ec; 1204 struct mbuf *m; 1205 struct ifvlan_linkmib *mib; 1206 struct psref psref; 1207 struct ether_header *eh; 1208 int error, bound; 1209 1210 bound = curlwp_bind(); 1211 mib = vlan_getref_linkmib(ifv, &psref); 1212 if (mib == NULL) { 1213 curlwp_bindx(bound); 1214 return; 1215 } 1216 1217 if (__predict_false(mib->ifvm_p == NULL)) { 1218 vlan_putref_linkmib(mib, &psref); 1219 curlwp_bindx(bound); 1220 return; 1221 } 1222 1223 p = mib->ifvm_p; 1224 ec = (void *)mib->ifvm_p; 1225 1226 ifp->if_flags |= IFF_OACTIVE; 1227 1228 for (;;) { 1229 IFQ_DEQUEUE(&ifp->if_snd, m); 1230 if (m == NULL) 1231 break; 1232 1233 if (m->m_len < sizeof(*eh)) { 1234 m = m_pullup(m, sizeof(*eh)); 1235 if (m == NULL) { 1236 if_statinc(ifp, if_oerrors); 1237 continue; 1238 } 1239 } 1240 1241 eh = mtod(m, struct ether_header *); 1242 if (ntohs(eh->ether_type) == ETHERTYPE_VLAN) { 1243 m_freem(m); 1244 if_statinc(ifp, if_noproto); 1245 continue; 1246 } 1247 1248 #ifdef ALTQ 1249 /* 1250 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is 1251 * defined. 1252 */ 1253 KERNEL_LOCK(1, NULL); 1254 /* 1255 * If ALTQ is enabled on the parent interface, do 1256 * classification; the queueing discipline might 1257 * not require classification, but might require 1258 * the address family/header pointer in the pktattr. 1259 */ 1260 if (ALTQ_IS_ENABLED(&p->if_snd)) { 1261 switch (p->if_type) { 1262 case IFT_ETHER: 1263 altq_etherclassify(&p->if_snd, m); 1264 break; 1265 default: 1266 panic("%s: impossible (altq)", __func__); 1267 } 1268 } 1269 KERNEL_UNLOCK_ONE(NULL); 1270 #endif /* ALTQ */ 1271 1272 bpf_mtap(ifp, m, BPF_D_OUT); 1273 /* 1274 * If the parent can insert the tag itself, just mark 1275 * the tag in the mbuf header. 1276 */ 1277 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) { 1278 vlan_set_tag(m, mib->ifvm_tag); 1279 } else { 1280 /* 1281 * insert the tag ourselves 1282 */ 1283 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT); 1284 if (m == NULL) { 1285 printf("%s: unable to prepend encap header", 1286 p->if_xname); 1287 if_statinc(ifp, if_oerrors); 1288 continue; 1289 } 1290 1291 switch (p->if_type) { 1292 case IFT_ETHER: 1293 { 1294 struct ether_vlan_header *evl; 1295 1296 if (m->m_len < sizeof(struct ether_vlan_header)) 1297 m = m_pullup(m, 1298 sizeof(struct ether_vlan_header)); 1299 if (m == NULL) { 1300 printf("%s: unable to pullup encap " 1301 "header", p->if_xname); 1302 if_statinc(ifp, if_oerrors); 1303 continue; 1304 } 1305 1306 /* 1307 * Transform the Ethernet header into an 1308 * Ethernet header with 802.1Q encapsulation. 
1309 */ 1310 memmove(mtod(m, void *), 1311 mtod(m, char *) + mib->ifvm_encaplen, 1312 sizeof(struct ether_header)); 1313 evl = mtod(m, struct ether_vlan_header *); 1314 evl->evl_proto = evl->evl_encap_proto; 1315 evl->evl_encap_proto = htons(ETHERTYPE_VLAN); 1316 evl->evl_tag = htons(mib->ifvm_tag); 1317 1318 /* 1319 * To cater for VLAN-aware layer 2 ethernet 1320 * switches which may need to strip the tag 1321 * before forwarding the packet, make sure 1322 * the packet+tag is at least 68 bytes long. 1323 * This is necessary because our parent will 1324 * only pad to 64 bytes (ETHER_MIN_LEN) and 1325 * some switches will not pad by themselves 1326 * after deleting a tag. 1327 */ 1328 const size_t min_data_len = ETHER_MIN_LEN - 1329 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; 1330 if (m->m_pkthdr.len < min_data_len) { 1331 m_copyback(m, m->m_pkthdr.len, 1332 min_data_len - m->m_pkthdr.len, 1333 vlan_zero_pad_buff); 1334 } 1335 break; 1336 } 1337 1338 default: 1339 panic("%s: impossible", __func__); 1340 } 1341 } 1342 1343 if ((p->if_flags & IFF_RUNNING) == 0) { 1344 m_freem(m); 1345 continue; 1346 } 1347 1348 error = if_transmit_lock(p, m); 1349 if (error) { 1350 /* mbuf is already freed */ 1351 if_statinc(ifp, if_oerrors); 1352 continue; 1353 } 1354 if_statinc(ifp, if_opackets); 1355 } 1356 1357 ifp->if_flags &= ~IFF_OACTIVE; 1358 1359 /* Remove reference to mib before release */ 1360 vlan_putref_linkmib(mib, &psref); 1361 curlwp_bindx(bound); 1362 } 1363 1364 static int 1365 vlan_transmit(struct ifnet *ifp, struct mbuf *m) 1366 { 1367 struct ifvlan *ifv = ifp->if_softc; 1368 struct ifnet *p; 1369 struct ethercom *ec; 1370 struct ifvlan_linkmib *mib; 1371 struct psref psref; 1372 struct ether_header *eh; 1373 int error, bound; 1374 size_t pktlen = m->m_pkthdr.len; 1375 bool mcast = (m->m_flags & M_MCAST) != 0; 1376 1377 if (m->m_len < sizeof(*eh)) { 1378 m = m_pullup(m, sizeof(*eh)); 1379 if (m == NULL) { 1380 if_statinc(ifp, if_oerrors); 1381 return ENOBUFS; 1382 } 1383 } 1384 1385 eh = mtod(m, struct ether_header *); 1386 if (ntohs(eh->ether_type) == ETHERTYPE_VLAN) { 1387 m_freem(m); 1388 if_statinc(ifp, if_noproto); 1389 return EPROTONOSUPPORT; 1390 } 1391 1392 bound = curlwp_bind(); 1393 mib = vlan_getref_linkmib(ifv, &psref); 1394 if (mib == NULL) { 1395 curlwp_bindx(bound); 1396 m_freem(m); 1397 return ENETDOWN; 1398 } 1399 1400 if (__predict_false(mib->ifvm_p == NULL)) { 1401 vlan_putref_linkmib(mib, &psref); 1402 curlwp_bindx(bound); 1403 m_freem(m); 1404 return ENETDOWN; 1405 } 1406 1407 p = mib->ifvm_p; 1408 ec = (void *)mib->ifvm_p; 1409 1410 bpf_mtap(ifp, m, BPF_D_OUT); 1411 1412 if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0) 1413 goto out; 1414 if (m == NULL) 1415 goto out; 1416 1417 /* 1418 * If the parent can insert the tag itself, just mark 1419 * the tag in the mbuf header. 
1420 */ 1421 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) { 1422 vlan_set_tag(m, mib->ifvm_tag); 1423 } else { 1424 /* 1425 * insert the tag ourselves 1426 */ 1427 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT); 1428 if (m == NULL) { 1429 printf("%s: unable to prepend encap header", 1430 p->if_xname); 1431 if_statinc(ifp, if_oerrors); 1432 error = ENOBUFS; 1433 goto out; 1434 } 1435 1436 switch (p->if_type) { 1437 case IFT_ETHER: 1438 { 1439 struct ether_vlan_header *evl; 1440 1441 if (m->m_len < sizeof(struct ether_vlan_header)) 1442 m = m_pullup(m, 1443 sizeof(struct ether_vlan_header)); 1444 if (m == NULL) { 1445 printf("%s: unable to pullup encap " 1446 "header", p->if_xname); 1447 if_statinc(ifp, if_oerrors); 1448 error = ENOBUFS; 1449 goto out; 1450 } 1451 1452 /* 1453 * Transform the Ethernet header into an 1454 * Ethernet header with 802.1Q encapsulation. 1455 */ 1456 memmove(mtod(m, void *), 1457 mtod(m, char *) + mib->ifvm_encaplen, 1458 sizeof(struct ether_header)); 1459 evl = mtod(m, struct ether_vlan_header *); 1460 evl->evl_proto = evl->evl_encap_proto; 1461 evl->evl_encap_proto = htons(ETHERTYPE_VLAN); 1462 evl->evl_tag = htons(mib->ifvm_tag); 1463 1464 /* 1465 * To cater for VLAN-aware layer 2 ethernet 1466 * switches which may need to strip the tag 1467 * before forwarding the packet, make sure 1468 * the packet+tag is at least 68 bytes long. 1469 * This is necessary because our parent will 1470 * only pad to 64 bytes (ETHER_MIN_LEN) and 1471 * some switches will not pad by themselves 1472 * after deleting a tag. 1473 */ 1474 const size_t min_data_len = ETHER_MIN_LEN - 1475 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; 1476 if (m->m_pkthdr.len < min_data_len) { 1477 m_copyback(m, m->m_pkthdr.len, 1478 min_data_len - m->m_pkthdr.len, 1479 vlan_zero_pad_buff); 1480 } 1481 break; 1482 } 1483 1484 default: 1485 panic("%s: impossible", __func__); 1486 } 1487 } 1488 1489 if ((p->if_flags & IFF_RUNNING) == 0) { 1490 m_freem(m); 1491 error = ENETDOWN; 1492 goto out; 1493 } 1494 1495 error = if_transmit_lock(p, m); 1496 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 1497 if (error) { 1498 /* mbuf is already freed */ 1499 if_statinc_ref(nsr, if_oerrors); 1500 } else { 1501 if_statinc_ref(nsr, if_opackets); 1502 if_statadd_ref(nsr, if_obytes, pktlen); 1503 if (mcast) 1504 if_statinc_ref(nsr, if_omcasts); 1505 } 1506 IF_STAT_PUTREF(ifp); 1507 1508 out: 1509 /* Remove reference to mib before release */ 1510 vlan_putref_linkmib(mib, &psref); 1511 curlwp_bindx(bound); 1512 1513 return error; 1514 } 1515 1516 /* 1517 * Given an Ethernet frame, find a valid vlan interface corresponding to the 1518 * given source interface and tag, then run the real packet through the 1519 * parent's input routine. 
1520 */ 1521 void 1522 vlan_input(struct ifnet *ifp, struct mbuf *m) 1523 { 1524 struct ifvlan *ifv; 1525 uint16_t vid; 1526 struct ifvlan_linkmib *mib; 1527 struct psref psref; 1528 bool have_vtag; 1529 1530 have_vtag = vlan_has_tag(m); 1531 if (have_vtag) { 1532 vid = EVL_VLANOFTAG(vlan_get_tag(m)); 1533 m->m_flags &= ~M_VLANTAG; 1534 } else { 1535 struct ether_vlan_header *evl; 1536 1537 if (ifp->if_type != IFT_ETHER) { 1538 panic("%s: impossible", __func__); 1539 } 1540 1541 if (m->m_len < sizeof(struct ether_vlan_header) && 1542 (m = m_pullup(m, 1543 sizeof(struct ether_vlan_header))) == NULL) { 1544 printf("%s: no memory for VLAN header, " 1545 "dropping packet.\n", ifp->if_xname); 1546 return; 1547 } 1548 1549 if (m_makewritable(&m, 0, 1550 sizeof(struct ether_vlan_header), M_DONTWAIT)) { 1551 m_freem(m); 1552 if_statinc(ifp, if_ierrors); 1553 return; 1554 } 1555 1556 evl = mtod(m, struct ether_vlan_header *); 1557 KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN); 1558 1559 vid = EVL_VLANOFTAG(ntohs(evl->evl_tag)); 1560 1561 /* 1562 * Restore the original ethertype. We'll remove 1563 * the encapsulation after we've found the vlan 1564 * interface corresponding to the tag. 1565 */ 1566 evl->evl_encap_proto = evl->evl_proto; 1567 } 1568 1569 mib = vlan_lookup_tag_psref(ifp, vid, &psref); 1570 if (mib == NULL) { 1571 m_freem(m); 1572 if_statinc(ifp, if_noproto); 1573 return; 1574 } 1575 KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN); 1576 1577 ifv = mib->ifvm_ifvlan; 1578 if ((ifv->ifv_if.if_flags & (IFF_UP | IFF_RUNNING)) != 1579 (IFF_UP | IFF_RUNNING)) { 1580 m_freem(m); 1581 if_statinc(ifp, if_noproto); 1582 goto out; 1583 } 1584 1585 /* 1586 * Now, remove the encapsulation header. The original 1587 * header has already been fixed up above. 1588 */ 1589 if (!have_vtag) { 1590 memmove(mtod(m, char *) + mib->ifvm_encaplen, 1591 mtod(m, void *), sizeof(struct ether_header)); 1592 m_adj(m, mib->ifvm_encaplen); 1593 } 1594 1595 /* 1596 * Drop promiscuously received packets if we are not in 1597 * promiscuous mode 1598 */ 1599 if ((m->m_flags & (M_BCAST | M_MCAST)) == 0 && 1600 (ifp->if_flags & IFF_PROMISC) && 1601 (ifv->ifv_if.if_flags & IFF_PROMISC) == 0) { 1602 struct ether_header *eh; 1603 1604 eh = mtod(m, struct ether_header *); 1605 if (memcmp(CLLADDR(ifv->ifv_if.if_sadl), 1606 eh->ether_dhost, ETHER_ADDR_LEN) != 0) { 1607 m_freem(m); 1608 if_statinc(&ifv->ifv_if, if_ierrors); 1609 goto out; 1610 } 1611 } 1612 1613 m_set_rcvif(m, &ifv->ifv_if); 1614 1615 if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0) 1616 goto out; 1617 if (m == NULL) 1618 goto out; 1619 1620 m->m_flags &= ~M_PROMISC; 1621 if_input(&ifv->ifv_if, m); 1622 out: 1623 vlan_putref_linkmib(mib, &psref); 1624 } 1625 1626 /* 1627 * If the parent link state changed, the vlan link state should change also. 
 */
static void
vlan_link_state_changed(void *xifv)
{
	struct ifvlan *ifv = xifv;
	struct ifnet *ifp, *p;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int bound;

	bound = curlwp_bind();
	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL) {
		curlwp_bindx(bound);
		return;
	}

	if (mib->ifvm_p == NULL) {
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		return;
	}

	ifp = &ifv->ifv_if;
	p = mib->ifvm_p;
	if_link_state_change(ifp, p->if_link_state);

	vlan_putref_linkmib(mib, &psref);
	curlwp_bindx(bound);
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL)
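
/*
 * Example (illustrative only, never compiled into the driver): a rough
 * sketch of how a userland program could drive the SIOCSETVLAN and
 * SIOCGETVLAN ioctls handled by vlan_ioctl() above.  The names "vlan0"
 * and "wm0" and the tag value 5 are placeholders, and the vlan interface
 * is assumed to have been created beforehand (e.g. "ifconfig vlan0
 * create").
 */
#if 0
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_vlanvar.h>

#include <err.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	struct vlanreq vlr;
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s == -1)
		err(1, "socket");

	/* Attach vlan0 to parent wm0 with VLAN ID 5. */
	memset(&ifr, 0, sizeof(ifr));
	memset(&vlr, 0, sizeof(vlr));
	strlcpy(ifr.ifr_name, "vlan0", sizeof(ifr.ifr_name));
	strlcpy(vlr.vlr_parent, "wm0", sizeof(vlr.vlr_parent));
	vlr.vlr_tag = 5;
	ifr.ifr_data = &vlr;
	if (ioctl(s, SIOCSETVLAN, &ifr) == -1)
		err(1, "SIOCSETVLAN");

	/* Read the configuration back. */
	memset(&vlr, 0, sizeof(vlr));
	ifr.ifr_data = &vlr;
	if (ioctl(s, SIOCGETVLAN, &ifr) == -1)
		err(1, "SIOCGETVLAN");
	printf("vlan0: parent %s, tag %hu\n", vlr.vlr_parent,
	    (unsigned short)vlr.vlr_tag);

	close(s);
	return 0;
}
#endif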