xref: /netbsd-src/sys/net/if_vlan.c (revision c38e7cc395b1472a774ff828e46123de44c628e9)
1 /*	$NetBSD: if_vlan.c,v 1.125 2018/03/16 17:00:35 tih Exp $	*/
2 
3 /*
4  * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright 1998 Massachusetts Institute of Technology
34  *
35  * Permission to use, copy, modify, and distribute this software and
36  * its documentation for any purpose and without fee is hereby
37  * granted, provided that both the above copyright notice and this
38  * permission notice appear in all copies, that both the above
39  * copyright notice and this permission notice appear in all
40  * supporting documentation, and that the name of M.I.T. not be used
41  * in advertising or publicity pertaining to distribution of the
42  * software without specific, written prior permission.  M.I.T. makes
43  * no representations about the suitability of this software for any
44  * purpose.  It is provided "as is" without express or implied
45  * warranty.
46  *
47  * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
48  * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
49  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
50  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
51  * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
54  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
55  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
56  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
57  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
61  * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
62  */
63 
64 /*
65  * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.  Might be
66  * extended some day to also handle IEEE 802.1P priority tagging.  This is
67  * sort of sneaky in the implementation, since we need to pretend to be
68  * enough of an Ethernet implementation to make ARP work.  The way we do
69  * this is by telling everyone that we are an Ethernet interface, and then
70  * catch the packets that ether_output() left on our output queue when it
71  * calls if_start(), rewrite them for use by the real outgoing interface,
72  * and ask it to send them.
73  *
74  * TODO:
75  *
76  *	- Need some way to notify vlan interfaces when the parent
77  *	  interface changes MTU.
78  */
79 
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.125 2018/03/16 17:00:35 tih Exp $");
82 
83 #ifdef _KERNEL_OPT
84 #include "opt_inet.h"
85 #include "opt_net_mpsafe.h"
86 #endif
87 
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/mbuf.h>
92 #include <sys/queue.h>
93 #include <sys/socket.h>
94 #include <sys/sockio.h>
95 #include <sys/systm.h>
96 #include <sys/proc.h>
97 #include <sys/kauth.h>
98 #include <sys/mutex.h>
99 #include <sys/kmem.h>
100 #include <sys/cpu.h>
101 #include <sys/pserialize.h>
102 #include <sys/psref.h>
103 #include <sys/pslist.h>
104 #include <sys/atomic.h>
105 #include <sys/device.h>
106 #include <sys/module.h>
107 
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_ether.h>
113 #include <net/if_vlanvar.h>
114 
115 #ifdef INET
116 #include <netinet/in.h>
117 #include <netinet/if_inarp.h>
118 #endif
119 #ifdef INET6
120 #include <netinet6/in6_ifattach.h>
121 #include <netinet6/in6_var.h>
122 #endif
123 
124 #include "ioconf.h"
125 
126 struct vlan_mc_entry {
127 	LIST_ENTRY(vlan_mc_entry)	mc_entries;
128 	/*
129 	 * A key to identify this entry.  The mc_addr below can't be
130 	 * used since multiple sockaddr may mapped into the same
131 	 * ether_multi (e.g., AF_UNSPEC).
132 	 */
133 	union {
134 		struct ether_multi	*mcu_enm;
135 	} mc_u;
136 	struct sockaddr_storage		mc_addr;
137 };
138 
139 #define	mc_enm		mc_u.mcu_enm
140 
141 
142 struct ifvlan_linkmib {
143 	struct ifvlan *ifvm_ifvlan;
144 	const struct vlan_multisw *ifvm_msw;
145 	int	ifvm_encaplen;	/* encapsulation length */
146 	int	ifvm_mtufudge;	/* MTU fudged by this much */
147 	int	ifvm_mintu;	/* min transmission unit */
148 	uint16_t ifvm_proto;	/* encapsulation ethertype */
149 	uint16_t ifvm_tag;	/* tag to apply on packets */
150 	struct ifnet *ifvm_p;		/* parent interface of this vlan */
151 
152 	struct psref_target ifvm_psref;
153 };
154 
155 struct ifvlan {
156 	union {
157 		struct ethercom ifvu_ec;
158 	} ifv_u;
159 	struct ifvlan_linkmib *ifv_mib;	/*
160 					 * reader must use vlan_getref_linkmib()
161 					 * instead of direct dereference
162 					 */
163 	kmutex_t ifv_lock;		/* writer lock for ifv_mib */
164 
165 	LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;
166 	LIST_ENTRY(ifvlan) ifv_list;
167 	struct pslist_entry ifv_hash;
168 	int ifv_flags;
169 };
170 
171 #define	IFVF_PROMISC	0x01		/* promiscuous mode enabled */
172 
173 #define	ifv_ec		ifv_u.ifvu_ec
174 
175 #define	ifv_if		ifv_ec.ec_if
176 
177 #define	ifv_msw		ifv_mib.ifvm_msw
178 #define	ifv_encaplen	ifv_mib.ifvm_encaplen
179 #define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
180 #define	ifv_mintu	ifv_mib.ifvm_mintu
181 #define	ifv_tag		ifv_mib.ifvm_tag
182 
183 struct vlan_multisw {
184 	int	(*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
185 	int	(*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
186 	void	(*vmsw_purgemulti)(struct ifvlan *);
187 };
188 
189 static int	vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
190 static int	vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
191 static void	vlan_ether_purgemulti(struct ifvlan *);
192 
193 const struct vlan_multisw vlan_ether_multisw = {
194 	.vmsw_addmulti = vlan_ether_addmulti,
195 	.vmsw_delmulti = vlan_ether_delmulti,
196 	.vmsw_purgemulti = vlan_ether_purgemulti,
197 };
198 
199 static int	vlan_clone_create(struct if_clone *, int);
200 static int	vlan_clone_destroy(struct ifnet *);
201 static int	vlan_config(struct ifvlan *, struct ifnet *,
202     uint16_t);
203 static int	vlan_ioctl(struct ifnet *, u_long, void *);
204 static void	vlan_start(struct ifnet *);
205 static int	vlan_transmit(struct ifnet *, struct mbuf *);
206 static void	vlan_unconfig(struct ifnet *);
207 static int	vlan_unconfig_locked(struct ifvlan *,
208     struct ifvlan_linkmib *);
209 static void	vlan_hash_init(void);
210 static int	vlan_hash_fini(void);
211 static int	vlan_tag_hash(uint16_t, u_long);
212 static struct ifvlan_linkmib*	vlan_getref_linkmib(struct ifvlan *,
213     struct psref *);
214 static void	vlan_putref_linkmib(struct ifvlan_linkmib *,
215     struct psref *);
216 static void	vlan_linkmib_update(struct ifvlan *,
217     struct ifvlan_linkmib *);
218 static struct ifvlan_linkmib*	vlan_lookup_tag_psref(struct ifnet *,
219     uint16_t, struct psref *);
220 
221 LIST_HEAD(vlan_ifvlist, ifvlan);
222 static struct {
223 	kmutex_t lock;
224 	struct vlan_ifvlist list;
225 } ifv_list __cacheline_aligned;
226 
227 
228 #if !defined(VLAN_TAG_HASH_SIZE)
229 #define VLAN_TAG_HASH_SIZE 32
230 #endif
231 static struct {
232 	kmutex_t lock;
233 	struct pslist_head *lists;
234 	u_long mask;
235 } ifv_hash __cacheline_aligned = {
236 	.lists = NULL,
237 	.mask = 0,
238 };
239 
240 pserialize_t vlan_psz __read_mostly;
241 static struct psref_class *ifvm_psref_class __read_mostly;
242 
243 struct if_clone vlan_cloner =
244     IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);
245 
246 /* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */
247 static char vlan_zero_pad_buff[ETHER_MIN_LEN];
248 
249 static inline int
250 vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
251 {
252 	int e;
253 
254 	KERNEL_LOCK_UNLESS_NET_MPSAFE();
255 	e = ifpromisc(ifp, pswitch);
256 	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
257 
258 	return e;
259 }
260 
261 static inline int
262 vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
263 {
264 	int e;
265 
266 	KERNEL_LOCK_UNLESS_NET_MPSAFE();
267 	e = ifpromisc_locked(ifp, pswitch);
268 	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
269 
270 	return e;
271 }
272 
273 void
274 vlanattach(int n)
275 {
276 
277 	/*
278 	 * Nothing to do here, initialization is handled by the
279 	 * module initialization code in vlaninit() below.
280 	 */
281 }
282 
283 static void
284 vlaninit(void)
285 {
286 	mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
287 	LIST_INIT(&ifv_list.list);
288 
289 	mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
290 	vlan_psz = pserialize_create();
291 	ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
292 	if_clone_attach(&vlan_cloner);
293 
294 	vlan_hash_init();
295 }
296 
297 static int
298 vlandetach(void)
299 {
300 	bool is_empty;
301 	int error;
302 
303 	mutex_enter(&ifv_list.lock);
304 	is_empty = LIST_EMPTY(&ifv_list.list);
305 	mutex_exit(&ifv_list.lock);
306 
307 	if (!is_empty)
308 		return EBUSY;
309 
310 	error = vlan_hash_fini();
311 	if (error != 0)
312 		return error;
313 
314 	if_clone_detach(&vlan_cloner);
315 	psref_class_destroy(ifvm_psref_class);
316 	pserialize_destroy(vlan_psz);
317 	mutex_destroy(&ifv_hash.lock);
318 	mutex_destroy(&ifv_list.lock);
319 
320 	return 0;
321 }
322 
323 static void
324 vlan_reset_linkname(struct ifnet *ifp)
325 {
326 
327 	/*
328 	 * We start out with a "802.1Q VLAN" type and zero-length
329 	 * addresses.  When we attach to a parent interface, we
330 	 * inherit its type, address length, address, and data link
331 	 * type.
332 	 */
333 
334 	ifp->if_type = IFT_L2VLAN;
335 	ifp->if_addrlen = 0;
336 	ifp->if_dlt = DLT_NULL;
337 	if_alloc_sadl(ifp);
338 }
339 
340 static int
341 vlan_clone_create(struct if_clone *ifc, int unit)
342 {
343 	struct ifvlan *ifv;
344 	struct ifnet *ifp;
345 	struct ifvlan_linkmib *mib;
346 	int rv;
347 
348 	ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK|M_ZERO);
349 	mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
350 	ifp = &ifv->ifv_if;
351 	LIST_INIT(&ifv->ifv_mc_listhead);
352 
353 	mib->ifvm_ifvlan = ifv;
354 	mib->ifvm_p = NULL;
355 	psref_target_init(&mib->ifvm_psref, ifvm_psref_class);
356 
357 	mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
358 	ifv->ifv_mib = mib;
359 
360 	mutex_enter(&ifv_list.lock);
361 	LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
362 	mutex_exit(&ifv_list.lock);
363 
364 	if_initname(ifp, ifc->ifc_name, unit);
365 	ifp->if_softc = ifv;
366 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
367 	ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
368 #ifdef NET_MPSAFE
369 	ifp->if_extflags |= IFEF_MPSAFE;
370 #endif
371 	ifp->if_start = vlan_start;
372 	ifp->if_transmit = vlan_transmit;
373 	ifp->if_ioctl = vlan_ioctl;
374 	IFQ_SET_READY(&ifp->if_snd);
375 
376 	rv = if_initialize(ifp);
377 	if (rv != 0) {
378 		aprint_error("%s: if_initialize failed(%d)\n", ifp->if_xname,
379 		    rv);
380 		goto fail;
381 	}
382 
383 	vlan_reset_linkname(ifp);
384 	if_register(ifp);
385 	return 0;
386 
387 fail:
388 	mutex_enter(&ifv_list.lock);
389 	LIST_REMOVE(ifv, ifv_list);
390 	mutex_exit(&ifv_list.lock);
391 
392 	mutex_destroy(&ifv->ifv_lock);
393 	psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
394 	kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
395 	free(ifv, M_DEVBUF);
396 
397 	return rv;
398 }
399 
400 static int
401 vlan_clone_destroy(struct ifnet *ifp)
402 {
403 	struct ifvlan *ifv = ifp->if_softc;
404 
405 	mutex_enter(&ifv_list.lock);
406 	LIST_REMOVE(ifv, ifv_list);
407 	mutex_exit(&ifv_list.lock);
408 
409 	IFNET_LOCK(ifp);
410 	vlan_unconfig(ifp);
411 	IFNET_UNLOCK(ifp);
412 	if_detach(ifp);
413 
414 	psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
415 	kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
416 	mutex_destroy(&ifv->ifv_lock);
417 	free(ifv, M_DEVBUF);
418 
419 	return 0;
420 }
421 
422 /*
423  * Configure a VLAN interface.
424  */
425 static int
426 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
427 {
428 	struct ifnet *ifp = &ifv->ifv_if;
429 	struct ifvlan_linkmib *nmib = NULL;
430 	struct ifvlan_linkmib *omib = NULL;
431 	struct ifvlan_linkmib *checkmib;
432 	struct psref_target *nmib_psref = NULL;
433 	const uint16_t vid = EVL_VLANOFTAG(tag);
434 	int error = 0;
435 	int idx;
436 	bool omib_cleanup = false;
437 	struct psref psref;
438 
439 	/* VLAN ID 0 and 4095 are reserved in the spec */
440 	if ((vid == 0) || (vid == 0xfff))
441 		return EINVAL;
442 
443 	nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
444 	mutex_enter(&ifv->ifv_lock);
445 	omib = ifv->ifv_mib;
446 
447 	if (omib->ifvm_p != NULL) {
448 		error = EBUSY;
449 		goto done;
450 	}
451 
452 	/* Duplicate check */
453 	checkmib = vlan_lookup_tag_psref(p, vid, &psref);
454 	if (checkmib != NULL) {
455 		vlan_putref_linkmib(checkmib, &psref);
456 		error = EEXIST;
457 		goto done;
458 	}
459 
460 	*nmib = *omib;
461 	nmib_psref = &nmib->ifvm_psref;
462 
463 	psref_target_init(nmib_psref, ifvm_psref_class);
464 
465 	switch (p->if_type) {
466 	case IFT_ETHER:
467 	    {
468 		struct ethercom *ec = (void *)p;
469 		nmib->ifvm_msw = &vlan_ether_multisw;
470 		nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
471 		nmib->ifvm_mintu = ETHERMIN;
472 
473 		if (ec->ec_nvlans++ == 0) {
474 			IFNET_LOCK(p);
475 			error = ether_enable_vlan_mtu(p);
476 			IFNET_UNLOCK(p);
477 			if (error >= 0) {
478 				if (error) {
479 					ec->ec_nvlans--;
480 					goto done;
481 				}
482 				nmib->ifvm_mtufudge = 0;
483 			} else {
484 				/*
485 				 * Fudge the MTU by the encapsulation size. This
486 				 * makes us incompatible with strictly compliant
487 				 * 802.1Q implementations, but allows us to use
488 				 * the feature with other NetBSD
489 				 * implementations, which might still be useful.
490 				 */
491 				nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
492 			}
493 			error = 0;
494 		}
495 
496 		/*
497 		 * If the parent interface can do hardware-assisted
498 		 * VLAN encapsulation, then propagate its hardware-
499 		 * assisted checksumming flags and tcp segmentation
500 		 * offload.
501 		 */
502 		if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
503 			ec->ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
504 			ifp->if_capabilities = p->if_capabilities &
505 			    (IFCAP_TSOv4 | IFCAP_TSOv6 |
506 			     IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_IPv4_Rx|
507 			     IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_TCPv4_Rx|
508 			     IFCAP_CSUM_UDPv4_Tx|IFCAP_CSUM_UDPv4_Rx|
509 			     IFCAP_CSUM_TCPv6_Tx|IFCAP_CSUM_TCPv6_Rx|
510 			     IFCAP_CSUM_UDPv6_Tx|IFCAP_CSUM_UDPv6_Rx);
511 		}
512 
513 		/*
514 		 * We inherit the parent's Ethernet address.
515 		 */
516 		ether_ifattach(ifp, CLLADDR(p->if_sadl));
517 		ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
518 		break;
519 	    }
520 
521 	default:
522 		error = EPROTONOSUPPORT;
523 		goto done;
524 	}
525 
526 	nmib->ifvm_p = p;
527 	nmib->ifvm_tag = vid;
528 	ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
529 	ifv->ifv_if.if_flags = p->if_flags &
530 	    (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
531 
532 	/*
533 	 * Inherit the if_type from the parent.  This allows us
534 	 * to participate in bridges of that type.
535 	 */
536 	ifv->ifv_if.if_type = p->if_type;
537 
538 	PSLIST_ENTRY_INIT(ifv, ifv_hash);
539 	idx = vlan_tag_hash(vid, ifv_hash.mask);
540 
541 	mutex_enter(&ifv_hash.lock);
542 	PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
543 	mutex_exit(&ifv_hash.lock);
544 
545 	vlan_linkmib_update(ifv, nmib);
546 	nmib = NULL;
547 	nmib_psref = NULL;
548 	omib_cleanup = true;
549 
550 done:
551 	mutex_exit(&ifv->ifv_lock);
552 
553 	if (nmib_psref)
554 		psref_target_destroy(nmib_psref, ifvm_psref_class);
555 	if (nmib)
556 		kmem_free(nmib, sizeof(*nmib));
557 	if (omib_cleanup)
558 		kmem_free(omib, sizeof(*omib));
559 
560 	return error;
561 }
562 
563 /*
564  * Unconfigure a VLAN interface.
565  */
566 static void
567 vlan_unconfig(struct ifnet *ifp)
568 {
569 	struct ifvlan *ifv = ifp->if_softc;
570 	struct ifvlan_linkmib *nmib = NULL;
571 	int error;
572 
573 	KASSERT(IFNET_LOCKED(ifp));
574 
575 	nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
576 
577 	mutex_enter(&ifv->ifv_lock);
578 	error = vlan_unconfig_locked(ifv, nmib);
579 	mutex_exit(&ifv->ifv_lock);
580 
581 	if (error)
582 		kmem_free(nmib, sizeof(*nmib));
583 }
584 static int
585 vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
586 {
587 	struct ifnet *p;
588 	struct ifnet *ifp = &ifv->ifv_if;
589 	struct psref_target *nmib_psref = NULL;
590 	struct ifvlan_linkmib *omib;
591 	int error = 0;
592 
593 	KASSERT(IFNET_LOCKED(ifp));
594 	KASSERT(mutex_owned(&ifv->ifv_lock));
595 
596 	ifp->if_flags &= ~(IFF_UP|IFF_RUNNING);
597 
598 	omib = ifv->ifv_mib;
599 	p = omib->ifvm_p;
600 
601 	if (p == NULL) {
602 		error = -1;
603 		goto done;
604 	}
605 
606 	*nmib = *omib;
607 	nmib_psref = &nmib->ifvm_psref;
608 	psref_target_init(nmib_psref, ifvm_psref_class);
609 
610 	/*
611  	 * Since the interface is being unconfigured, we need to empty the
612 	 * list of multicast groups that we may have joined while we were
613 	 * alive and remove them from the parent's list also.
614 	 */
615 	(*nmib->ifvm_msw->vmsw_purgemulti)(ifv);
616 
617 	/* Disconnect from parent. */
618 	switch (p->if_type) {
619 	case IFT_ETHER:
620 	    {
621 		struct ethercom *ec = (void *)p;
622 		if (--ec->ec_nvlans == 0) {
623 			IFNET_LOCK(p);
624 			(void) ether_disable_vlan_mtu(p);
625 			IFNET_UNLOCK(p);
626 		}
627 
628 		ether_ifdetach(ifp);
629 		/* Restore vlan_ioctl overwritten by ether_ifdetach */
630 		ifp->if_ioctl = vlan_ioctl;
631 		vlan_reset_linkname(ifp);
632 		break;
633 	    }
634 
635 	default:
636 		panic("%s: impossible", __func__);
637 	}
638 
639 	nmib->ifvm_p = NULL;
640 	ifv->ifv_if.if_mtu = 0;
641 	ifv->ifv_flags = 0;
642 
643 	mutex_enter(&ifv_hash.lock);
644 	PSLIST_WRITER_REMOVE(ifv, ifv_hash);
645 	pserialize_perform(vlan_psz);
646 	mutex_exit(&ifv_hash.lock);
647 	PSLIST_ENTRY_DESTROY(ifv, ifv_hash);
648 
649 	vlan_linkmib_update(ifv, nmib);
650 
651 	mutex_exit(&ifv->ifv_lock);
652 
653 	nmib_psref = NULL;
654 	kmem_free(omib, sizeof(*omib));
655 
656 #ifdef INET6
657 	KERNEL_LOCK_UNLESS_NET_MPSAFE();
658 	/* To delete v6 link local addresses */
659 	if (in6_present)
660 		in6_ifdetach(ifp);
661 	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
662 #endif
663 
664 	if ((ifp->if_flags & IFF_PROMISC) != 0)
665 		vlan_safe_ifpromisc_locked(ifp, 0);
666 	if_down_locked(ifp);
667 	ifp->if_capabilities = 0;
668 	mutex_enter(&ifv->ifv_lock);
669 done:
670 
671 	if (nmib_psref)
672 		psref_target_destroy(nmib_psref, ifvm_psref_class);
673 
674 	return error;
675 }
676 
677 static void
678 vlan_hash_init(void)
679 {
680 
681 	ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
682 	    &ifv_hash.mask);
683 }
684 
685 static int
686 vlan_hash_fini(void)
687 {
688 	int i;
689 
690 	mutex_enter(&ifv_hash.lock);
691 
692 	for (i = 0; i < ifv_hash.mask + 1; i++) {
693 		if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
694 		    ifv_hash) != NULL) {
695 			mutex_exit(&ifv_hash.lock);
696 			return EBUSY;
697 		}
698 	}
699 
700 	for (i = 0; i < ifv_hash.mask + 1; i++)
701 		PSLIST_DESTROY(&ifv_hash.lists[i]);
702 
703 	mutex_exit(&ifv_hash.lock);
704 
705 	hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);
706 
707 	ifv_hash.lists = NULL;
708 	ifv_hash.mask = 0;
709 
710 	return 0;
711 }
712 
713 static int
714 vlan_tag_hash(uint16_t tag, u_long mask)
715 {
716 	uint32_t hash;
717 
718 	hash = (tag >> 8) ^ tag;
719 	hash = (hash >> 2) ^ hash;
720 
721 	return hash & mask;
722 }
723 
724 static struct ifvlan_linkmib *
725 vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
726 {
727 	struct ifvlan_linkmib *mib;
728 	int s;
729 
730 	s = pserialize_read_enter();
731 	mib = sc->ifv_mib;
732 	if (mib == NULL) {
733 		pserialize_read_exit(s);
734 		return NULL;
735 	}
736 	membar_datadep_consumer();
737 	psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
738 	pserialize_read_exit(s);
739 
740 	return mib;
741 }
742 
743 static void
744 vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
745 {
746 	if (mib == NULL)
747 		return;
748 	psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
749 }
750 
751 static struct ifvlan_linkmib *
752 vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
753 {
754 	int idx;
755 	int s;
756 	struct ifvlan *sc;
757 
758 	idx = vlan_tag_hash(tag, ifv_hash.mask);
759 
760 	s = pserialize_read_enter();
761 	PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
762 	    ifv_hash) {
763 		struct ifvlan_linkmib *mib = sc->ifv_mib;
764 		if (mib == NULL)
765 			continue;
766 		if (mib->ifvm_tag != tag)
767 			continue;
768 		if (mib->ifvm_p != ifp)
769 			continue;
770 
771 		psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
772 		pserialize_read_exit(s);
773 		return mib;
774 	}
775 	pserialize_read_exit(s);
776 	return NULL;
777 }
778 
779 static void
780 vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
781 {
782 	struct ifvlan_linkmib *omib = ifv->ifv_mib;
783 
784 	KASSERT(mutex_owned(&ifv->ifv_lock));
785 
786 	membar_producer();
787 	ifv->ifv_mib = nmib;
788 
789 	pserialize_perform(vlan_psz);
790 	psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
791 }
792 
793 /*
794  * Called when a parent interface is detaching; destroy any VLAN
795  * configuration for the parent interface.
796  */
797 void
798 vlan_ifdetach(struct ifnet *p)
799 {
800 	struct ifvlan *ifv;
801 	struct ifvlan_linkmib *mib, **nmibs;
802 	struct psref psref;
803 	int error;
804 	int bound;
805 	int i, cnt = 0;
806 
807 	bound = curlwp_bind();
808 
809 	mutex_enter(&ifv_list.lock);
810 	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
811 		mib = vlan_getref_linkmib(ifv, &psref);
812 		if (mib == NULL)
813 			continue;
814 
815 		if (mib->ifvm_p == p)
816 			cnt++;
817 
818 		vlan_putref_linkmib(mib, &psref);
819 	}
820 	mutex_exit(&ifv_list.lock);
821 
822 	if (cnt == 0) {
823 		curlwp_bindx(bound);
824 		return;
825 	}
826 
827 	/*
828 	 * The value of "cnt" does not increase while ifv_list.lock
829 	 * and ifv->ifv_lock are released here, because the parent
830 	 * interface is detaching.
831 	 */
832 	nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
833 	for (i = 0; i < cnt; i++) {
834 		nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
835 	}
836 
837 	mutex_enter(&ifv_list.lock);
838 
839 	i = 0;
840 	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
841 		struct ifnet *ifp = &ifv->ifv_if;
842 
843 		/* IFNET_LOCK must be held before ifv_lock. */
844 		IFNET_LOCK(ifp);
845 		mutex_enter(&ifv->ifv_lock);
846 
847 		/* XXX ifv_mib = NULL? */
848 		if (ifv->ifv_mib->ifvm_p == p) {
849 			KASSERTMSG(i < cnt, "no memory for unconfig, parent=%s",
850 			    p->if_xname);
851 			error = vlan_unconfig_locked(ifv, nmibs[i]);
852 			if (!error) {
853 				nmibs[i] = NULL;
854 				i++;
855 			}
856 
857 		}
858 
859 		mutex_exit(&ifv->ifv_lock);
860 		IFNET_UNLOCK(ifp);
861 	}
862 
863 	mutex_exit(&ifv_list.lock);
864 
865 	curlwp_bindx(bound);
866 
867 	for (i = 0; i < cnt; i++) {
868 		if (nmibs[i])
869 			kmem_free(nmibs[i], sizeof(*nmibs[i]));
870 	}
871 
872 	kmem_free(nmibs, sizeof(*nmibs) * cnt);
873 
874 	return;
875 }
876 
877 static int
878 vlan_set_promisc(struct ifnet *ifp)
879 {
880 	struct ifvlan *ifv = ifp->if_softc;
881 	struct ifvlan_linkmib *mib;
882 	struct psref psref;
883 	int error = 0;
884 	int bound;
885 
886 	bound = curlwp_bind();
887 	mib = vlan_getref_linkmib(ifv, &psref);
888 	if (mib == NULL) {
889 		curlwp_bindx(bound);
890 		return EBUSY;
891 	}
892 
893 	if ((ifp->if_flags & IFF_PROMISC) != 0) {
894 		if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
895 			error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
896 			if (error == 0)
897 				ifv->ifv_flags |= IFVF_PROMISC;
898 		}
899 	} else {
900 		if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
901 			error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
902 			if (error == 0)
903 				ifv->ifv_flags &= ~IFVF_PROMISC;
904 		}
905 	}
906 	vlan_putref_linkmib(mib, &psref);
907 	curlwp_bindx(bound);
908 
909 	return error;
910 }
911 
912 static int
913 vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
914 {
915 	struct lwp *l = curlwp;
916 	struct ifvlan *ifv = ifp->if_softc;
917 	struct ifaddr *ifa = (struct ifaddr *) data;
918 	struct ifreq *ifr = (struct ifreq *) data;
919 	struct ifnet *pr;
920 	struct ifcapreq *ifcr;
921 	struct vlanreq vlr;
922 	struct ifvlan_linkmib *mib;
923 	struct psref psref;
924 	int error = 0;
925 	int bound;
926 
927 	switch (cmd) {
928 	case SIOCSIFMTU:
929 		bound = curlwp_bind();
930 		mib = vlan_getref_linkmib(ifv, &psref);
931 		if (mib == NULL) {
932 			curlwp_bindx(bound);
933 			error = EBUSY;
934 			break;
935 		}
936 
937 		if (mib->ifvm_p == NULL) {
938 			vlan_putref_linkmib(mib, &psref);
939 			curlwp_bindx(bound);
940 			error = EINVAL;
941 		} else if (
942 		    ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
943 		    ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
944 			vlan_putref_linkmib(mib, &psref);
945 			curlwp_bindx(bound);
946 			error = EINVAL;
947 		} else {
948 			vlan_putref_linkmib(mib, &psref);
949 			curlwp_bindx(bound);
950 
951 			error = ifioctl_common(ifp, cmd, data);
952 			if (error == ENETRESET)
953 				error = 0;
954 		}
955 
956 		break;
957 
958 	case SIOCSETVLAN:
959 		if ((error = kauth_authorize_network(l->l_cred,
960 		    KAUTH_NETWORK_INTERFACE,
961 		    KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
962 		    NULL)) != 0)
963 			break;
964 		if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
965 			break;
966 
967 		if (vlr.vlr_parent[0] == '\0') {
968 			bound = curlwp_bind();
969 			mib = vlan_getref_linkmib(ifv, &psref);
970 			if (mib == NULL) {
971 				curlwp_bindx(bound);
972 				error = EBUSY;
973 				break;
974 			}
975 
976 			if (mib->ifvm_p != NULL &&
977 			    (ifp->if_flags & IFF_PROMISC) != 0)
978 				error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
979 
980 			vlan_putref_linkmib(mib, &psref);
981 			curlwp_bindx(bound);
982 
983 			vlan_unconfig(ifp);
984 			break;
985 		}
986 		if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
987 			error = EINVAL;		 /* check for valid tag */
988 			break;
989 		}
990 		if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
991 			error = ENOENT;
992 			break;
993 		}
994 		error = vlan_config(ifv, pr, vlr.vlr_tag);
995 		if (error != 0) {
996 			break;
997 		}
998 
999 		/* Update promiscuous mode, if necessary. */
1000 		vlan_set_promisc(ifp);
1001 
1002 		ifp->if_flags |= IFF_RUNNING;
1003 		break;
1004 
1005 	case SIOCGETVLAN:
1006 		memset(&vlr, 0, sizeof(vlr));
1007 		bound = curlwp_bind();
1008 		mib = vlan_getref_linkmib(ifv, &psref);
1009 		if (mib == NULL) {
1010 			curlwp_bindx(bound);
1011 			error = EBUSY;
1012 			break;
1013 		}
1014 		if (mib->ifvm_p != NULL) {
1015 			snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
1016 			    mib->ifvm_p->if_xname);
1017 			vlr.vlr_tag = mib->ifvm_tag;
1018 		}
1019 		vlan_putref_linkmib(mib, &psref);
1020 		curlwp_bindx(bound);
1021 		error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
1022 		break;
1023 
1024 	case SIOCSIFFLAGS:
1025 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1026 			break;
1027 		/*
1028 		 * For promiscuous mode, we enable promiscuous mode on
1029 		 * the parent if we need promiscuous on the VLAN interface.
1030 		 */
1031 		bound = curlwp_bind();
1032 		mib = vlan_getref_linkmib(ifv, &psref);
1033 		if (mib == NULL) {
1034 			curlwp_bindx(bound);
1035 			error = EBUSY;
1036 			break;
1037 		}
1038 
1039 		if (mib->ifvm_p != NULL)
1040 			error = vlan_set_promisc(ifp);
1041 		vlan_putref_linkmib(mib, &psref);
1042 		curlwp_bindx(bound);
1043 		break;
1044 
1045 	case SIOCADDMULTI:
1046 		mutex_enter(&ifv->ifv_lock);
1047 		mib = ifv->ifv_mib;
1048 		if (mib == NULL) {
1049 			error = EBUSY;
1050 			mutex_exit(&ifv->ifv_lock);
1051 			break;
1052 		}
1053 
1054 		error = (mib->ifvm_p != NULL) ?
1055 		    (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
1056 		mib = NULL;
1057 		mutex_exit(&ifv->ifv_lock);
1058 		break;
1059 
1060 	case SIOCDELMULTI:
1061 		mutex_enter(&ifv->ifv_lock);
1062 		mib = ifv->ifv_mib;
1063 		if (mib == NULL) {
1064 			error = EBUSY;
1065 			mutex_exit(&ifv->ifv_lock);
1066 			break;
1067 		}
1068 		error = (mib->ifvm_p != NULL) ?
1069 		    (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
1070 		mib = NULL;
1071 		mutex_exit(&ifv->ifv_lock);
1072 		break;
1073 
1074 	case SIOCSIFCAP:
1075 		ifcr = data;
1076 		/* make sure caps are enabled on parent */
1077 		bound = curlwp_bind();
1078 		mib = vlan_getref_linkmib(ifv, &psref);
1079 		if (mib == NULL) {
1080 			curlwp_bindx(bound);
1081 			error = EBUSY;
1082 			break;
1083 		}
1084 
1085 		if (mib->ifvm_p == NULL) {
1086 			vlan_putref_linkmib(mib, &psref);
1087 			curlwp_bindx(bound);
1088 			error = EINVAL;
1089 			break;
1090 		}
1091 		if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
1092 		    ifcr->ifcr_capenable) {
1093 			vlan_putref_linkmib(mib, &psref);
1094 			curlwp_bindx(bound);
1095 			error = EINVAL;
1096 			break;
1097 		}
1098 
1099 		vlan_putref_linkmib(mib, &psref);
1100 		curlwp_bindx(bound);
1101 
1102 		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
1103 			error = 0;
1104 		break;
1105 	case SIOCINITIFADDR:
1106 		bound = curlwp_bind();
1107 		mib = vlan_getref_linkmib(ifv, &psref);
1108 		if (mib == NULL) {
1109 			curlwp_bindx(bound);
1110 			error = EBUSY;
1111 			break;
1112 		}
1113 
1114 		if (mib->ifvm_p == NULL) {
1115 			error = EINVAL;
1116 			vlan_putref_linkmib(mib, &psref);
1117 			curlwp_bindx(bound);
1118 			break;
1119 		}
1120 		vlan_putref_linkmib(mib, &psref);
1121 		curlwp_bindx(bound);
1122 
1123 		ifp->if_flags |= IFF_UP;
1124 #ifdef INET
1125 		if (ifa->ifa_addr->sa_family == AF_INET)
1126 			arp_ifinit(ifp, ifa);
1127 #endif
1128 		break;
1129 
1130 	default:
1131 		error = ether_ioctl(ifp, cmd, data);
1132 	}
1133 
1134 	return error;
1135 }
1136 
1137 static int
1138 vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
1139 {
1140 	const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1141 	struct vlan_mc_entry *mc;
1142 	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1143 	struct ifvlan_linkmib *mib;
1144 	int error;
1145 
1146 	KASSERT(mutex_owned(&ifv->ifv_lock));
1147 
1148 	if (sa->sa_len > sizeof(struct sockaddr_storage))
1149 		return EINVAL;
1150 
1151 	error = ether_addmulti(sa, &ifv->ifv_ec);
1152 	if (error != ENETRESET)
1153 		return error;
1154 
1155 	/*
1156 	 * This is a new multicast address.  We have to tell parent
1157 	 * about it.  Also, remember this multicast address so that
1158 	 * we can delete it on unconfigure.
1159 	 */
1160 	mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
1161 	if (mc == NULL) {
1162 		error = ENOMEM;
1163 		goto alloc_failed;
1164 	}
1165 
1166 	/*
1167 	 * Since ether_addmulti() returned ENETRESET, the following two
1168 	 * statements shouldn't fail. Here ifv_ec is implicitly protected
1169 	 * by the ifv_lock lock.
1170 	 */
1171 	error = ether_multiaddr(sa, addrlo, addrhi);
1172 	KASSERT(error == 0);
1173 	ETHER_LOOKUP_MULTI(addrlo, addrhi, &ifv->ifv_ec, mc->mc_enm);
1174 	KASSERT(mc->mc_enm != NULL);
1175 
1176 	memcpy(&mc->mc_addr, sa, sa->sa_len);
1177 	LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);
1178 
1179 	mib = ifv->ifv_mib;
1180 
1181 	KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1182 	IFNET_LOCK(mib->ifvm_p);
1183 	error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
1184 	IFNET_UNLOCK(mib->ifvm_p);
1185 	KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1186 
1187 	if (error != 0)
1188 		goto ioctl_failed;
1189 	return error;
1190 
1191 ioctl_failed:
1192 	LIST_REMOVE(mc, mc_entries);
1193 	free(mc, M_DEVBUF);
1194 
1195 alloc_failed:
1196 	(void)ether_delmulti(sa, &ifv->ifv_ec);
1197 	return error;
1198 }
1199 
1200 static int
1201 vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
1202 {
1203 	const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1204 	struct ether_multi *enm;
1205 	struct vlan_mc_entry *mc;
1206 	struct ifvlan_linkmib *mib;
1207 	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1208 	int error;
1209 
1210 	KASSERT(mutex_owned(&ifv->ifv_lock));
1211 
1212 	/*
1213 	 * Find a key to lookup vlan_mc_entry.  We have to do this
1214 	 * before calling ether_delmulti for obvious reasons.
1215 	 */
1216 	if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
1217 		return error;
1218 	ETHER_LOOKUP_MULTI(addrlo, addrhi, &ifv->ifv_ec, enm);
1219 
1220 	error = ether_delmulti(sa, &ifv->ifv_ec);
1221 	if (error != ENETRESET)
1222 		return error;
1223 
1224 	/* We no longer use this multicast address.  Tell parent so. */
1225 	mib = ifv->ifv_mib;
1226 	IFNET_LOCK(mib->ifvm_p);
1227 	error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);
1228 	IFNET_UNLOCK(mib->ifvm_p);
1229 
1230 	if (error == 0) {
1231 		/* And forget about this address. */
1232 		for (mc = LIST_FIRST(&ifv->ifv_mc_listhead); mc != NULL;
1233 		    mc = LIST_NEXT(mc, mc_entries)) {
1234 			if (mc->mc_enm == enm) {
1235 				LIST_REMOVE(mc, mc_entries);
1236 				free(mc, M_DEVBUF);
1237 				break;
1238 			}
1239 		}
1240 		KASSERT(mc != NULL);
1241 	} else
1242 		(void)ether_addmulti(sa, &ifv->ifv_ec);
1243 
1244 	return error;
1245 }
1246 
1247 /*
1248  * Delete any multicast address we have asked to add from parent
1249  * interface.  Called when the vlan is being unconfigured.
1250  */
1251 static void
1252 vlan_ether_purgemulti(struct ifvlan *ifv)
1253 {
1254 	struct vlan_mc_entry *mc;
1255 	struct ifvlan_linkmib *mib;
1256 
1257 	KASSERT(mutex_owned(&ifv->ifv_lock));
1258 	mib = ifv->ifv_mib;
1259 	if (mib == NULL) {
1260 		return;
1261 	}
1262 
1263 	while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
1264 		IFNET_LOCK(mib->ifvm_p);
1265 		(void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
1266 		    (const struct sockaddr *)&mc->mc_addr);
1267 		IFNET_UNLOCK(mib->ifvm_p);
1268 		LIST_REMOVE(mc, mc_entries);
1269 		free(mc, M_DEVBUF);
1270 	}
1271 }
1272 
1273 static void
1274 vlan_start(struct ifnet *ifp)
1275 {
1276 	struct ifvlan *ifv = ifp->if_softc;
1277 	struct ifnet *p;
1278 	struct ethercom *ec;
1279 	struct mbuf *m;
1280 	struct ifvlan_linkmib *mib;
1281 	struct psref psref;
1282 	int error;
1283 
1284 	mib = vlan_getref_linkmib(ifv, &psref);
1285 	if (mib == NULL)
1286 		return;
1287 	p = mib->ifvm_p;
1288 	ec = (void *)mib->ifvm_p;
1289 
1290 	ifp->if_flags |= IFF_OACTIVE;
1291 
1292 	for (;;) {
1293 		IFQ_DEQUEUE(&ifp->if_snd, m);
1294 		if (m == NULL)
1295 			break;
1296 
1297 #ifdef ALTQ
1298 		/*
1299 		 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is
1300 		 * defined.
1301 		 */
1302 		KERNEL_LOCK(1, NULL);
1303 		/*
1304 		 * If ALTQ is enabled on the parent interface, do
1305 		 * classification; the queueing discipline might
1306 		 * not require classification, but might require
1307 		 * the address family/header pointer in the pktattr.
1308 		 */
1309 		if (ALTQ_IS_ENABLED(&p->if_snd)) {
1310 			switch (p->if_type) {
1311 			case IFT_ETHER:
1312 				altq_etherclassify(&p->if_snd, m);
1313 				break;
1314 			default:
1315 				panic("%s: impossible (altq)", __func__);
1316 			}
1317 		}
1318 		KERNEL_UNLOCK_ONE(NULL);
1319 #endif /* ALTQ */
1320 
1321 		bpf_mtap(ifp, m);
1322 		/*
1323 		 * If the parent can insert the tag itself, just mark
1324 		 * the tag in the mbuf header.
1325 		 */
1326 		if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
1327 			vlan_set_tag(m, mib->ifvm_tag);
1328 		} else {
1329 			/*
1330 			 * insert the tag ourselves
1331 			 */
1332 			M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1333 			if (m == NULL) {
1334 				printf("%s: unable to prepend encap header",
1335 				    p->if_xname);
1336 				ifp->if_oerrors++;
1337 				continue;
1338 			}
1339 
1340 			switch (p->if_type) {
1341 			case IFT_ETHER:
1342 			    {
1343 				struct ether_vlan_header *evl;
1344 
1345 				if (m->m_len < sizeof(struct ether_vlan_header))
1346 					m = m_pullup(m,
1347 					    sizeof(struct ether_vlan_header));
1348 				if (m == NULL) {
1349 					printf("%s: unable to pullup encap "
1350 					    "header", p->if_xname);
1351 					ifp->if_oerrors++;
1352 					continue;
1353 				}
1354 
1355 				/*
1356 				 * Transform the Ethernet header into an
1357 				 * Ethernet header with 802.1Q encapsulation.
1358 				 */
1359 				memmove(mtod(m, void *),
1360 				    mtod(m, char *) + mib->ifvm_encaplen,
1361 				    sizeof(struct ether_header));
1362 				evl = mtod(m, struct ether_vlan_header *);
1363 				evl->evl_proto = evl->evl_encap_proto;
1364 				evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1365 				evl->evl_tag = htons(mib->ifvm_tag);
1366 
1367 				/*
1368 				 * To cater for VLAN-aware layer 2 ethernet
1369 				 * switches which may need to strip the tag
1370 				 * before forwarding the packet, make sure
1371 				 * the packet+tag is at least 68 bytes long.
1372 				 * This is necessary because our parent will
1373 				 * only pad to 64 bytes (ETHER_MIN_LEN) and
1374 				 * some switches will not pad by themselves
1375 				 * after deleting a tag.
1376 				 */
1377 				const size_t min_data_len = ETHER_MIN_LEN -
1378 				    ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1379 				if (m->m_pkthdr.len < min_data_len) {
1380 					m_copyback(m, m->m_pkthdr.len,
1381 					    min_data_len - m->m_pkthdr.len,
1382 					    vlan_zero_pad_buff);
1383 				}
1384 				break;
1385 			    }
1386 
1387 			default:
1388 				panic("%s: impossible", __func__);
1389 			}
1390 		}
1391 
1392 		if ((p->if_flags & IFF_RUNNING) == 0) {
1393 			m_freem(m);
1394 			continue;
1395 		}
1396 
1397 		error = if_transmit_lock(p, m);
1398 		if (error) {
1399 			/* mbuf is already freed */
1400 			ifp->if_oerrors++;
1401 			continue;
1402 		}
1403 		ifp->if_opackets++;
1404 	}
1405 
1406 	ifp->if_flags &= ~IFF_OACTIVE;
1407 
1408 	/* Remove reference to mib before release */
1409 	vlan_putref_linkmib(mib, &psref);
1410 }
1411 
1412 static int
1413 vlan_transmit(struct ifnet *ifp, struct mbuf *m)
1414 {
1415 	struct ifvlan *ifv = ifp->if_softc;
1416 	struct ifnet *p;
1417 	struct ethercom *ec;
1418 	struct ifvlan_linkmib *mib;
1419 	struct psref psref;
1420 	int error;
1421 	size_t pktlen = m->m_pkthdr.len;
1422 	bool mcast = (m->m_flags & M_MCAST) != 0;
1423 
1424 	mib = vlan_getref_linkmib(ifv, &psref);
1425 	if (mib == NULL) {
1426 		m_freem(m);
1427 		return ENETDOWN;
1428 	}
1429 
1430 	p = mib->ifvm_p;
1431 	ec = (void *)mib->ifvm_p;
1432 
1433 	bpf_mtap(ifp, m);
1434 
1435 	if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
1436 		goto out;
1437 	if (m == NULL)
1438 		goto out;
1439 
1440 	/*
1441 	 * If the parent can insert the tag itself, just mark
1442 	 * the tag in the mbuf header.
1443 	 */
1444 	if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
1445 		vlan_set_tag(m, mib->ifvm_tag);
1446 	} else {
1447 		/*
1448 		 * insert the tag ourselves
1449 		 */
1450 		M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1451 		if (m == NULL) {
1452 			printf("%s: unable to prepend encap header",
1453 			    p->if_xname);
1454 			ifp->if_oerrors++;
1455 			error = ENOBUFS;
1456 			goto out;
1457 		}
1458 
1459 		switch (p->if_type) {
1460 		case IFT_ETHER:
1461 		    {
1462 			struct ether_vlan_header *evl;
1463 
1464 			if (m->m_len < sizeof(struct ether_vlan_header))
1465 				m = m_pullup(m,
1466 				    sizeof(struct ether_vlan_header));
1467 			if (m == NULL) {
1468 				printf("%s: unable to pullup encap "
1469 				    "header", p->if_xname);
1470 				ifp->if_oerrors++;
1471 				error = ENOBUFS;
1472 				goto out;
1473 			}
1474 
1475 			/*
1476 			 * Transform the Ethernet header into an
1477 			 * Ethernet header with 802.1Q encapsulation.
1478 			 */
1479 			memmove(mtod(m, void *),
1480 			    mtod(m, char *) + mib->ifvm_encaplen,
1481 			    sizeof(struct ether_header));
1482 			evl = mtod(m, struct ether_vlan_header *);
1483 			evl->evl_proto = evl->evl_encap_proto;
1484 			evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1485 			evl->evl_tag = htons(mib->ifvm_tag);
1486 
1487 			/*
1488 			 * To cater for VLAN-aware layer 2 ethernet
1489 			 * switches which may need to strip the tag
1490 			 * before forwarding the packet, make sure
1491 			 * the packet+tag is at least 68 bytes long.
1492 			 * This is necessary because our parent will
1493 			 * only pad to 64 bytes (ETHER_MIN_LEN) and
1494 			 * some switches will not pad by themselves
1495 			 * after deleting a tag.
1496 			 */
1497 			const size_t min_data_len = ETHER_MIN_LEN -
1498 			    ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1499 			if (m->m_pkthdr.len < min_data_len) {
1500 				m_copyback(m, m->m_pkthdr.len,
1501 				    min_data_len - m->m_pkthdr.len,
1502 				    vlan_zero_pad_buff);
1503 			}
1504 			break;
1505 		    }
1506 
1507 		default:
1508 			panic("%s: impossible", __func__);
1509 		}
1510 	}
1511 
1512 	if ((p->if_flags & IFF_RUNNING) == 0) {
1513 		m_freem(m);
1514 		error = ENETDOWN;
1515 		goto out;
1516 	}
1517 
1518 	error = if_transmit_lock(p, m);
1519 	if (error) {
1520 		/* mbuf is already freed */
1521 		ifp->if_oerrors++;
1522 	} else {
1523 
1524 		ifp->if_opackets++;
1525 		ifp->if_obytes += pktlen;
1526 		if (mcast)
1527 			ifp->if_omcasts++;
1528 	}
1529 
1530 out:
1531 	/* Remove reference to mib before release */
1532 	vlan_putref_linkmib(mib, &psref);
1533 	return error;
1534 }
1535 
1536 /*
1537  * Given an Ethernet frame, find a valid vlan interface corresponding to the
1538  * given source interface and tag, then run the real packet through the
1539  * parent's input routine.
1540  */
1541 void
1542 vlan_input(struct ifnet *ifp, struct mbuf *m)
1543 {
1544 	struct ifvlan *ifv;
1545 	uint16_t vid;
1546 	struct ifvlan_linkmib *mib;
1547 	struct psref psref;
1548 	bool have_vtag;
1549 
1550 	have_vtag = vlan_has_tag(m);
1551 	if (have_vtag) {
1552 		vid = EVL_VLANOFTAG(vlan_get_tag(m));
1553 		m->m_flags &= ~M_VLANTAG;
1554 	} else {
1555 		struct ether_vlan_header *evl;
1556 
1557 		if (ifp->if_type != IFT_ETHER) {
1558 			panic("%s: impossible", __func__);
1559 		}
1560 
1561 		if (m->m_len < sizeof(struct ether_vlan_header) &&
1562 		    (m = m_pullup(m,
1563 		     sizeof(struct ether_vlan_header))) == NULL) {
1564 			printf("%s: no memory for VLAN header, "
1565 			    "dropping packet.\n", ifp->if_xname);
1566 			return;
1567 		}
1568 		evl = mtod(m, struct ether_vlan_header *);
1569 		KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);
1570 
1571 		vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));
1572 
1573 		/*
1574 		 * Restore the original ethertype.  We'll remove
1575 		 * the encapsulation after we've found the vlan
1576 		 * interface corresponding to the tag.
1577 		 */
1578 		evl->evl_encap_proto = evl->evl_proto;
1579 	}
1580 
1581 	mib = vlan_lookup_tag_psref(ifp, vid, &psref);
1582 	if (mib == NULL) {
1583 		m_freem(m);
1584 		ifp->if_noproto++;
1585 		return;
1586 	}
1587 	KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN);
1588 
1589 	ifv = mib->ifvm_ifvlan;
1590 	if ((ifv->ifv_if.if_flags & (IFF_UP|IFF_RUNNING)) !=
1591 	    (IFF_UP|IFF_RUNNING)) {
1592 		m_freem(m);
1593 		ifp->if_noproto++;
1594 		goto out;
1595 	}
1596 
1597 	/*
1598 	 * Now, remove the encapsulation header.  The original
1599 	 * header has already been fixed up above.
1600 	 */
1601 	if (!have_vtag) {
1602 		memmove(mtod(m, char *) + mib->ifvm_encaplen,
1603 		    mtod(m, void *), sizeof(struct ether_header));
1604 		m_adj(m, mib->ifvm_encaplen);
1605 	}
1606 
1607 	m_set_rcvif(m, &ifv->ifv_if);
1608 	ifv->ifv_if.if_ipackets++;
1609 
1610 	if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
1611 		goto out;
1612 	if (m == NULL)
1613 		goto out;
1614 
1615 	m->m_flags &= ~M_PROMISC;
1616 	if_input(&ifv->ifv_if, m);
1617 out:
1618 	vlan_putref_linkmib(mib, &psref);
1619 }
1620 
1621 /*
1622  * Module infrastructure
1623  */
1624 #include "if_module.h"
1625 
1626 IF_MODULE(MODULE_CLASS_DRIVER, vlan, "")
1627