xref: /netbsd-src/sys/net/if_vlan.c (revision 627f7eb200a4419d89b531d55fccd2ee3ffdcde0)
1 /*	$NetBSD: if_vlan.c,v 1.153 2020/09/26 18:38:09 roy Exp $	*/
2 
3 /*
4  * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright 1998 Massachusetts Institute of Technology
34  *
35  * Permission to use, copy, modify, and distribute this software and
36  * its documentation for any purpose and without fee is hereby
37  * granted, provided that both the above copyright notice and this
38  * permission notice appear in all copies, that both the above
39  * copyright notice and this permission notice appear in all
40  * supporting documentation, and that the name of M.I.T. not be used
41  * in advertising or publicity pertaining to distribution of the
42  * software without specific, written prior permission.  M.I.T. makes
43  * no representations about the suitability of this software for any
44  * purpose.  It is provided "as is" without express or implied
45  * warranty.
46  *
47  * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
48  * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
49  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
50  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
51  * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
54  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
55  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
56  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
57  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
61  * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
62  */
63 
64 /*
65  * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.  Might be
66  * extended some day to also handle IEEE 802.1P priority tagging.  This is
67  * sort of sneaky in the implementation, since we need to pretend to be
68  * enough of an Ethernet implementation to make ARP work.  The way we do
69  * this is by telling everyone that we are an Ethernet interface, and then
70  * catch the packets that ether_output() left on our output queue when it
71  * calls if_start(), rewrite them for use by the real outgoing interface,
72  * and ask it to send them.
73  *
74  * TODO:
75  *
76  *	- Need some way to notify vlan interfaces when the parent
77  *	  interface changes MTU.
78  */
79 
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.153 2020/09/26 18:38:09 roy Exp $");
82 
83 #ifdef _KERNEL_OPT
84 #include "opt_inet.h"
85 #include "opt_net_mpsafe.h"
86 #endif
87 
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/mbuf.h>
92 #include <sys/queue.h>
93 #include <sys/socket.h>
94 #include <sys/sockio.h>
95 #include <sys/systm.h>
96 #include <sys/proc.h>
97 #include <sys/kauth.h>
98 #include <sys/mutex.h>
99 #include <sys/kmem.h>
100 #include <sys/cpu.h>
101 #include <sys/pserialize.h>
102 #include <sys/psref.h>
103 #include <sys/pslist.h>
104 #include <sys/atomic.h>
105 #include <sys/device.h>
106 #include <sys/module.h>
107 
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_ether.h>
113 #include <net/if_vlanvar.h>
114 
115 #ifdef INET
116 #include <netinet/in.h>
117 #include <netinet/if_inarp.h>
118 #endif
119 #ifdef INET6
120 #include <netinet6/in6_ifattach.h>
121 #include <netinet6/in6_var.h>
122 #include <netinet6/nd6.h>
123 #endif
124 
125 #include "ioconf.h"
126 
/*
 * One multicast group joined through a vlan interface; kept on the
 * softc's ifv_mc_listhead so the groups can be re-resolved or purged
 * when the parent changes.
 */
struct vlan_mc_entry {
	LIST_ENTRY(vlan_mc_entry)	mc_entries;	/* link on ifv_mc_listhead */
	/*
	 * A key to identify this entry.  The mc_addr below can't be
	 * used since multiple sockaddr may mapped into the same
	 * ether_multi (e.g., AF_UNSPEC).
	 */
	struct ether_multi	*mc_enm;
	struct sockaddr_storage		mc_addr;	/* address used to join the group */
};
137 
/*
 * Parent-link state of a vlan interface, published to lockless readers.
 * Writers build a fresh copy and swap it in via vlan_linkmib_update();
 * readers obtain a stable reference with vlan_getref_linkmib().
 */
struct ifvlan_linkmib {
	struct ifvlan *ifvm_ifvlan;	/* back pointer to owning softc */
	const struct vlan_multisw *ifvm_msw;	/* multicast ops for parent type */
	int	ifvm_encaplen;	/* encapsulation length */
	int	ifvm_mtufudge;	/* MTU fudged by this much */
	int	ifvm_mintu;	/* min transmission unit */
	uint16_t ifvm_proto;	/* encapsulation ethertype */
	uint16_t ifvm_tag;	/* tag to apply on packets */
	struct ifnet *ifvm_p;	/* parent interface of this vlan */

	struct psref_target ifvm_psref;	/* target for reader references */
};
150 
/*
 * Software state (softc) of a vlan pseudo-interface.  The embedded
 * ethercom must stay first so the softc can be used as an ifnet.
 */
struct ifvlan {
	struct ethercom ifv_ec;
	struct ifvlan_linkmib *ifv_mib;	/*
					 * reader must use vlan_getref_linkmib()
					 * instead of direct dereference
					 */
	kmutex_t ifv_lock;		/* writer lock for ifv_mib */
	pserialize_t ifv_psz;		/* retires old mibs after a swap */

	LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;	/* joined groups */
	LIST_ENTRY(ifvlan) ifv_list;	/* link on global ifv_list.list */
	struct pslist_entry ifv_hash;	/* link on ifv_hash tag bucket */
	int ifv_flags;			/* IFVF_* flags */
};
165 
#define	IFVF_PROMISC	0x01		/* promiscuous mode enabled */

/* The vlan interface's ifnet lives inside the embedded ethercom. */
#define	ifv_if		ifv_ec.ec_if

/*
 * NOTE(review): ifv_mib is a *pointer* in struct ifvlan, so these
 * member-style shorthands (ifv_mib.ifvm_*) would not compile if used
 * via a softc pointer; they appear to be stale leftovers -- confirm
 * they are unused before removing.
 */
#define	ifv_msw		ifv_mib.ifvm_msw
#define	ifv_encaplen	ifv_mib.ifvm_encaplen
#define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
#define	ifv_mintu	ifv_mib.ifvm_mintu
#define	ifv_tag		ifv_mib.ifvm_tag
175 
/*
 * Multicast handling operations, selected according to the parent
 * interface type (only Ethernet is supported below).
 */
struct vlan_multisw {
	int	(*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
	int	(*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
	void	(*vmsw_purgemulti)(struct ifvlan *);
};

static int	vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
static int	vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
static void	vlan_ether_purgemulti(struct ifvlan *);

/* Multicast ops used when the parent is an Ethernet interface. */
const struct vlan_multisw vlan_ether_multisw = {
	.vmsw_addmulti = vlan_ether_addmulti,
	.vmsw_delmulti = vlan_ether_delmulti,
	.vmsw_purgemulti = vlan_ether_purgemulti,
};
191 
192 static int	vlan_clone_create(struct if_clone *, int);
193 static int	vlan_clone_destroy(struct ifnet *);
194 static int	vlan_config(struct ifvlan *, struct ifnet *, uint16_t);
195 static int	vlan_ioctl(struct ifnet *, u_long, void *);
196 static void	vlan_start(struct ifnet *);
197 static int	vlan_transmit(struct ifnet *, struct mbuf *);
198 static void	vlan_unconfig(struct ifnet *);
199 static int	vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *);
200 static void	vlan_hash_init(void);
201 static int	vlan_hash_fini(void);
202 static int	vlan_tag_hash(uint16_t, u_long);
203 static struct ifvlan_linkmib*	vlan_getref_linkmib(struct ifvlan *,
204     struct psref *);
205 static void	vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *);
206 static void	vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *);
207 static struct ifvlan_linkmib*	vlan_lookup_tag_psref(struct ifnet *,
208     uint16_t, struct psref *);
209 
/* All configured vlan interfaces, protected by ifv_list.lock. */
static struct {
	kmutex_t lock;
	LIST_HEAD(vlan_ifvlist, ifvlan) list;
} ifv_list __cacheline_aligned;


#if !defined(VLAN_TAG_HASH_SIZE)
#define VLAN_TAG_HASH_SIZE 32
#endif
/*
 * Hash of vlan interfaces keyed by tag for lockless lookup on the
 * input path: readers use pserialize sections, writers hold
 * ifv_hash.lock.
 */
static struct {
	kmutex_t lock;
	struct pslist_head *lists;
	u_long mask;		/* table size - 1, from hashinit() */
} ifv_hash __cacheline_aligned = {
	.lists = NULL,
	.mask = 0,
};
227 
/* Retires readers of the ifv_hash lists (see vlan_unconfig_locked). */
pserialize_t vlan_psz __read_mostly;
/* Psref class for references to struct ifvlan_linkmib. */
static struct psref_class *ifvm_psref_class __read_mostly;

struct if_clone vlan_cloner =
    IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);

/* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */
static char vlan_zero_pad_buff[ETHER_MIN_LEN];
236 
/*
 * Toggle promiscuous mode on an interface, taking the big kernel
 * lock around ifpromisc() when the stack is not built MP-safe.
 */
static inline int
vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
{
	int error;

	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	error = ifpromisc(ifp, pswitch);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

	return error;
}
248 
/*
 * As vlan_safe_ifpromisc(), but for callers that already hold the
 * interface lock (uses ifpromisc_locked()).
 */
static inline int
vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
{
	int error;

	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	error = ifpromisc_locked(ifp, pswitch);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

	return error;
}
260 
/*
 * Legacy pseudo-device attach entry point; intentionally empty.
 */
void
vlanattach(int n)
{

	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in vlaninit() below.
	 */
}
270 
/*
 * Module initialization: set up the global interface list, the tag
 * hash and the pserialize/psref machinery, attach the cloner, and
 * hook vlan_input() into the Ethernet input path.
 */
static void
vlaninit(void)
{
	mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&ifv_list.list);

	mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
	vlan_psz = pserialize_create();
	ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
	if_clone_attach(&vlan_cloner);

	vlan_hash_init();
	MODULE_HOOK_SET(if_vlan_vlan_input_hook, vlan_input);
}
285 
/*
 * Module detach: refuse (EBUSY) while any vlan interface exists,
 * otherwise tear down the hash, cloner and global state in reverse
 * order of vlaninit().
 */
static int
vlandetach(void)
{
	bool is_empty;
	int error;

	mutex_enter(&ifv_list.lock);
	is_empty = LIST_EMPTY(&ifv_list.list);
	mutex_exit(&ifv_list.lock);

	if (!is_empty)
		return EBUSY;

	/* vlan_hash_fini() re-checks the buckets and may also return EBUSY. */
	error = vlan_hash_fini();
	if (error != 0)
		return error;

	if_clone_detach(&vlan_cloner);
	psref_class_destroy(ifvm_psref_class);
	pserialize_destroy(vlan_psz);
	mutex_destroy(&ifv_hash.lock);
	mutex_destroy(&ifv_list.lock);

	MODULE_HOOK_UNSET(if_vlan_vlan_input_hook);
	return 0;
}
312 
/*
 * Reset the interface's datalink identity to the unconfigured
 * "802.1Q VLAN" state (no parent attached).
 */
static void
vlan_reset_linkname(struct ifnet *ifp)
{

	/*
	 * We start out with a "802.1Q VLAN" type and zero-length
	 * addresses.  When we attach to a parent interface, we
	 * inherit its type, address length, address, and data link
	 * type.
	 */

	ifp->if_type = IFT_L2VLAN;
	ifp->if_addrlen = 0;
	ifp->if_dlt = DLT_NULL;
	if_alloc_sadl(ifp);
}
329 
330 static int
331 vlan_clone_create(struct if_clone *ifc, int unit)
332 {
333 	struct ifvlan *ifv;
334 	struct ifnet *ifp;
335 	struct ifvlan_linkmib *mib;
336 	int rv;
337 
338 	ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO);
339 	mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
340 	ifp = &ifv->ifv_if;
341 	LIST_INIT(&ifv->ifv_mc_listhead);
342 
343 	mib->ifvm_ifvlan = ifv;
344 	mib->ifvm_p = NULL;
345 	psref_target_init(&mib->ifvm_psref, ifvm_psref_class);
346 
347 	mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
348 	ifv->ifv_psz = pserialize_create();
349 	ifv->ifv_mib = mib;
350 
351 	mutex_enter(&ifv_list.lock);
352 	LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
353 	mutex_exit(&ifv_list.lock);
354 
355 	if_initname(ifp, ifc->ifc_name, unit);
356 	ifp->if_softc = ifv;
357 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
358 #ifdef NET_MPSAFE
359 	ifp->if_extflags = IFEF_MPSAFE;
360 #endif
361 	ifp->if_start = vlan_start;
362 	ifp->if_transmit = vlan_transmit;
363 	ifp->if_ioctl = vlan_ioctl;
364 	IFQ_SET_READY(&ifp->if_snd);
365 
366 	rv = if_initialize(ifp);
367 	if (rv != 0) {
368 		aprint_error("%s: if_initialize failed(%d)\n", ifp->if_xname,
369 		    rv);
370 		goto fail;
371 	}
372 
373 	/*
374 	 * Set the link state to down.
375 	 * When the parent interface attaches we will use that link state.
376 	 * When the parent interface link state changes, so will ours.
377 	 */
378 	ifp->if_link_state = LINK_STATE_DOWN;
379 
380 	vlan_reset_linkname(ifp);
381 	if_register(ifp);
382 	return 0;
383 
384 fail:
385 	mutex_enter(&ifv_list.lock);
386 	LIST_REMOVE(ifv, ifv_list);
387 	mutex_exit(&ifv_list.lock);
388 
389 	mutex_destroy(&ifv->ifv_lock);
390 	psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
391 	kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
392 	free(ifv, M_DEVBUF);
393 
394 	return rv;
395 }
396 
/*
 * Destroy a cloned vlan interface: unlink it from the global list,
 * detach it from any parent, detach the ifnet, then release all
 * softc resources.  Always returns 0.
 */
static int
vlan_clone_destroy(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;

	mutex_enter(&ifv_list.lock);
	LIST_REMOVE(ifv, ifv_list);
	mutex_exit(&ifv_list.lock);

	/* vlan_unconfig() requires IFNET_LOCK to be held. */
	IFNET_LOCK(ifp);
	vlan_unconfig(ifp);
	IFNET_UNLOCK(ifp);
	if_detach(ifp);

	/* No readers can remain after if_detach, so tear down the mib. */
	psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
	kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
	pserialize_destroy(ifv->ifv_psz);
	mutex_destroy(&ifv->ifv_lock);
	free(ifv, M_DEVBUF);

	return 0;
}
419 
420 /*
421  * Configure a VLAN interface.
422  */
/*
 * Attach a vlan interface to parent interface p with the given tag.
 *
 * Builds a new link mib describing the attachment and publishes it
 * with vlan_linkmib_update(); the old (parentless) mib is freed once
 * readers have drained.  Returns 0, EINVAL for a reserved VLAN ID,
 * EBUSY if already configured, EEXIST if the (parent, vid) pair is
 * taken, EPROTONOSUPPORT for non-Ethernet parents, or an error from
 * the parent's MTU/VLAN setup callbacks.
 */
static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
{
	struct ifnet *ifp = &ifv->ifv_if;
	struct ifvlan_linkmib *nmib = NULL;
	struct ifvlan_linkmib *omib = NULL;
	struct ifvlan_linkmib *checkmib;
	struct psref_target *nmib_psref = NULL;
	const uint16_t vid = EVL_VLANOFTAG(tag);
	int error = 0;
	int idx;
	bool omib_cleanup = false;
	struct psref psref;

	/* VLAN ID 0 and 4095 are reserved in the spec */
	if ((vid == 0) || (vid == 0xfff))
		return EINVAL;

	/* Allocate before taking the lock; KM_SLEEP may block. */
	nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
	mutex_enter(&ifv->ifv_lock);
	omib = ifv->ifv_mib;

	/* Already attached to a parent? */
	if (omib->ifvm_p != NULL) {
		error = EBUSY;
		goto done;
	}

	/* Duplicate check */
	checkmib = vlan_lookup_tag_psref(p, vid, &psref);
	if (checkmib != NULL) {
		vlan_putref_linkmib(checkmib, &psref);
		error = EEXIST;
		goto done;
	}

	*nmib = *omib;
	nmib_psref = &nmib->ifvm_psref;

	psref_target_init(nmib_psref, ifvm_psref_class);

	switch (p->if_type) {
	case IFT_ETHER:
	    {
		struct ethercom *ec = (void *)p;
		struct vlanid_list *vidmem;

		nmib->ifvm_msw = &vlan_ether_multisw;
		nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
		nmib->ifvm_mintu = ETHERMIN;

		/* First vlan on this parent: try to grow the parent's MTU. */
		if (ec->ec_nvlans++ == 0) {
			IFNET_LOCK(p);
			error = ether_enable_vlan_mtu(p);
			IFNET_UNLOCK(p);
			if (error >= 0) {
				if (error) {
					ec->ec_nvlans--;
					goto done;
				}
				nmib->ifvm_mtufudge = 0;
			} else {
				/*
				 * Fudge the MTU by the encapsulation size. This
				 * makes us incompatible with strictly compliant
				 * 802.1Q implementations, but allows us to use
				 * the feature with other NetBSD
				 * implementations, which might still be useful.
				 */
				nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
			}
			error = 0;
		}
		/* Add a vid to the list */
		vidmem = kmem_alloc(sizeof(struct vlanid_list), KM_SLEEP);
		vidmem->vid = vid;
		ETHER_LOCK(ec);
		SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidmem, vid_list);
		ETHER_UNLOCK(ec);

		if (ec->ec_vlan_cb != NULL) {
			/*
			 * Call ec_vlan_cb(). It will setup VLAN HW filter or
			 * HW tagging function.
			 */
			error = (*ec->ec_vlan_cb)(ec, vid, true);
			if (error) {
				ec->ec_nvlans--;
				if (ec->ec_nvlans == 0) {
					IFNET_LOCK(p);
					(void)ether_disable_vlan_mtu(p);
					IFNET_UNLOCK(p);
				}
				goto done;
			}
		}
		/*
		 * If the parent interface can do hardware-assisted
		 * VLAN encapsulation, then propagate its hardware-
		 * assisted checksumming flags and tcp segmentation
		 * offload.
		 */
		if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
			ifp->if_capabilities = p->if_capabilities &
			    (IFCAP_TSOv4 | IFCAP_TSOv6 |
				IFCAP_CSUM_IPv4_Tx  | IFCAP_CSUM_IPv4_Rx |
				IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
				IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
				IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
				IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx);
		}

		/*
		 * We inherit the parent's Ethernet address.
		 */
		ether_ifattach(ifp, CLLADDR(p->if_sadl));
		ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
		break;
	    }

	default:
		error = EPROTONOSUPPORT;
		goto done;
	}

	nmib->ifvm_p = p;
	nmib->ifvm_tag = vid;
	ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
	ifv->ifv_if.if_flags = p->if_flags &
	    (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);

	/*
	 * Inherit the if_type from the parent.  This allows us
	 * to participate in bridges of that type.
	 */
	ifv->ifv_if.if_type = p->if_type;

	/* Make the interface findable by tag on the input path. */
	PSLIST_ENTRY_INIT(ifv, ifv_hash);
	idx = vlan_tag_hash(vid, ifv_hash.mask);

	mutex_enter(&ifv_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
	mutex_exit(&ifv_hash.lock);

	/* Publish nmib; ownership passes to ifv, omib is retired. */
	vlan_linkmib_update(ifv, nmib);
	nmib = NULL;
	nmib_psref = NULL;
	omib_cleanup = true;


	/*
	 * We inherit the parents link state.
	 */
	if_link_state_change(&ifv->ifv_if, p->if_link_state);

done:
	mutex_exit(&ifv->ifv_lock);

	/* On failure, dispose of the unpublished mib. */
	if (nmib_psref)
		psref_target_destroy(nmib_psref, ifvm_psref_class);
	if (nmib)
		kmem_free(nmib, sizeof(*nmib));
	if (omib_cleanup)
		kmem_free(omib, sizeof(*omib));

	return error;
}
589 
590 /*
591  * Unconfigure a VLAN interface.
592  */
/*
 * Detach a vlan interface from its parent.  Allocates the replacement
 * mib outside the lock, then defers to vlan_unconfig_locked(); if that
 * fails (e.g. no parent was configured), the unused mib is freed here.
 * Caller must hold IFNET_LOCK(ifp).
 */
static void
vlan_unconfig(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifvlan_linkmib *nmib = NULL;
	int error;

	KASSERT(IFNET_LOCKED(ifp));

	nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);

	mutex_enter(&ifv->ifv_lock);
	error = vlan_unconfig_locked(ifv, nmib);
	mutex_exit(&ifv->ifv_lock);

	/* On success nmib was published; on failure it is still ours. */
	if (error)
		kmem_free(nmib, sizeof(*nmib));
}
/*
 * Guts of vlan_unconfig(): detach ifv from its parent and publish the
 * parentless mib nmib.  Returns 0 on success (nmib consumed) or -1 if
 * no parent was configured (caller frees nmib).
 *
 * Caller must hold both IFNET_LOCK(ifp) and ifv->ifv_lock; both are
 * temporarily dropped around ether_ifdetach() below.
 */
static int
vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifnet *p;
	struct ifnet *ifp = &ifv->ifv_if;
	struct psref_target *nmib_psref = NULL;
	struct ifvlan_linkmib *omib;
	int error = 0;

	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(mutex_owned(&ifv->ifv_lock));

	ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);

	omib = ifv->ifv_mib;
	p = omib->ifvm_p;

	/* Not attached to any parent: nothing to undo. */
	if (p == NULL) {
		error = -1;
		goto done;
	}

	*nmib = *omib;
	nmib_psref = &nmib->ifvm_psref;
	psref_target_init(nmib_psref, ifvm_psref_class);

	/*
	 * Since the interface is being unconfigured, we need to empty the
	 * list of multicast groups that we may have joined while we were
	 * alive and remove them from the parent's list also.
	 */
	(*nmib->ifvm_msw->vmsw_purgemulti)(ifv);

	/* Disconnect from parent. */
	switch (p->if_type) {
	case IFT_ETHER:
	    {
		struct ethercom *ec = (void *)p;
		struct vlanid_list *vlanidp;
		uint16_t vid = EVL_VLANOFTAG(nmib->ifvm_tag);

		/* Remove our vid from the parent's list. */
		ETHER_LOCK(ec);
		SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
			if (vlanidp->vid == vid) {
				SIMPLEQ_REMOVE(&ec->ec_vids, vlanidp,
				    vlanid_list, vid_list);
				break;
			}
		}
		ETHER_UNLOCK(ec);
		if (vlanidp != NULL)
			kmem_free(vlanidp, sizeof(*vlanidp));

		if (ec->ec_vlan_cb != NULL) {
			/*
			 * Call ec_vlan_cb(). It will setup VLAN HW filter or
			 * HW tagging function.
			 */
			(void)(*ec->ec_vlan_cb)(ec, vid, false);
		}
		/* Last vlan on this parent: restore its normal MTU. */
		if (--ec->ec_nvlans == 0) {
			IFNET_LOCK(p);
			(void)ether_disable_vlan_mtu(p);
			IFNET_UNLOCK(p);
		}

		/* XXX ether_ifdetach must not be called with IFNET_LOCK */
		mutex_exit(&ifv->ifv_lock);
		IFNET_UNLOCK(ifp);
		ether_ifdetach(ifp);
		IFNET_LOCK(ifp);
		mutex_enter(&ifv->ifv_lock);

		/* if_free_sadl must be called with IFNET_LOCK */
		if_free_sadl(ifp, 1);

		/* Restore vlan_ioctl overwritten by ether_ifdetach */
		ifp->if_ioctl = vlan_ioctl;
		vlan_reset_linkname(ifp);
		break;
	    }

	default:
		panic("%s: impossible", __func__);
	}

	nmib->ifvm_p = NULL;
	ifv->ifv_if.if_mtu = 0;
	ifv->ifv_flags = 0;

	/* Unhook from the tag hash and wait out lockless readers. */
	mutex_enter(&ifv_hash.lock);
	PSLIST_WRITER_REMOVE(ifv, ifv_hash);
	pserialize_perform(vlan_psz);
	mutex_exit(&ifv_hash.lock);
	PSLIST_ENTRY_DESTROY(ifv, ifv_hash);

	/* Publish the parentless mib; omib is retired and freed below. */
	vlan_linkmib_update(ifv, nmib);

	mutex_exit(&ifv->ifv_lock);

	nmib_psref = NULL;
	kmem_free(omib, sizeof(*omib));

#ifdef INET6
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	/* To delete v6 link local addresses */
	if (in6_present)
		in6_ifdetach(ifp);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#endif

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		vlan_safe_ifpromisc_locked(ifp, 0);
	if_down_locked(ifp);
	ifp->if_capabilities = 0;
	mutex_enter(&ifv->ifv_lock);
done:

	if (nmib_psref)
		psref_target_destroy(nmib_psref, ifvm_psref_class);

	return error;
}
734 
/*
 * Allocate the tag hash table; hashinit() stores the size-1 mask in
 * ifv_hash.mask for vlan_tag_hash().
 */
static void
vlan_hash_init(void)
{

	ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
	    &ifv_hash.mask);
}
742 
/*
 * Free the tag hash table.  Returns EBUSY if any bucket is still
 * populated; the check pass must complete before any bucket is
 * destroyed, hence the two separate loops.
 */
static int
vlan_hash_fini(void)
{
	int i;

	mutex_enter(&ifv_hash.lock);

	/* First pass: refuse teardown if any interface remains hashed. */
	for (i = 0; i < ifv_hash.mask + 1; i++) {
		if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
		    ifv_hash) != NULL) {
			mutex_exit(&ifv_hash.lock);
			return EBUSY;
		}
	}

	/* Second pass: all buckets confirmed empty, destroy them. */
	for (i = 0; i < ifv_hash.mask + 1; i++)
		PSLIST_DESTROY(&ifv_hash.lists[i]);

	mutex_exit(&ifv_hash.lock);

	hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);

	ifv_hash.lists = NULL;
	ifv_hash.mask = 0;

	return 0;
}
770 
/*
 * Map a VLAN tag to a bucket index in [0, mask].  Folds the high byte
 * and then two low bits into the tag before masking, to spread
 * clustered tag values across buckets.
 */
static int
vlan_tag_hash(uint16_t tag, u_long mask)
{
	uint32_t h = (uint32_t)((tag >> 8) ^ tag);

	h ^= h >> 2;

	return h & mask;
}
781 
/*
 * Acquire a stable reference to sc's current link mib, or return NULL
 * if none is installed.  The psref pins the mib against concurrent
 * vlan_linkmib_update(); release with vlan_putref_linkmib().  Caller
 * must be bound to a CPU (curlwp_bind()).
 */
static struct ifvlan_linkmib *
vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
{
	struct ifvlan_linkmib *mib;
	int s;

	s = pserialize_read_enter();
	mib = atomic_load_consume(&sc->ifv_mib);
	if (mib == NULL) {
		pserialize_read_exit(s);
		return NULL;
	}
	/* Acquire inside the read section so the mib cannot be retired. */
	psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
	pserialize_read_exit(s);

	return mib;
}
799 
800 static void
801 vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
802 {
803 	if (mib == NULL)
804 		return;
805 	psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
806 }
807 
/*
 * Find the vlan attached to parent ifp with the given tag.  Returns a
 * referenced link mib (release with vlan_putref_linkmib()) or NULL.
 * Lockless: walks the pserialize-protected hash bucket.
 */
static struct ifvlan_linkmib *
vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
{
	int idx;
	int s;
	struct ifvlan *sc;

	idx = vlan_tag_hash(tag, ifv_hash.mask);

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
	    ifv_hash) {
		struct ifvlan_linkmib *mib = atomic_load_consume(&sc->ifv_mib);
		if (mib == NULL)
			continue;
		if (mib->ifvm_tag != tag)
			continue;
		if (mib->ifvm_p != ifp)
			continue;

		/* Match: pin the mib before leaving the read section. */
		psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
		pserialize_read_exit(s);
		return mib;
	}
	pserialize_read_exit(s);
	return NULL;
}
835 
/*
 * Publish nmib as ifv's current link mib and retire the old one:
 * wait for pserialize readers to drain, then wait for outstanding
 * psrefs on the old mib.  The caller frees the old mib afterwards.
 * Must be called with ifv->ifv_lock held; may sleep.
 */
static void
vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifvlan_linkmib *omib = ifv->ifv_mib;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	atomic_store_release(&ifv->ifv_mib, nmib);

	pserialize_perform(ifv->ifv_psz);
	psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
}
848 
849 /*
850  * Called when a parent interface is detaching; destroy any VLAN
851  * configuration for the parent interface.
852  */
void
vlan_ifdetach(struct ifnet *p)
{
	struct ifvlan *ifv;
	struct ifvlan_linkmib *mib, **nmibs;
	struct psref psref;
	int error;
	int bound;
	int i, cnt = 0;

	bound = curlwp_bind();

	/* First pass: count how many vlans hang off this parent. */
	mutex_enter(&ifv_list.lock);
	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL)
			continue;

		if (mib->ifvm_p == p)
			cnt++;

		vlan_putref_linkmib(mib, &psref);
	}
	mutex_exit(&ifv_list.lock);

	if (cnt == 0) {
		curlwp_bindx(bound);
		return;
	}

	/*
	 * The value of "cnt" does not increase while ifv_list.lock
	 * and ifv->ifv_lock are released here, because the parent
	 * interface is detaching.
	 */
	/* Pre-allocate one replacement mib per affected vlan (may sleep). */
	nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
	for (i = 0; i < cnt; i++) {
		nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
	}

	/* Second pass: unconfigure each vlan attached to p. */
	mutex_enter(&ifv_list.lock);

	i = 0;
	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
		struct ifnet *ifp = &ifv->ifv_if;

		/* IFNET_LOCK must be held before ifv_lock. */
		IFNET_LOCK(ifp);
		mutex_enter(&ifv->ifv_lock);

		/* XXX ifv_mib = NULL? */
		if (ifv->ifv_mib->ifvm_p == p) {
			KASSERTMSG(i < cnt,
			    "no memory for unconfig, parent=%s", p->if_xname);
			error = vlan_unconfig_locked(ifv, nmibs[i]);
			if (!error) {
				/* nmibs[i] was consumed by the unconfig. */
				nmibs[i] = NULL;
				i++;
			}

		}

		mutex_exit(&ifv->ifv_lock);
		IFNET_UNLOCK(ifp);
	}

	mutex_exit(&ifv_list.lock);

	curlwp_bindx(bound);

	/* Free any pre-allocated mibs that went unused. */
	for (i = 0; i < cnt; i++) {
		if (nmibs[i])
			kmem_free(nmibs[i], sizeof(*nmibs[i]));
	}

	kmem_free(nmibs, sizeof(*nmibs) * cnt);

	return;
}
932 
933 static int
934 vlan_set_promisc(struct ifnet *ifp)
935 {
936 	struct ifvlan *ifv = ifp->if_softc;
937 	struct ifvlan_linkmib *mib;
938 	struct psref psref;
939 	int error = 0;
940 	int bound;
941 
942 	bound = curlwp_bind();
943 	mib = vlan_getref_linkmib(ifv, &psref);
944 	if (mib == NULL) {
945 		curlwp_bindx(bound);
946 		return EBUSY;
947 	}
948 
949 	if ((ifp->if_flags & IFF_PROMISC) != 0) {
950 		if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
951 			error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
952 			if (error == 0)
953 				ifv->ifv_flags |= IFVF_PROMISC;
954 		}
955 	} else {
956 		if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
957 			error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
958 			if (error == 0)
959 				ifv->ifv_flags &= ~IFVF_PROMISC;
960 		}
961 	}
962 	vlan_putref_linkmib(mib, &psref);
963 	curlwp_bindx(bound);
964 
965 	return error;
966 }
967 
/*
 * Ioctl handler for vlan interfaces.
 *
 * Cases that read parent state take a psref on the link mib (bound to
 * the CPU via curlwp_bind) and release it before returning; the
 * multicast cases instead hold ifv_lock across the multisw call.
 * Unhandled commands fall through to ether_ioctl().
 */
static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct lwp *l = curlwp;
	struct ifvlan *ifv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifnet *pr;
	struct ifcapreq *ifcr;
	struct vlanreq vlr;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error = 0;
	int bound;

	switch (cmd) {
	case SIOCSIFMTU:
		/* Validate the MTU against the parent's limits. */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
		} else if (
		    ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
		    ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
		} else {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);

			error = ifioctl_common(ifp, cmd, data);
			if (error == ENETRESET)
				error = 0;
		}

		break;

	case SIOCSETVLAN:
		/* Privileged: attach to or detach from a parent interface. */
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_INTERFACE,
		    KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
		    NULL)) != 0)
			break;
		if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
			break;

		/* Empty parent name means "unconfigure". */
		if (vlr.vlr_parent[0] == '\0') {
			bound = curlwp_bind();
			mib = vlan_getref_linkmib(ifv, &psref);
			if (mib == NULL) {
				curlwp_bindx(bound);
				error = EBUSY;
				break;
			}

			if (mib->ifvm_p != NULL &&
			    (ifp->if_flags & IFF_PROMISC) != 0)
				error = vlan_safe_ifpromisc(mib->ifvm_p, 0);

			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);

			vlan_unconfig(ifp);
			break;
		}
		if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
			error = EINVAL;		 /* check for valid tag */
			break;
		}
		if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
			error = ENOENT;
			break;
		}

		error = vlan_config(ifv, pr, vlr.vlr_tag);
		if (error != 0)
			break;

		/* Update promiscuous mode, if necessary. */
		vlan_set_promisc(ifp);

		ifp->if_flags |= IFF_RUNNING;
		break;

	case SIOCGETVLAN:
		/* Report current parent and tag to userland. */
		memset(&vlr, 0, sizeof(vlr));
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}
		if (mib->ifvm_p != NULL) {
			snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
			    mib->ifvm_p->if_xname);
			vlr.vlr_tag = mib->ifvm_tag;
		}
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/*
		 * For promiscuous mode, we enable promiscuous mode on
		 * the parent if we need promiscuous on the VLAN interface.
		 */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p != NULL)
			error = vlan_set_promisc(ifp);
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCADDMULTI:
		/* Multicast changes run under ifv_lock, not a psref. */
		mutex_enter(&ifv->ifv_lock);
		mib = ifv->ifv_mib;
		if (mib == NULL) {
			error = EBUSY;
			mutex_exit(&ifv->ifv_lock);
			break;
		}

		error = (mib->ifvm_p != NULL) ?
		    (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
		mib = NULL;
		mutex_exit(&ifv->ifv_lock);
		break;

	case SIOCDELMULTI:
		mutex_enter(&ifv->ifv_lock);
		mib = ifv->ifv_mib;
		if (mib == NULL) {
			error = EBUSY;
			mutex_exit(&ifv->ifv_lock);
			break;
		}
		error = (mib->ifvm_p != NULL) ?
		    (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
		mib = NULL;
		mutex_exit(&ifv->ifv_lock);
		break;

	case SIOCSIFCAP:
		ifcr = data;
		/* make sure caps are enabled on parent */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
			break;
		}
		if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
		    ifcr->ifcr_capenable) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
			break;
		}

		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);

		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCINITIFADDR:
		/* Only valid once a parent is configured. */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			error = EINVAL;
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			break;
		}
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);

		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return error;
}
1192 
/*
 * Add a multicast address on the vlan interface and propagate it to the
 * parent interface.  The address is also recorded on a private list so
 * that it can be removed from the parent when the vlan is unconfigured
 * (see vlan_ether_purgemulti()).  Called with ifv->ifv_lock held.
 *
 * Returns 0 on success or an errno value.
 */
static int
vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
{
	const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
	struct vlan_mc_entry *mc;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	struct ifvlan_linkmib *mib;
	int error;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	/* The address must fit in mc->mc_addr (a sockaddr_storage). */
	if (sa->sa_len > sizeof(struct sockaddr_storage))
		return EINVAL;

	/*
	 * Any return other than ENETRESET (including 0) means the parent
	 * does not need to be told about this address.
	 */
	error = ether_addmulti(sa, &ifv->ifv_ec);
	if (error != ENETRESET)
		return error;

	/*
	 * This is a new multicast address.  We have to tell parent
	 * about it.  Also, remember this multicast address so that
	 * we can delete it on unconfigure.
	 */
	/* M_NOWAIT: ifv_lock is held, so we must not sleep here. */
	mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
	if (mc == NULL) {
		error = ENOMEM;
		goto alloc_failed;
	}

	/*
	 * Since ether_addmulti() returned ENETRESET, the following two
	 * statements shouldn't fail. Here ifv_ec is implicitly protected
	 * by the ifv_lock lock.
	 */
	error = ether_multiaddr(sa, addrlo, addrhi);
	KASSERT(error == 0);

	ETHER_LOCK(&ifv->ifv_ec);
	mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
	ETHER_UNLOCK(&ifv->ifv_ec);

	KASSERT(mc->mc_enm != NULL);

	memcpy(&mc->mc_addr, sa, sa->sa_len);
	LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);

	mib = ifv->ifv_mib;

	/* Take the big kernel lock if the parent interface is not MP-safe. */
	KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
	error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
	KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);

	if (error != 0)
		goto ioctl_failed;
	return error;

ioctl_failed:
	/* The parent rejected the address: undo the list insertion... */
	LIST_REMOVE(mc, mc_entries);
	free(mc, M_DEVBUF);

alloc_failed:
	/* ...and drop the reference taken by ether_addmulti() above. */
	(void)ether_delmulti(sa, &ifv->ifv_ec);
	return error;
}
1257 
/*
 * Delete a multicast address from the vlan interface and, when the
 * last local reference goes away, from the parent interface as well.
 * Only addresses previously added through vlan_ether_addmulti() are
 * accepted.  Called with ifv->ifv_lock held.
 *
 * Returns 0 on success or an errno value.
 */
static int
vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
{
	const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
	struct ether_multi *enm;
	struct vlan_mc_entry *mc;
	struct ifvlan_linkmib *mib;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	int error;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	/*
	 * Find a key to lookup vlan_mc_entry.  We have to do this
	 * before calling ether_delmulti for obvious reasons.
	 */
	if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
		return error;

	ETHER_LOCK(&ifv->ifv_ec);
	enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
	ETHER_UNLOCK(&ifv->ifv_ec);
	if (enm == NULL)
		return EINVAL;

	/* Find our bookkeeping entry for this ether_multi. */
	LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) {
		if (mc->mc_enm == enm)
			break;
	}

	/* We won't delete entries we didn't add. */
	if (mc == NULL)
		return EINVAL;

	/* ENETRESET means the last reference went away. */
	error = ether_delmulti(sa, &ifv->ifv_ec);
	if (error != ENETRESET)
		return error;

	/* We no longer use this multicast address.  Tell parent so. */
	mib = ifv->ifv_mib;
	error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);

	if (error == 0) {
		/* And forget about this address. */
		LIST_REMOVE(mc, mc_entries);
		free(mc, M_DEVBUF);
	} else {
		/* The parent refused; restore our local reference. */
		(void)ether_addmulti(sa, &ifv->ifv_ec);
	}

	return error;
}
1310 
1311 /*
1312  * Delete any multicast address we have asked to add from parent
1313  * interface.  Called when the vlan is being unconfigured.
1314  */
1315 static void
1316 vlan_ether_purgemulti(struct ifvlan *ifv)
1317 {
1318 	struct vlan_mc_entry *mc;
1319 	struct ifvlan_linkmib *mib;
1320 
1321 	KASSERT(mutex_owned(&ifv->ifv_lock));
1322 	mib = ifv->ifv_mib;
1323 	if (mib == NULL) {
1324 		return;
1325 	}
1326 
1327 	while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
1328 		(void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
1329 		    sstocsa(&mc->mc_addr));
1330 		LIST_REMOVE(mc, mc_entries);
1331 		free(mc, M_DEVBUF);
1332 	}
1333 }
1334 
/*
 * if_start routine: drain the vlan interface's send queue, tag each
 * frame with this vlan's 802.1Q tag (in the mbuf header if the parent
 * inserts tags in hardware, otherwise by rewriting the ethernet header
 * in software), then hand the frame to the parent for transmission.
 */
static void
vlan_start(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifnet *p;		/* parent interface */
	struct ethercom *ec;
	struct mbuf *m;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error;

	/* Hold a psref on the link mib so the parent cannot go away. */
	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL)
		return;
	p = mib->ifvm_p;
	/* The ifnet is the first member of struct ethercom, hence the cast. */
	ec = (void *)mib->ifvm_p;

	ifp->if_flags |= IFF_OACTIVE;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#ifdef ALTQ
		/*
		 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is
		 * defined.
		 */
		KERNEL_LOCK(1, NULL);
		/*
		 * If ALTQ is enabled on the parent interface, do
		 * classification; the queueing discipline might
		 * not require classification, but might require
		 * the address family/header pointer in the pktattr.
		 */
		if (ALTQ_IS_ENABLED(&p->if_snd)) {
			switch (p->if_type) {
			case IFT_ETHER:
				altq_etherclassify(&p->if_snd, m);
				break;
			default:
				panic("%s: impossible (altq)", __func__);
			}
		}
		KERNEL_UNLOCK_ONE(NULL);
#endif /* ALTQ */

		bpf_mtap(ifp, m, BPF_D_OUT);
		/*
		 * If the parent can insert the tag itself, just mark
		 * the tag in the mbuf header.
		 */
		if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
			vlan_set_tag(m, mib->ifvm_tag);
		} else {
			/*
			 * insert the tag ourselves
			 */
			M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
			if (m == NULL) {
				printf("%s: unable to prepend encap header",
				    p->if_xname);
				if_statinc(ifp, if_oerrors);
				continue;
			}

			switch (p->if_type) {
			case IFT_ETHER:
			    {
				struct ether_vlan_header *evl;

				if (m->m_len < sizeof(struct ether_vlan_header))
					m = m_pullup(m,
					    sizeof(struct ether_vlan_header));
				if (m == NULL) {
					printf("%s: unable to pullup encap "
					    "header", p->if_xname);
					if_statinc(ifp, if_oerrors);
					continue;
				}

				/*
				 * Transform the Ethernet header into an
				 * Ethernet header with 802.1Q encapsulation.
				 */
				memmove(mtod(m, void *),
				    mtod(m, char *) + mib->ifvm_encaplen,
				    sizeof(struct ether_header));
				evl = mtod(m, struct ether_vlan_header *);
				evl->evl_proto = evl->evl_encap_proto;
				evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
				evl->evl_tag = htons(mib->ifvm_tag);

				/*
				 * To cater for VLAN-aware layer 2 ethernet
				 * switches which may need to strip the tag
				 * before forwarding the packet, make sure
				 * the packet+tag is at least 68 bytes long.
				 * This is necessary because our parent will
				 * only pad to 64 bytes (ETHER_MIN_LEN) and
				 * some switches will not pad by themselves
				 * after deleting a tag.
				 */
				const size_t min_data_len = ETHER_MIN_LEN -
				    ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
				if (m->m_pkthdr.len < min_data_len) {
					m_copyback(m, m->m_pkthdr.len,
					    min_data_len - m->m_pkthdr.len,
					    vlan_zero_pad_buff);
				}
				break;
			    }

			default:
				panic("%s: impossible", __func__);
			}
		}

		/* Drop the frame if the parent went down in the meantime. */
		if ((p->if_flags & IFF_RUNNING) == 0) {
			m_freem(m);
			continue;
		}

		error = if_transmit_lock(p, m);
		if (error) {
			/* mbuf is already freed */
			if_statinc(ifp, if_oerrors);
			continue;
		}
		if_statinc(ifp, if_opackets);
	}

	ifp->if_flags &= ~IFF_OACTIVE;

	/* Remove reference to mib before release */
	vlan_putref_linkmib(mib, &psref);
}
1473 
/*
 * if_transmit routine: tag a single mbuf with this vlan's 802.1Q tag
 * (in the mbuf header if the parent inserts tags in hardware, else by
 * rewriting the ethernet header in software) and pass it straight to
 * the parent's transmit routine, updating the vlan statistics.
 *
 * Consumes the mbuf on both success and failure.  Returns 0 or errno.
 */
static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifnet *p;		/* parent interface */
	struct ethercom *ec;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error;
	/* Record length/mcast now, before the mbuf can be consumed. */
	size_t pktlen = m->m_pkthdr.len;
	bool mcast = (m->m_flags & M_MCAST) != 0;

	/* Hold a psref on the link mib so the parent cannot go away. */
	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	p = mib->ifvm_p;
	/* The ifnet is the first member of struct ethercom, hence the cast. */
	ec = (void *)mib->ifvm_p;

	bpf_mtap(ifp, m, BPF_D_OUT);

	/* The outbound packet filter may consume or replace the mbuf. */
	if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
		goto out;
	if (m == NULL)
		goto out;

	/*
	 * If the parent can insert the tag itself, just mark
	 * the tag in the mbuf header.
	 */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
		vlan_set_tag(m, mib->ifvm_tag);
	} else {
		/*
		 * insert the tag ourselves
		 */
		M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
		if (m == NULL) {
			printf("%s: unable to prepend encap header",
			    p->if_xname);
			if_statinc(ifp, if_oerrors);
			error = ENOBUFS;
			goto out;
		}

		switch (p->if_type) {
		case IFT_ETHER:
		    {
			struct ether_vlan_header *evl;

			if (m->m_len < sizeof(struct ether_vlan_header))
				m = m_pullup(m,
				    sizeof(struct ether_vlan_header));
			if (m == NULL) {
				printf("%s: unable to pullup encap "
				    "header", p->if_xname);
				if_statinc(ifp, if_oerrors);
				error = ENOBUFS;
				goto out;
			}

			/*
			 * Transform the Ethernet header into an
			 * Ethernet header with 802.1Q encapsulation.
			 */
			memmove(mtod(m, void *),
			    mtod(m, char *) + mib->ifvm_encaplen,
			    sizeof(struct ether_header));
			evl = mtod(m, struct ether_vlan_header *);
			evl->evl_proto = evl->evl_encap_proto;
			evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
			evl->evl_tag = htons(mib->ifvm_tag);

			/*
			 * To cater for VLAN-aware layer 2 ethernet
			 * switches which may need to strip the tag
			 * before forwarding the packet, make sure
			 * the packet+tag is at least 68 bytes long.
			 * This is necessary because our parent will
			 * only pad to 64 bytes (ETHER_MIN_LEN) and
			 * some switches will not pad by themselves
			 * after deleting a tag.
			 */
			const size_t min_data_len = ETHER_MIN_LEN -
			    ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
			if (m->m_pkthdr.len < min_data_len) {
				m_copyback(m, m->m_pkthdr.len,
				    min_data_len - m->m_pkthdr.len,
				    vlan_zero_pad_buff);
			}
			break;
		    }

		default:
			panic("%s: impossible", __func__);
		}
	}

	/* Drop the frame if the parent went down in the meantime. */
	if ((p->if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

	error = if_transmit_lock(p, m);
	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if (error) {
		/* mbuf is already freed */
		if_statinc_ref(nsr, if_oerrors);
	} else {
		if_statinc_ref(nsr, if_opackets);
		if_statadd_ref(nsr, if_obytes, pktlen);
		if (mcast)
			if_statinc_ref(nsr, if_omcasts);
	}
	IF_STAT_PUTREF(ifp);

out:
	/* Remove reference to mib before release */
	vlan_putref_linkmib(mib, &psref);
	return error;
}
1598 
1599 /*
1600  * Given an Ethernet frame, find a valid vlan interface corresponding to the
1601  * given source interface and tag, then run the real packet through the
1602  * parent's input routine.
1603  */
1604 void
1605 vlan_input(struct ifnet *ifp, struct mbuf *m)
1606 {
1607 	struct ifvlan *ifv;
1608 	uint16_t vid;
1609 	struct ifvlan_linkmib *mib;
1610 	struct psref psref;
1611 	bool have_vtag;
1612 
1613 	have_vtag = vlan_has_tag(m);
1614 	if (have_vtag) {
1615 		vid = EVL_VLANOFTAG(vlan_get_tag(m));
1616 		m->m_flags &= ~M_VLANTAG;
1617 	} else {
1618 		struct ether_vlan_header *evl;
1619 
1620 		if (ifp->if_type != IFT_ETHER) {
1621 			panic("%s: impossible", __func__);
1622 		}
1623 
1624 		if (m->m_len < sizeof(struct ether_vlan_header) &&
1625 		    (m = m_pullup(m,
1626 		     sizeof(struct ether_vlan_header))) == NULL) {
1627 			printf("%s: no memory for VLAN header, "
1628 			    "dropping packet.\n", ifp->if_xname);
1629 			return;
1630 		}
1631 		evl = mtod(m, struct ether_vlan_header *);
1632 		KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);
1633 
1634 		vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));
1635 
1636 		/*
1637 		 * Restore the original ethertype.  We'll remove
1638 		 * the encapsulation after we've found the vlan
1639 		 * interface corresponding to the tag.
1640 		 */
1641 		evl->evl_encap_proto = evl->evl_proto;
1642 	}
1643 
1644 	mib = vlan_lookup_tag_psref(ifp, vid, &psref);
1645 	if (mib == NULL) {
1646 		m_freem(m);
1647 		if_statinc(ifp, if_noproto);
1648 		return;
1649 	}
1650 	KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN);
1651 
1652 	ifv = mib->ifvm_ifvlan;
1653 	if ((ifv->ifv_if.if_flags & (IFF_UP | IFF_RUNNING)) !=
1654 	    (IFF_UP | IFF_RUNNING)) {
1655 		m_freem(m);
1656 		if_statinc(ifp, if_noproto);
1657 		goto out;
1658 	}
1659 
1660 	/*
1661 	 * Now, remove the encapsulation header.  The original
1662 	 * header has already been fixed up above.
1663 	 */
1664 	if (!have_vtag) {
1665 		memmove(mtod(m, char *) + mib->ifvm_encaplen,
1666 		    mtod(m, void *), sizeof(struct ether_header));
1667 		m_adj(m, mib->ifvm_encaplen);
1668 	}
1669 
1670 	m_set_rcvif(m, &ifv->ifv_if);
1671 
1672 	if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
1673 		goto out;
1674 	if (m == NULL)
1675 		goto out;
1676 
1677 	m->m_flags &= ~M_PROMISC;
1678 	if_input(&ifv->ifv_if, m);
1679 out:
1680 	vlan_putref_linkmib(mib, &psref);
1681 }
1682 
1683 /*
1684  * If the parent link state changed, the vlan link state should change also.
1685  */
1686 void
1687 vlan_link_state_changed(struct ifnet *p, int link_state)
1688 {
1689 	struct ifvlan *ifv;
1690 	struct ifvlan_linkmib *mib;
1691 	struct psref psref;
1692 	struct ifnet *ifp;
1693 
1694 	mutex_enter(&ifv_list.lock);
1695 
1696 	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
1697 		mib = vlan_getref_linkmib(ifv, &psref);
1698 		if (mib == NULL)
1699 			continue;
1700 
1701 		if (mib->ifvm_p == p) {
1702 			ifp = &mib->ifvm_ifvlan->ifv_if;
1703 			if_link_state_change(ifp, link_state);
1704 		}
1705 
1706 		vlan_putref_linkmib(mib, &psref);
1707 	}
1708 
1709 	mutex_exit(&ifv_list.lock);
1710 }
1711 
1712 /*
1713  * Module infrastructure
1714  */
1715 #include "if_module.h"
1716 
1717 IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL)
1718