xref: /netbsd-src/sys/net/if_bridge.c (revision 9dc0bb4cc688e21ede7c5dc70e8e7f007735d600)
1 /*	$NetBSD: if_bridge.c,v 1.193 2024/07/16 03:35:38 ozaki-r Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by Jason L. Wright
53  * 4. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66  * POSSIBILITY OF SUCH DAMAGE.
67  *
68  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69  */
70 
71 /*
72  * Network interface bridge support.
73  *
74  * TODO:
75  *
76  *	- Currently only supports Ethernet-like interfaces (Ethernet,
77  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
78  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogenous bridges).
80  */
81 
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.193 2024/07/16 03:35:38 ozaki-r Exp $");
84 
85 #ifdef _KERNEL_OPT
86 #include "opt_inet.h"
87 #include "opt_net_mpsafe.h"
88 #endif /* _KERNEL_OPT */
89 
90 #include <sys/param.h>
91 #include <sys/kernel.h>
92 #include <sys/mbuf.h>
93 #include <sys/queue.h>
94 #include <sys/socket.h>
95 #include <sys/socketvar.h> /* for softnet_lock */
96 #include <sys/sockio.h>
97 #include <sys/systm.h>
98 #include <sys/proc.h>
99 #include <sys/pool.h>
100 #include <sys/kauth.h>
101 #include <sys/cpu.h>
102 #include <sys/cprng.h>
103 #include <sys/mutex.h>
104 #include <sys/kmem.h>
105 
106 #include <net/bpf.h>
107 #include <net/if.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 #include <net/if_llc.h>
111 
112 #include <net/if_ether.h>
113 #include <net/if_bridgevar.h>
114 #include <net/ether_sw_offload.h>
115 
116 /* Used for bridge_ip[6]_checkbasic */
117 #include <netinet/in.h>
118 #include <netinet/in_systm.h>
119 #include <netinet/ip.h>
120 #include <netinet/ip_var.h>
121 #include <netinet/ip_private.h>		/* XXX */
122 #include <netinet/ip6.h>
123 #include <netinet6/in6_var.h>
124 #include <netinet6/ip6_var.h>
125 #include <netinet6/ip6_private.h>	/* XXX */
126 
127 /*
128  * Size of the route hash table.  Must be a power of two.
129  */
130 #ifndef BRIDGE_RTHASH_SIZE
131 #define	BRIDGE_RTHASH_SIZE		1024
132 #endif
133 
134 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
135 
136 #include "carp.h"
137 #if NCARP > 0
138 #include <netinet/in.h>
139 #include <netinet/in_var.h>
140 #include <netinet/ip_carp.h>
141 #endif
142 
143 #include "ioconf.h"
144 
145 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
146 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
147 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
148 
149 /*
150  * Maximum number of addresses to cache.
151  */
152 #ifndef BRIDGE_RTABLE_MAX
153 #define	BRIDGE_RTABLE_MAX		100
154 #endif
155 
156 /*
157  * Spanning tree defaults.
158  */
159 #define	BSTP_DEFAULT_MAX_AGE		(20 * 256)
160 #define	BSTP_DEFAULT_HELLO_TIME		(2 * 256)
161 #define	BSTP_DEFAULT_FORWARD_DELAY	(15 * 256)
162 #define	BSTP_DEFAULT_HOLD_TIME		(1 * 256)
163 #define	BSTP_DEFAULT_BRIDGE_PRIORITY	0x8000
164 #define	BSTP_DEFAULT_PORT_PRIORITY	0x80
165 #define	BSTP_DEFAULT_PATH_COST		55
166 
167 /*
168  * Timeout (in seconds) for entries learned dynamically.
169  */
170 #ifndef BRIDGE_RTABLE_TIMEOUT
171 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
172 #endif
173 
174 /*
175  * Number of seconds between walks of the route list.
176  */
177 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
178 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
179 #endif
180 
181 #define BRIDGE_RT_LOCK(_sc)	mutex_enter((_sc)->sc_rtlist_lock)
182 #define BRIDGE_RT_UNLOCK(_sc)	mutex_exit((_sc)->sc_rtlist_lock)
183 #define BRIDGE_RT_LOCKED(_sc)	mutex_owned((_sc)->sc_rtlist_lock)
184 
185 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
186 				pserialize_perform((_sc)->sc_rtlist_psz)
187 
188 #define BRIDGE_RTLIST_READER_FOREACH(_brt, _sc)			\
189 	PSLIST_READER_FOREACH((_brt), &((_sc)->sc_rtlist),		\
190 	    struct bridge_rtnode, brt_list)
191 #define BRIDGE_RTLIST_WRITER_FOREACH(_brt, _sc)			\
192 	PSLIST_WRITER_FOREACH((_brt), &((_sc)->sc_rtlist),		\
193 	    struct bridge_rtnode, brt_list)
194 #define BRIDGE_RTLIST_WRITER_INSERT_HEAD(_sc, _brt)			\
195 	PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rtlist, brt, brt_list)
196 #define BRIDGE_RTLIST_WRITER_REMOVE(_brt)				\
197 	PSLIST_WRITER_REMOVE((_brt), brt_list)
198 
199 #define BRIDGE_RTHASH_READER_FOREACH(_brt, _sc, _hash)			\
200 	PSLIST_READER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)],	\
201 	    struct bridge_rtnode, brt_hash)
202 #define BRIDGE_RTHASH_WRITER_FOREACH(_brt, _sc, _hash)			\
203 	PSLIST_WRITER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)],	\
204 	    struct bridge_rtnode, brt_hash)
205 #define BRIDGE_RTHASH_WRITER_INSERT_HEAD(_sc, _hash, _brt)		\
206 	PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rthash[(_hash)], brt, brt_hash)
207 #define BRIDGE_RTHASH_WRITER_INSERT_AFTER(_brt, _new)			\
208 	PSLIST_WRITER_INSERT_AFTER((_brt), (_new), brt_hash)
209 #define BRIDGE_RTHASH_WRITER_REMOVE(_brt)				\
210 	PSLIST_WRITER_REMOVE((_brt), brt_hash)
211 
212 #ifdef NET_MPSAFE
213 #define DECLARE_LOCK_VARIABLE
214 #define ACQUIRE_GLOBAL_LOCKS()	do { } while (0)
215 #define RELEASE_GLOBAL_LOCKS()	do { } while (0)
216 #else
217 #define DECLARE_LOCK_VARIABLE	int __s
218 #define ACQUIRE_GLOBAL_LOCKS()	do {					\
219 					KERNEL_LOCK(1, NULL);		\
220 					mutex_enter(softnet_lock);	\
221 					__s = splsoftnet();		\
222 				} while (0)
223 #define RELEASE_GLOBAL_LOCKS()	do {					\
224 					splx(__s);			\
225 					mutex_exit(softnet_lock);	\
226 					KERNEL_UNLOCK_ONE(NULL);	\
227 				} while (0)
228 #endif
229 
230 struct psref_class *bridge_psref_class __read_mostly;
231 
232 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
233 
234 static struct pool bridge_rtnode_pool;
235 
236 static int	bridge_clone_create(struct if_clone *, int);
237 static int	bridge_clone_destroy(struct ifnet *);
238 
239 static int	bridge_ioctl(struct ifnet *, u_long, void *);
240 static int	bridge_init(struct ifnet *);
241 static void	bridge_stop(struct ifnet *, int);
242 static void	bridge_start(struct ifnet *);
243 static void	bridge_ifdetach(void *);
244 
245 static void	bridge_input(struct ifnet *, struct mbuf *);
246 static void	bridge_forward(struct bridge_softc *, struct mbuf *);
247 
248 static void	bridge_timer(void *);
249 
250 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
251 				 struct mbuf *);
252 
253 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
254 				struct ifnet *, int, uint8_t);
255 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
256 static void	bridge_rttrim(struct bridge_softc *);
257 static void	bridge_rtage(struct bridge_softc *);
258 static void	bridge_rtage_work(struct work *, void *);
259 static void	bridge_rtflush(struct bridge_softc *, int);
260 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
261 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
262 
263 static void	bridge_rtable_init(struct bridge_softc *);
264 static void	bridge_rtable_fini(struct bridge_softc *);
265 
266 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
267 						  const uint8_t *);
268 static int	bridge_rtnode_insert(struct bridge_softc *,
269 				     struct bridge_rtnode *);
270 static void	bridge_rtnode_remove(struct bridge_softc *,
271 				     struct bridge_rtnode *);
272 static void	bridge_rtnode_destroy(struct bridge_rtnode *);
273 
274 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
275 						  const char *name,
276 						  struct psref *);
277 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
278 						     struct ifnet *ifp,
279 						     struct psref *);
280 static void	bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
281                                       struct psref *);
282 static void	bridge_delete_member(struct bridge_softc *,
283 				     struct bridge_iflist *);
284 static void	bridge_acquire_member(struct bridge_softc *sc,
285                                       struct bridge_iflist *,
286                                       struct psref *);
287 
288 static int	bridge_ioctl_add(struct bridge_softc *, void *);
289 static int	bridge_ioctl_del(struct bridge_softc *, void *);
290 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
291 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
292 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
293 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
294 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
295 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
296 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
297 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
298 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
299 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
300 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
301 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
302 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
303 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
304 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
305 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
306 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
307 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
308 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
309 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
310 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
311 static int	bridge_ioctl_gfilt(struct bridge_softc *, void *);
312 static int	bridge_ioctl_sfilt(struct bridge_softc *, void *);
313 static int	bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
314 static int	bridge_ip_checkbasic(struct mbuf **mp);
315 # ifdef INET6
316 static int	bridge_ip6_checkbasic(struct mbuf **mp);
317 # endif /* INET6 */
318 
319 struct bridge_control {
320 	int	(*bc_func)(struct bridge_softc *, void *);
321 	int	bc_argsize;
322 	int	bc_flags;
323 };
324 
325 #define	BC_F_COPYIN		0x01	/* copy arguments in */
326 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
327 #define	BC_F_SUSER		0x04	/* do super-user check */
328 #define BC_F_XLATEIN		0x08	/* xlate arguments in */
329 #define BC_F_XLATEOUT		0x10	/* xlate arguments out */
330 
331 static const struct bridge_control bridge_control_table[] = {
332 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
333 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
334 
335 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
336 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
337 
338 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
339 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
340 
341 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
342 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
343 
344 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
345 
346 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
347 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
348 
349 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
350 
351 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
352 
353 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
354 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
355 
356 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
357 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
358 
359 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
360 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
361 
362 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
363 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
364 
365 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
366 
367 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
368 
369 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
370 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
371 
372 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
373 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
374 };
375 
376 static const int bridge_control_table_size = __arraycount(bridge_control_table);
377 
378 static struct if_clone bridge_cloner =
379     IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
380 
381 /*
382  * bridgeattach:
383  *
384  *	Pseudo-device attach routine.
385  */
386 void
387 bridgeattach(int n)
388 {
389 
390 	pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
391 	    0, 0, 0, "brtpl", NULL, IPL_NET);
392 
393 	bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
394 
395 	if_clone_attach(&bridge_cloner);
396 }
397 
398 /*
399  * bridge_clone_create:
400  *
401  *	Create a new bridge instance.
402  */
403 static int
404 bridge_clone_create(struct if_clone *ifc, int unit)
405 {
406 	struct bridge_softc *sc;
407 	struct ifnet *ifp;
408 	int error;
409 
410 	sc = kmem_zalloc(sizeof(*sc),  KM_SLEEP);
411 	ifp = &sc->sc_if;
412 
413 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
414 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
415 	sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
416 	sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
417 	sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
418 	sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
419 	sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
420 	sc->sc_filter_flags = 0;
421 
422 	/* Initialize our routing table. */
423 	bridge_rtable_init(sc);
424 
425 	error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
426 	    bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
427 	if (error)
428 		panic("%s: workqueue_create %d\n", __func__, error);
429 
430 	callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
431 	callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
432 
433 	mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
434 	PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
435 	sc->sc_iflist_psref.bip_psz = pserialize_create();
436 
437 	if_initname(ifp, ifc->ifc_name, unit);
438 	ifp->if_softc = sc;
439 #ifdef NET_MPSAFE
440 	ifp->if_extflags = IFEF_MPSAFE;
441 #endif
442 	ifp->if_mtu = ETHERMTU;
443 	ifp->if_ioctl = bridge_ioctl;
444 	ifp->if_output = bridge_output;
445 	ifp->if_start = bridge_start;
446 	ifp->if_stop = bridge_stop;
447 	ifp->if_init = bridge_init;
448 	ifp->if_type = IFT_BRIDGE;
449 	ifp->if_addrlen = 0;
450 	ifp->if_dlt = DLT_EN10MB;
451 	ifp->if_hdrlen = ETHER_HDR_LEN;
452 	if_initialize(ifp);
453 
454 	/*
455 	 * Set the link state to down.
456 	 * When interfaces are added the link state will reflect
457 	 * the best link state of the combined interfaces.
458 	 */
459 	ifp->if_link_state = LINK_STATE_DOWN;
460 
461 	if_alloc_sadl(ifp);
462 	if_register(ifp);
463 
464 	return 0;
465 }
466 
467 /*
468  * bridge_clone_destroy:
469  *
470  *	Destroy a bridge instance.
471  */
472 static int
473 bridge_clone_destroy(struct ifnet *ifp)
474 {
475 	struct bridge_softc *sc = ifp->if_softc;
476 	struct bridge_iflist *bif;
477 
478 	if ((ifp->if_flags & IFF_RUNNING) != 0)
479 		bridge_stop(ifp, 1);
480 
481 	BRIDGE_LOCK(sc);
482 	for (;;) {
483 		bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
484 		    bif_next);
485 		if (bif == NULL)
486 			break;
487 		bridge_delete_member(sc, bif);
488 	}
489 	PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
490 	BRIDGE_UNLOCK(sc);
491 
492 	if_detach(ifp);
493 
494 	/* Tear down the routing table. */
495 	bridge_rtable_fini(sc);
496 
497 	pserialize_destroy(sc->sc_iflist_psref.bip_psz);
498 	mutex_destroy(&sc->sc_iflist_psref.bip_lock);
499 	callout_destroy(&sc->sc_brcallout);
500 	callout_destroy(&sc->sc_bstpcallout);
501 	workqueue_destroy(sc->sc_rtage_wq);
502 	kmem_free(sc, sizeof(*sc));
503 
504 	return 0;
505 }
506 
507 /*
508  * bridge_ioctl:
509  *
510  *	Handle a control request from the operator.
511  */
512 static int
513 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
514 {
515 	struct bridge_softc *sc = ifp->if_softc;
516 	struct lwp *l = curlwp;	/* XXX */
517 	union {
518 		struct ifbreq ifbreq;
519 		struct ifbifconf ifbifconf;
520 		struct ifbareq ifbareq;
521 		struct ifbaconf ifbaconf;
522 		struct ifbrparam ifbrparam;
523 	} args;
524 	struct ifdrv *ifd = (struct ifdrv *) data;
525 	const struct bridge_control *bc = NULL; /* XXXGCC */
526 	int error = 0;
527 
528 	/* Authorize command before calling splsoftnet(). */
529 	switch (cmd) {
530 	case SIOCGDRVSPEC:
531 	case SIOCSDRVSPEC:
532 		if (ifd->ifd_cmd >= bridge_control_table_size
533 		    || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
534 			error = EINVAL;
535 			return error;
536 		}
537 
538 		/* We only care about BC_F_SUSER at this point. */
539 		if ((bc->bc_flags & BC_F_SUSER) == 0)
540 			break;
541 
542 		error = kauth_authorize_network(l->l_cred,
543 		    KAUTH_NETWORK_INTERFACE_BRIDGE,
544 		    cmd == SIOCGDRVSPEC ?
545 		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
546 		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
547 		     ifd, NULL, NULL);
548 		if (error)
549 			return error;
550 
551 		break;
552 	}
553 
554 	const int s = splsoftnet();
555 
556 	switch (cmd) {
557 	case SIOCGDRVSPEC:
558 	case SIOCSDRVSPEC:
559 		KASSERT(bc != NULL);
560 		if (cmd == SIOCGDRVSPEC &&
561 		    (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
562 			error = EINVAL;
563 			break;
564 		}
565 		else if (cmd == SIOCSDRVSPEC &&
566 		    (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
567 			error = EINVAL;
568 			break;
569 		}
570 
571 		/* BC_F_SUSER is checked above, before splsoftnet(). */
572 
573 		if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
574 		    && (ifd->ifd_len != bc->bc_argsize
575 			|| ifd->ifd_len > sizeof(args))) {
576 			error = EINVAL;
577 			break;
578 		}
579 
580 		memset(&args, 0, sizeof(args));
581 		if (bc->bc_flags & BC_F_COPYIN) {
582 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
583 			if (error)
584 				break;
585 		} else if (bc->bc_flags & BC_F_XLATEIN) {
586 			args.ifbifconf.ifbic_len = ifd->ifd_len;
587 			args.ifbifconf.ifbic_buf = ifd->ifd_data;
588 		}
589 
590 		error = (*bc->bc_func)(sc, &args);
591 		if (error)
592 			break;
593 
594 		if (bc->bc_flags & BC_F_COPYOUT) {
595 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
596 		} else if (bc->bc_flags & BC_F_XLATEOUT) {
597 			ifd->ifd_len = args.ifbifconf.ifbic_len;
598 			ifd->ifd_data = args.ifbifconf.ifbic_buf;
599 		}
600 		break;
601 
602 	case SIOCSIFFLAGS:
603 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
604 			break;
605 		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
606 		case IFF_RUNNING:
607 			/*
608 			 * If interface is marked down and it is running,
609 			 * then stop and disable it.
610 			 */
611 			if_stop(ifp, 1);
612 			break;
613 		case IFF_UP:
614 			/*
615 			 * If interface is marked up and it is stopped, then
616 			 * start it.
617 			 */
618 			error = if_init(ifp);
619 			break;
620 		default:
621 			break;
622 		}
623 		break;
624 
625 	case SIOCSIFMTU:
626 		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
627 			error = 0;
628 		break;
629 
630         case SIOCGIFCAP:
631 	    {
632 		struct ifcapreq *ifcr = (struct ifcapreq *)data;
633                 ifcr->ifcr_capabilities = sc->sc_capenable;
634                 ifcr->ifcr_capenable = sc->sc_capenable;
635 		break;
636 	    }
637 
638 	default:
639 		error = ifioctl_common(ifp, cmd, data);
640 		break;
641 	}
642 
643 	splx(s);
644 
645 	return error;
646 }
647 
648 /*
649  * bridge_lookup_member:
650  *
651  *	Lookup a bridge member interface.
652  */
653 static struct bridge_iflist *
654 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
655 {
656 	struct bridge_iflist *bif;
657 	struct ifnet *ifp;
658 	int s;
659 
660 	s = pserialize_read_enter();
661 
662 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
663 		ifp = bif->bif_ifp;
664 		if (strcmp(ifp->if_xname, name) == 0)
665 			break;
666 	}
667 	if (bif != NULL)
668 		bridge_acquire_member(sc, bif, psref);
669 
670 	pserialize_read_exit(s);
671 
672 	return bif;
673 }
674 
675 /*
676  * bridge_lookup_member_if:
677  *
678  *	Lookup a bridge member interface by ifnet*.
679  */
680 static struct bridge_iflist *
681 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
682     struct psref *psref)
683 {
684 	struct bridge_iflist *bif;
685 	int s;
686 
687 	s = pserialize_read_enter();
688 
689 	bif = member_ifp->if_bridgeif;
690 	if (bif != NULL) {
691 		psref_acquire(psref, &bif->bif_psref,
692 		    bridge_psref_class);
693 	}
694 
695 	pserialize_read_exit(s);
696 
697 	return bif;
698 }
699 
700 static void
701 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
702     struct psref *psref)
703 {
704 
705 	psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
706 }
707 
708 /*
709  * bridge_release_member:
710  *
711  *	Release the specified member interface.
712  */
713 static void
714 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
715     struct psref *psref)
716 {
717 
718 	psref_release(psref, &bif->bif_psref, bridge_psref_class);
719 }
720 
721 /*
722  * bridge_delete_member:
723  *
724  *	Delete the specified member interface.
725  */
726 static void
727 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
728 {
729 	struct ifnet *ifs = bif->bif_ifp;
730 
731 	KASSERT(BRIDGE_LOCKED(sc));
732 
733 	ifs->_if_input = ether_input;
734 	ifs->if_bridge = NULL;
735 	ifs->if_bridgeif = NULL;
736 
737 	PSLIST_WRITER_REMOVE(bif, bif_next);
738 	BRIDGE_PSZ_PERFORM(sc);
739 
740 	if_linkstate_change_disestablish(ifs,
741 	    bif->bif_linkstate_hook, BRIDGE_LOCK_OBJ(sc));
742 	ether_ifdetachhook_disestablish(ifs,
743 	    bif->bif_ifdetach_hook, BRIDGE_LOCK_OBJ(sc));
744 
745 	BRIDGE_UNLOCK(sc);
746 
747 	switch (ifs->if_type) {
748 	case IFT_ETHER:
749 	case IFT_L2TP:
750 		/*
751 		 * Take the interface out of promiscuous mode.
752 		 * Don't call it with holding a spin lock.
753 		 */
754 		(void) ifpromisc(ifs, 0);
755 		IFNET_LOCK(ifs);
756 		(void) ether_disable_vlan_mtu(ifs);
757 		IFNET_UNLOCK(ifs);
758 		break;
759 	default:
760 #ifdef DIAGNOSTIC
761 		panic("%s: impossible", __func__);
762 #endif
763 		break;
764 	}
765 
766 	psref_target_destroy(&bif->bif_psref, bridge_psref_class);
767 
768 	PSLIST_ENTRY_DESTROY(bif, bif_next);
769 	kmem_free(bif, sizeof(*bif));
770 
771 	BRIDGE_LOCK(sc);
772 }
773 
774 /*
775  * bridge_calc_csum_flags:
776  *
777  *	Calculate logical and b/w csum flags each member interface supports.
778  */
779 void
780 bridge_calc_csum_flags(struct bridge_softc *sc)
781 {
782 	struct bridge_iflist *bif;
783 	struct ifnet *ifs = NULL;
784 	int flags = ~0;
785 	int capenable = ~0;
786 
787 	BRIDGE_LOCK(sc);
788 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
789 		ifs = bif->bif_ifp;
790 		flags &= ifs->if_csum_flags_tx;
791 		capenable &= ifs->if_capenable;
792 	}
793 	sc->sc_csum_flags_tx = flags;
794 	sc->sc_capenable = (ifs != NULL) ? capenable : 0;
795 	BRIDGE_UNLOCK(sc);
796 }
797 
798 /*
799  * bridge_calc_link_state:
800  *
801  *	Calculate the link state based on each member interface.
802  */
803 static void
804 bridge_calc_link_state(void *xsc)
805 {
806 	struct bridge_softc *sc = xsc;
807 	struct bridge_iflist *bif;
808 	struct ifnet *ifs;
809 	int link_state = LINK_STATE_DOWN;
810 
811 	BRIDGE_LOCK(sc);
812 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
813 		ifs = bif->bif_ifp;
814 		if (ifs->if_link_state == LINK_STATE_UP) {
815 			link_state = LINK_STATE_UP;
816 			break;
817 		}
818 		if (ifs->if_link_state == LINK_STATE_UNKNOWN)
819 			link_state = LINK_STATE_UNKNOWN;
820 	}
821 	if_link_state_change(&sc->sc_if, link_state);
822 	BRIDGE_UNLOCK(sc);
823 }
824 
825 static int
826 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
827 {
828 	struct ifbreq *req = arg;
829 	struct bridge_iflist *bif = NULL;
830 	struct ifnet *ifs;
831 	int error = 0;
832 	struct psref psref;
833 
834 	ifs = if_get(req->ifbr_ifsname, &psref);
835 	if (ifs == NULL)
836 		return ENOENT;
837 
838 	if (ifs->if_bridge == sc) {
839 		error = EEXIST;
840 		goto out;
841 	}
842 
843 	if (ifs->if_bridge != NULL) {
844 		error = EBUSY;
845 		goto out;
846 	}
847 
848 	if (ifs->_if_input != ether_input) {
849 		error = EINVAL;
850 		goto out;
851 	}
852 
853 	/* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
854 	if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
855 		error = EINVAL;
856 		goto out;
857 	}
858 
859 	bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
860 
861 	switch (ifs->if_type) {
862 	case IFT_ETHER:
863 		if (sc->sc_if.if_mtu != ifs->if_mtu) {
864 			/* Change MTU of added interface to bridge MTU */
865 			struct ifreq ifr;
866 			memset(&ifr, 0, sizeof(ifr));
867 			ifr.ifr_mtu = sc->sc_if.if_mtu;
868 			IFNET_LOCK(ifs);
869 			error = if_ioctl(ifs, SIOCSIFMTU, &ifr);
870 			IFNET_UNLOCK(ifs);
871 			if (error != 0)
872 				goto out;
873 		}
874 		/* FALLTHROUGH */
875 	case IFT_L2TP:
876 		IFNET_LOCK(ifs);
877 		error = ether_enable_vlan_mtu(ifs);
878 		IFNET_UNLOCK(ifs);
879 		if (error > 0)
880 			goto out;
881 		/*
882 		 * Place the interface into promiscuous mode.
883 		 */
884 		error = ifpromisc(ifs, 1);
885 		if (error)
886 			goto out;
887 		break;
888 	default:
889 		error = EINVAL;
890 		goto out;
891 	}
892 
893 	bif->bif_ifp = ifs;
894 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
895 	bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
896 	bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
897 	bif->bif_linkstate_hook = if_linkstate_change_establish(ifs,
898 	    bridge_calc_link_state, sc);
899 	PSLIST_ENTRY_INIT(bif, bif_next);
900 	psref_target_init(&bif->bif_psref, bridge_psref_class);
901 
902 	BRIDGE_LOCK(sc);
903 
904 	ifs->if_bridge = sc;
905 	ifs->if_bridgeif = bif;
906 	PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
907 	ifs->_if_input = bridge_input;
908 
909 	BRIDGE_UNLOCK(sc);
910 
911 	bif->bif_ifdetach_hook = ether_ifdetachhook_establish(ifs,
912 	    bridge_ifdetach, (void *)ifs);
913 
914 	bridge_calc_csum_flags(sc);
915 	bridge_calc_link_state(sc);
916 
917 	if (sc->sc_if.if_flags & IFF_RUNNING)
918 		bstp_initialization(sc);
919 	else
920 		bstp_stop(sc);
921 
922 out:
923 	if_put(ifs, &psref);
924 	if (error) {
925 		if (bif != NULL)
926 			kmem_free(bif, sizeof(*bif));
927 	}
928 	return error;
929 }
930 
931 static int
932 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
933 {
934 	struct ifbreq *req = arg;
935 	const char *name = req->ifbr_ifsname;
936 	struct bridge_iflist *bif;
937 	struct ifnet *ifs;
938 
939 	BRIDGE_LOCK(sc);
940 
941 	/*
942 	 * Don't use bridge_lookup_member. We want to get a member
943 	 * with bif_refs == 0.
944 	 */
945 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
946 		ifs = bif->bif_ifp;
947 		if (strcmp(ifs->if_xname, name) == 0)
948 			break;
949 	}
950 
951 	if (bif == NULL) {
952 		BRIDGE_UNLOCK(sc);
953 		return ENOENT;
954 	}
955 
956 	bridge_delete_member(sc, bif);
957 
958 	BRIDGE_UNLOCK(sc);
959 
960 	bridge_rtdelete(sc, ifs);
961 	bridge_calc_csum_flags(sc);
962 	bridge_calc_link_state(sc);
963 
964 	if (sc->sc_if.if_flags & IFF_RUNNING)
965 		bstp_initialization(sc);
966 
967 	return 0;
968 }
969 
970 static int
971 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
972 {
973 	struct ifbreq *req = arg;
974 	struct bridge_iflist *bif;
975 	struct psref psref;
976 
977 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
978 	if (bif == NULL)
979 		return ENOENT;
980 
981 	req->ifbr_ifsflags = bif->bif_flags;
982 	req->ifbr_state = bif->bif_state;
983 	req->ifbr_priority = bif->bif_priority;
984 	req->ifbr_path_cost = bif->bif_path_cost;
985 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
986 
987 	bridge_release_member(sc, bif, &psref);
988 
989 	return 0;
990 }
991 
992 static int
993 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
994 {
995 	struct ifbreq *req = arg;
996 	struct bridge_iflist *bif;
997 	struct psref psref;
998 
999 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1000 	if (bif == NULL)
1001 		return ENOENT;
1002 
1003 	if (req->ifbr_ifsflags & IFBIF_STP) {
1004 		switch (bif->bif_ifp->if_type) {
1005 		case IFT_ETHER:
1006 		case IFT_L2TP:
1007 			/* These can do spanning tree. */
1008 			break;
1009 
1010 		default:
1011 			/* Nothing else can. */
1012 			bridge_release_member(sc, bif, &psref);
1013 			return EINVAL;
1014 		}
1015 	}
1016 
1017 	bif->bif_flags = req->ifbr_ifsflags;
1018 
1019 	bridge_release_member(sc, bif, &psref);
1020 
1021 	if (sc->sc_if.if_flags & IFF_RUNNING)
1022 		bstp_initialization(sc);
1023 
1024 	return 0;
1025 }
1026 
1027 static int
1028 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1029 {
1030 	struct ifbrparam *param = arg;
1031 
1032 	sc->sc_brtmax = param->ifbrp_csize;
1033 	bridge_rttrim(sc);
1034 
1035 	return 0;
1036 }
1037 
1038 static int
1039 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1040 {
1041 	struct ifbrparam *param = arg;
1042 
1043 	param->ifbrp_csize = sc->sc_brtmax;
1044 
1045 	return 0;
1046 }
1047 
1048 static int
1049 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1050 {
1051 	struct ifbifconf *bifc = arg;
1052 	struct bridge_iflist *bif;
1053 	struct ifbreq *breqs;
1054 	int i, count, error = 0;
1055 
1056 retry:
1057 	BRIDGE_LOCK(sc);
1058 	count = 0;
1059 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1060 		count++;
1061 	BRIDGE_UNLOCK(sc);
1062 
1063 	if (count == 0) {
1064 		bifc->ifbic_len = 0;
1065 		return 0;
1066 	}
1067 
1068 	if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1069 		/* Tell that a larger buffer is needed */
1070 		bifc->ifbic_len = sizeof(*breqs) * count;
1071 		return 0;
1072 	}
1073 
1074 	breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1075 
1076 	BRIDGE_LOCK(sc);
1077 
1078 	i = 0;
1079 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1080 		i++;
1081 	if (i > count) {
1082 		/*
1083 		 * The number of members has been increased.
1084 		 * We need more memory!
1085 		 */
1086 		BRIDGE_UNLOCK(sc);
1087 		kmem_free(breqs, sizeof(*breqs) * count);
1088 		goto retry;
1089 	}
1090 
1091 	i = 0;
1092 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1093 		struct ifbreq *breq = &breqs[i++];
1094 		memset(breq, 0, sizeof(*breq));
1095 
1096 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1097 		    sizeof(breq->ifbr_ifsname));
1098 		breq->ifbr_ifsflags = bif->bif_flags;
1099 		breq->ifbr_state = bif->bif_state;
1100 		breq->ifbr_priority = bif->bif_priority;
1101 		breq->ifbr_path_cost = bif->bif_path_cost;
1102 		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1103 	}
1104 
1105 	/* Don't call copyout with holding the mutex */
1106 	BRIDGE_UNLOCK(sc);
1107 
1108 	for (i = 0; i < count; i++) {
1109 		error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1110 		if (error)
1111 			break;
1112 	}
1113 	bifc->ifbic_len = sizeof(*breqs) * i;
1114 
1115 	kmem_free(breqs, sizeof(*breqs) * count);
1116 
1117 	return error;
1118 }
1119 
1120 static int
1121 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1122 {
1123 	struct ifbaconf *bac = arg;
1124 	struct bridge_rtnode *brt;
1125 	struct ifbareq bareq;
1126 	int count = 0, error = 0, len;
1127 
1128 	if (bac->ifbac_len == 0)
1129 		return 0;
1130 
1131 	BRIDGE_RT_LOCK(sc);
1132 
1133 	/* The passed buffer is not enough, tell a required size. */
1134 	if (bac->ifbac_len < (sizeof(bareq) * sc->sc_brtcnt)) {
1135 		count = sc->sc_brtcnt;
1136 		goto out;
1137 	}
1138 
1139 	len = bac->ifbac_len;
1140 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
1141 		if (len < sizeof(bareq))
1142 			goto out;
1143 		memset(&bareq, 0, sizeof(bareq));
1144 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1145 		    sizeof(bareq.ifba_ifsname));
1146 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1147 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1148 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1149 		} else
1150 			bareq.ifba_expire = 0;
1151 		bareq.ifba_flags = brt->brt_flags;
1152 
1153 		error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1154 		if (error)
1155 			goto out;
1156 		count++;
1157 		len -= sizeof(bareq);
1158 	}
1159 out:
1160 	BRIDGE_RT_UNLOCK(sc);
1161 
1162 	bac->ifbac_len = sizeof(bareq) * count;
1163 	return error;
1164 }
1165 
1166 static int
1167 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1168 {
1169 	struct ifbareq *req = arg;
1170 	struct bridge_iflist *bif;
1171 	int error;
1172 	struct psref psref;
1173 
1174 	bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1175 	if (bif == NULL)
1176 		return ENOENT;
1177 
1178 	error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1179 	    req->ifba_flags);
1180 
1181 	bridge_release_member(sc, bif, &psref);
1182 
1183 	return error;
1184 }
1185 
1186 static int
1187 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1188 {
1189 	struct ifbrparam *param = arg;
1190 
1191 	sc->sc_brttimeout = param->ifbrp_ctime;
1192 
1193 	return 0;
1194 }
1195 
1196 static int
1197 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1198 {
1199 	struct ifbrparam *param = arg;
1200 
1201 	param->ifbrp_ctime = sc->sc_brttimeout;
1202 
1203 	return 0;
1204 }
1205 
1206 static int
1207 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1208 {
1209 	struct ifbareq *req = arg;
1210 
1211 	return (bridge_rtdaddr(sc, req->ifba_dst));
1212 }
1213 
1214 static int
1215 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1216 {
1217 	struct ifbreq *req = arg;
1218 
1219 	bridge_rtflush(sc, req->ifbr_ifsflags);
1220 
1221 	return 0;
1222 }
1223 
1224 static int
1225 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1226 {
1227 	struct ifbrparam *param = arg;
1228 
1229 	param->ifbrp_prio = sc->sc_bridge_priority;
1230 
1231 	return 0;
1232 }
1233 
1234 static int
1235 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1236 {
1237 	struct ifbrparam *param = arg;
1238 
1239 	sc->sc_bridge_priority = param->ifbrp_prio;
1240 
1241 	if (sc->sc_if.if_flags & IFF_RUNNING)
1242 		bstp_initialization(sc);
1243 
1244 	return 0;
1245 }
1246 
1247 static int
1248 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1249 {
1250 	struct ifbrparam *param = arg;
1251 
1252 	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1253 
1254 	return 0;
1255 }
1256 
1257 static int
1258 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1259 {
1260 	struct ifbrparam *param = arg;
1261 
1262 	if (param->ifbrp_hellotime == 0)
1263 		return EINVAL;
1264 	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1265 
1266 	if (sc->sc_if.if_flags & IFF_RUNNING)
1267 		bstp_initialization(sc);
1268 
1269 	return 0;
1270 }
1271 
1272 static int
1273 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1274 {
1275 	struct ifbrparam *param = arg;
1276 
1277 	param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1278 
1279 	return 0;
1280 }
1281 
1282 static int
1283 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1284 {
1285 	struct ifbrparam *param = arg;
1286 
1287 	if (param->ifbrp_fwddelay == 0)
1288 		return EINVAL;
1289 	sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1290 
1291 	if (sc->sc_if.if_flags & IFF_RUNNING)
1292 		bstp_initialization(sc);
1293 
1294 	return 0;
1295 }
1296 
1297 static int
1298 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1299 {
1300 	struct ifbrparam *param = arg;
1301 
1302 	param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1303 
1304 	return 0;
1305 }
1306 
1307 static int
1308 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1309 {
1310 	struct ifbrparam *param = arg;
1311 
1312 	if (param->ifbrp_maxage == 0)
1313 		return EINVAL;
1314 	sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1315 
1316 	if (sc->sc_if.if_flags & IFF_RUNNING)
1317 		bstp_initialization(sc);
1318 
1319 	return 0;
1320 }
1321 
1322 static int
1323 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1324 {
1325 	struct ifbreq *req = arg;
1326 	struct bridge_iflist *bif;
1327 	struct psref psref;
1328 
1329 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1330 	if (bif == NULL)
1331 		return ENOENT;
1332 
1333 	bif->bif_priority = req->ifbr_priority;
1334 
1335 	if (sc->sc_if.if_flags & IFF_RUNNING)
1336 		bstp_initialization(sc);
1337 
1338 	bridge_release_member(sc, bif, &psref);
1339 
1340 	return 0;
1341 }
1342 
1343 static int
1344 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1345 {
1346 	struct ifbrparam *param = arg;
1347 
1348 	param->ifbrp_filter = sc->sc_filter_flags;
1349 
1350 	return 0;
1351 }
1352 
1353 static int
1354 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1355 {
1356 	struct ifbrparam *param = arg;
1357 	uint32_t nflags, oflags;
1358 
1359 	if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1360 		return EINVAL;
1361 
1362 	nflags = param->ifbrp_filter;
1363 	oflags = sc->sc_filter_flags;
1364 
1365 	if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1366 		pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1367 			sc->sc_if.if_pfil);
1368 	}
1369 	if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1370 		pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1371 			sc->sc_if.if_pfil);
1372 	}
1373 
1374 	sc->sc_filter_flags = nflags;
1375 
1376 	return 0;
1377 }
1378 
1379 static int
1380 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1381 {
1382 	struct ifbreq *req = arg;
1383 	struct bridge_iflist *bif;
1384 	struct psref psref;
1385 
1386 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1387 	if (bif == NULL)
1388 		return ENOENT;
1389 
1390 	bif->bif_path_cost = req->ifbr_path_cost;
1391 
1392 	if (sc->sc_if.if_flags & IFF_RUNNING)
1393 		bstp_initialization(sc);
1394 
1395 	bridge_release_member(sc, bif, &psref);
1396 
1397 	return 0;
1398 }
1399 
1400 /*
1401  * bridge_ifdetach:
1402  *
1403  *	Detach an interface from a bridge.  Called when a member
1404  *	interface is detaching.
1405  */
1406 static void
1407 bridge_ifdetach(void *xifs)
1408 {
1409 	struct ifnet *ifs;
1410 	struct bridge_softc *sc;
1411 	struct ifbreq breq;
1412 
1413 	ifs = (struct ifnet *)xifs;
1414 	sc = ifs->if_bridge;
1415 
1416 	/* ioctl_lock should prevent this from happening */
1417 	KASSERT(sc != NULL);
1418 
1419 	memset(&breq, 0, sizeof(breq));
1420 	strlcpy(breq.ifbr_ifsname, ifs->if_xname, sizeof(breq.ifbr_ifsname));
1421 
1422 	(void) bridge_ioctl_del(sc, &breq);
1423 }
1424 
1425 /*
1426  * bridge_init:
1427  *
1428  *	Initialize a bridge interface.
1429  */
1430 static int
1431 bridge_init(struct ifnet *ifp)
1432 {
1433 	struct bridge_softc *sc = ifp->if_softc;
1434 
1435 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1436 
1437 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1438 	    bridge_timer, sc);
1439 	bstp_initialization(sc);
1440 
1441 	ifp->if_flags |= IFF_RUNNING;
1442 	return 0;
1443 }
1444 
1445 /*
1446  * bridge_stop:
1447  *
1448  *	Stop the bridge interface.
1449  */
1450 static void
1451 bridge_stop(struct ifnet *ifp, int disable)
1452 {
1453 	struct bridge_softc *sc = ifp->if_softc;
1454 
1455 	KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1456 	ifp->if_flags &= ~IFF_RUNNING;
1457 
1458 	callout_halt(&sc->sc_brcallout, NULL);
1459 	workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1460 	bstp_stop(sc);
1461 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1462 }
1463 
1464 /*
1465  * bridge_enqueue:
1466  *
1467  *	Enqueue a packet on a bridge member interface.
1468  */
1469 void
1470 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1471     int runfilt)
1472 {
1473 	int len, error;
1474 	short mflags;
1475 
1476 	if (runfilt) {
1477 		if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1478 		    dst_ifp, PFIL_OUT) != 0) {
1479 			m_freem(m);
1480 			return;
1481 		}
1482 		if (m == NULL)
1483 			return;
1484 	}
1485 
1486 #ifdef ALTQ
1487 	KERNEL_LOCK(1, NULL);
1488 	/*
1489 	 * If ALTQ is enabled on the member interface, do
1490 	 * classification; the queueing discipline might
1491 	 * not require classification, but might require
1492 	 * the address family/header pointer in the pktattr.
1493 	 */
1494 	if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1495 		/* XXX IFT_ETHER */
1496 		altq_etherclassify(&dst_ifp->if_snd, m);
1497 	}
1498 	KERNEL_UNLOCK_ONE(NULL);
1499 #endif /* ALTQ */
1500 
1501 	if (vlan_has_tag(m) &&
1502 	    !vlan_is_hwtag_enabled(dst_ifp)) {
1503 		(void)ether_inject_vlantag(&m, ETHERTYPE_VLAN,
1504 		    vlan_get_tag(m));
1505 		if (m == NULL) {
1506 			if_statinc(&sc->sc_if, if_oerrors);
1507 			return;
1508 		}
1509 	}
1510 
1511 	len = m->m_pkthdr.len;
1512 	mflags = m->m_flags;
1513 
1514 	error = if_transmit_lock(dst_ifp, m);
1515 	if (error) {
1516 		/* mbuf is already freed */
1517 		if_statinc(&sc->sc_if, if_oerrors);
1518 		return;
1519 	}
1520 
1521 	net_stat_ref_t nsr = IF_STAT_GETREF(&sc->sc_if);
1522 	if_statinc_ref(&sc->sc_if, nsr, if_opackets);
1523 	if_statadd_ref(&sc->sc_if, nsr, if_obytes, len);
1524 	if (mflags & M_MCAST)
1525 		if_statinc_ref(&sc->sc_if, nsr, if_omcasts);
1526 	IF_STAT_PUTREF(&sc->sc_if);
1527 }
1528 
1529 /*
1530  * bridge_output:
1531  *
1532  *	Send output from a bridge member interface.  This
1533  *	performs the bridging function for locally originated
1534  *	packets.
1535  *
1536  *	The mbuf has the Ethernet header already attached.  We must
1537  *	enqueue or free the mbuf before returning.
1538  */
1539 int
1540 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1541     const struct rtentry *rt)
1542 {
1543 	struct ether_header *eh;
1544 	struct ifnet *dst_if;
1545 	struct bridge_softc *sc;
1546 	struct mbuf *n;
1547 	int s, bound;
1548 
1549 	/*
1550 	 * bridge_output() is called from ether_output(), furthermore
1551 	 * ifp argument doesn't point to bridge(4). So, don't assert
1552 	 * IFEF_MPSAFE here.
1553 	 */
1554 
1555 	KASSERT(m->m_len >= ETHER_HDR_LEN);
1556 
1557 	eh = mtod(m, struct ether_header *);
1558 	sc = ifp->if_bridge;
1559 
1560 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1561 		if (memcmp(etherbroadcastaddr,
1562 		    eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1563 			m->m_flags |= M_BCAST;
1564 		else
1565 			m->m_flags |= M_MCAST;
1566 	}
1567 
1568 	/*
1569 	 * If bridge is down, but the original output interface is up,
1570 	 * go ahead and send out that interface.  Otherwise, the packet
1571 	 * is dropped below.
1572 	 */
1573 	if (__predict_false(sc == NULL) ||
1574 	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1575 		dst_if = ifp;
1576 		goto unicast_asis;
1577 	}
1578 
1579 	/*
1580 	 * If the packet is a multicast, or we don't know a better way to
1581 	 * get there, send to all interfaces.
1582 	 */
1583 	if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1584 		dst_if = NULL;
1585 	else
1586 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1587 
1588 	/*
1589 	 * In general, we need to handle TX offload in software before
1590 	 * enqueueing a packet. However, we can send it as is in the
1591 	 * cases of unicast via (1) the source interface, or (2) an
1592 	 * interface which supports the specified offload options.
1593 	 * For multicast or broadcast, send it as is only if (3) all
1594 	 * the member interfaces support the specified options.
1595 	 */
1596 
1597 	/*
1598 	 * Unicast via the source interface.
1599 	 */
1600 	if (dst_if == ifp)
1601 		goto unicast_asis;
1602 
1603 	/*
1604 	 * Unicast via other interface.
1605 	 */
1606 	if (dst_if != NULL) {
1607 		KASSERT(m->m_flags & M_PKTHDR);
1608 		if (TX_OFFLOAD_SUPPORTED(dst_if->if_csum_flags_tx,
1609 		    m->m_pkthdr.csum_flags)) {
1610 			/*
1611 			 * Unicast via an interface which supports the
1612 			 * specified offload options.
1613 			 */
1614 			goto unicast_asis;
1615 		}
1616 
1617 		/*
1618 		 * Handle TX offload in software. For TSO, a packet is
1619 		 * split into multiple chunks. Thus, the return value of
1620 		 * ether_sw_offload_tx() is mbuf queue consists of them.
1621 		 */
1622 		m = ether_sw_offload_tx(ifp, m);
1623 		if (m == NULL)
1624 			return 0;
1625 
1626 		do {
1627 			n = m->m_nextpkt;
1628 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1629 				m_freem(m);
1630 			else
1631 				bridge_enqueue(sc, dst_if, m, 0);
1632 			m = n;
1633 		} while (m != NULL);
1634 
1635 		return 0;
1636 	}
1637 
1638 	/*
1639 	 * Multicast or broadcast.
1640 	 */
1641 	if (TX_OFFLOAD_SUPPORTED(sc->sc_csum_flags_tx,
1642 	    m->m_pkthdr.csum_flags)) {
1643 		/*
1644 		 * Specified TX offload options are supported by all
1645 		 * the member interfaces of this bridge.
1646 		 */
1647 		m->m_nextpkt = NULL;	/* XXX */
1648 	} else {
1649 		/*
1650 		 * Otherwise, handle TX offload in software.
1651 		 */
1652 		m = ether_sw_offload_tx(ifp, m);
1653 		if (m == NULL)
1654 			return 0;
1655 	}
1656 
1657 	/*
1658 	 * When we use pppoe over bridge, bridge_output() can be called
1659 	 * in a lwp context by pppoe_timeout_wk().
1660 	 */
1661 	bound = curlwp_bind();
1662 	do {
1663 		/* XXX Should call bridge_broadcast, but there are locking
1664 		 * issues which need resolving first. */
1665 		struct bridge_iflist *bif;
1666 		struct mbuf *mc;
1667 		bool used = false;
1668 
1669 		n = m->m_nextpkt;
1670 
1671 		s = pserialize_read_enter();
1672 		BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1673 			struct psref psref;
1674 
1675 			bridge_acquire_member(sc, bif, &psref);
1676 			pserialize_read_exit(s);
1677 
1678 			dst_if = bif->bif_ifp;
1679 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1680 				goto next;
1681 
1682 			/*
1683 			 * If this is not the original output interface,
1684 			 * and the interface is participating in spanning
1685 			 * tree, make sure the port is in a state that
1686 			 * allows forwarding.
1687 			 */
1688 			if (dst_if != ifp &&
1689 			    (bif->bif_flags & IFBIF_STP) != 0) {
1690 				switch (bif->bif_state) {
1691 				case BSTP_IFSTATE_BLOCKING:
1692 				case BSTP_IFSTATE_LISTENING:
1693 				case BSTP_IFSTATE_DISABLED:
1694 					goto next;
1695 				}
1696 			}
1697 
1698 			if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1699 			    bif_next) == NULL &&
1700 			    ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1701 			    dst_if == ifp))
1702 			{
1703 				used = true;
1704 				mc = m;
1705 			} else {
1706 				mc = m_copypacket(m, M_DONTWAIT);
1707 				if (mc == NULL) {
1708 					if_statinc(&sc->sc_if, if_oerrors);
1709 					goto next;
1710 				}
1711 			}
1712 
1713 			bridge_enqueue(sc, dst_if, mc, 0);
1714 
1715 			if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1716 			    dst_if != ifp)
1717 			{
1718 				if (PSLIST_READER_NEXT(bif,
1719 				    struct bridge_iflist, bif_next) == NULL)
1720 				{
1721 					used = true;
1722 					mc = m;
1723 				} else {
1724 					mc = m_copypacket(m, M_DONTWAIT);
1725 					if (mc == NULL) {
1726 						if_statinc(&sc->sc_if,
1727 						    if_oerrors);
1728 						goto next;
1729 					}
1730 				}
1731 
1732 				m_set_rcvif(mc, dst_if);
1733 				mc->m_flags &= ~M_PROMISC;
1734 
1735 				const int _s = splsoftnet();
1736 				KERNEL_LOCK_UNLESS_IFP_MPSAFE(dst_if);
1737 				ether_input(dst_if, mc);
1738 				KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(dst_if);
1739 				splx(_s);
1740 			}
1741 
1742 next:
1743 			s = pserialize_read_enter();
1744 			bridge_release_member(sc, bif, &psref);
1745 
1746 			/* Guarantee we don't re-enter the loop as we already
1747 			 * decided we're at the end. */
1748 			if (used)
1749 				break;
1750 		}
1751 		pserialize_read_exit(s);
1752 
1753 		if (!used)
1754 			m_freem(m);
1755 
1756 		m = n;
1757 	} while (m != NULL);
1758 	curlwp_bindx(bound);
1759 
1760 	return 0;
1761 
1762 unicast_asis:
1763 	/*
1764 	 * XXX Spanning tree consideration here?
1765 	 */
1766 	if ((dst_if->if_flags & IFF_RUNNING) == 0)
1767 		m_freem(m);
1768 	else
1769 		bridge_enqueue(sc, dst_if, m, 0);
1770 	return 0;
1771 }
1772 
1773 /*
1774  * bridge_start:
1775  *
1776  *	Start output on a bridge.
1777  *
1778  *	NOTE: This routine should never be called in this implementation.
1779  */
1780 static void
1781 bridge_start(struct ifnet *ifp)
1782 {
1783 
1784 	printf("%s: bridge_start() called\n", ifp->if_xname);
1785 }
1786 
1787 /*
1788  * bridge_forward:
1789  *
1790  *	The forwarding function of the bridge.
1791  */
1792 static void
1793 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1794 {
1795 	struct bridge_iflist *bif;
1796 	struct ifnet *src_if, *dst_if;
1797 	struct ether_header *eh;
1798 	struct psref psref;
1799 	struct psref psref_src;
1800 	DECLARE_LOCK_VARIABLE;
1801 
1802 	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1803 		return;
1804 
1805 	src_if = m_get_rcvif_psref(m, &psref_src);
1806 	if (src_if == NULL) {
1807 		/* Interface is being destroyed? */
1808 		m_freem(m);
1809 		goto out;
1810 	}
1811 
1812 	if_statadd2(&sc->sc_if, if_ipackets, 1, if_ibytes, m->m_pkthdr.len);
1813 
1814 	/*
1815 	 * Look up the bridge_iflist.
1816 	 */
1817 	bif = bridge_lookup_member_if(sc, src_if, &psref);
1818 	if (bif == NULL) {
1819 		/* Interface is not a bridge member (anymore?) */
1820 		m_freem(m);
1821 		goto out;
1822 	}
1823 
1824 	if (bif->bif_flags & IFBIF_STP) {
1825 		switch (bif->bif_state) {
1826 		case BSTP_IFSTATE_BLOCKING:
1827 		case BSTP_IFSTATE_LISTENING:
1828 		case BSTP_IFSTATE_DISABLED:
1829 			m_freem(m);
1830 			bridge_release_member(sc, bif, &psref);
1831 			goto out;
1832 		}
1833 	}
1834 
1835 	eh = mtod(m, struct ether_header *);
1836 
1837 	/*
1838 	 * If the interface is learning, and the source
1839 	 * address is valid and not multicast, record
1840 	 * the address.
1841 	 */
1842 	if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1843 	    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1844 	    (eh->ether_shost[0] == 0 &&
1845 	     eh->ether_shost[1] == 0 &&
1846 	     eh->ether_shost[2] == 0 &&
1847 	     eh->ether_shost[3] == 0 &&
1848 	     eh->ether_shost[4] == 0 &&
1849 	     eh->ether_shost[5] == 0) == 0) {
1850 		(void) bridge_rtupdate(sc, eh->ether_shost,
1851 		    src_if, 0, IFBAF_DYNAMIC);
1852 	}
1853 
1854 	if ((bif->bif_flags & IFBIF_STP) != 0 &&
1855 	    bif->bif_state == BSTP_IFSTATE_LEARNING) {
1856 		m_freem(m);
1857 		bridge_release_member(sc, bif, &psref);
1858 		goto out;
1859 	}
1860 
1861 	bridge_release_member(sc, bif, &psref);
1862 
1863 	/*
1864 	 * At this point, the port either doesn't participate
1865 	 * in spanning tree or it is in the forwarding state.
1866 	 */
1867 
1868 	/*
1869 	 * If the packet is unicast, destined for someone on
1870 	 * "this" side of the bridge, drop it.
1871 	 */
1872 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1873 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1874 		if (src_if == dst_if) {
1875 			m_freem(m);
1876 			goto out;
1877 		}
1878 	} else {
1879 		/* ...forward it to all interfaces. */
1880 		if_statinc(&sc->sc_if, if_imcasts);
1881 		dst_if = NULL;
1882 	}
1883 
1884 	if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0) {
1885 		m_freem(m);
1886 		goto out;
1887 	}
1888 	if (m == NULL)
1889 		goto out;
1890 
1891 	if (dst_if == NULL) {
1892 		bridge_broadcast(sc, src_if, m);
1893 		goto out;
1894 	}
1895 
1896 	m_put_rcvif_psref(src_if, &psref_src);
1897 	src_if = NULL;
1898 
1899 	/*
1900 	 * At this point, we're dealing with a unicast frame
1901 	 * going to a different interface.
1902 	 */
1903 	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1904 		m_freem(m);
1905 		goto out;
1906 	}
1907 
1908 	bif = bridge_lookup_member_if(sc, dst_if, &psref);
1909 	if (bif == NULL) {
1910 		/* Not a member of the bridge (anymore?) */
1911 		m_freem(m);
1912 		goto out;
1913 	}
1914 
1915 	if (bif->bif_flags & IFBIF_STP) {
1916 		switch (bif->bif_state) {
1917 		case BSTP_IFSTATE_DISABLED:
1918 		case BSTP_IFSTATE_BLOCKING:
1919 			m_freem(m);
1920 			bridge_release_member(sc, bif, &psref);
1921 			goto out;
1922 		}
1923 	}
1924 
1925 	bridge_release_member(sc, bif, &psref);
1926 
1927 	/*
1928 	 * Before enqueueing this packet to the destination interface,
1929 	 * clear any in-bound checksum flags to prevent them from being
1930 	 * misused as out-bound flags.
1931 	 */
1932 	m->m_pkthdr.csum_flags = 0;
1933 
1934 	ACQUIRE_GLOBAL_LOCKS();
1935 	bridge_enqueue(sc, dst_if, m, 1);
1936 	RELEASE_GLOBAL_LOCKS();
1937 out:
1938 	if (src_if != NULL)
1939 		m_put_rcvif_psref(src_if, &psref_src);
1940 	return;
1941 }
1942 
1943 static bool
1944 bstp_state_before_learning(struct bridge_iflist *bif)
1945 {
1946 	if (bif->bif_flags & IFBIF_STP) {
1947 		switch (bif->bif_state) {
1948 		case BSTP_IFSTATE_BLOCKING:
1949 		case BSTP_IFSTATE_LISTENING:
1950 		case BSTP_IFSTATE_DISABLED:
1951 			return true;
1952 		}
1953 	}
1954 	return false;
1955 }
1956 
1957 static bool
1958 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1959 {
1960 	uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1961 
1962 	if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1963 #if NCARP > 0
1964 	    || (bif->bif_ifp->if_carp &&
1965 	        carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1966 #endif /* NCARP > 0 */
1967 	    )
1968 		return true;
1969 
1970 	return false;
1971 }
1972 
1973 /*
1974  * bridge_input:
1975  *
1976  *	Receive input from a member interface.  Queue the packet for
1977  *	bridging if it is not for us.
1978  */
1979 static void
1980 bridge_input(struct ifnet *ifp, struct mbuf *m)
1981 {
1982 	struct bridge_softc *sc = ifp->if_bridge;
1983 	struct bridge_iflist *bif;
1984 	struct ether_header *eh;
1985 	struct psref psref;
1986 	int bound;
1987 	DECLARE_LOCK_VARIABLE;
1988 
1989 	KASSERT(!cpu_intr_p());
1990 
1991 	if (__predict_false(sc == NULL) ||
1992 	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1993 		ACQUIRE_GLOBAL_LOCKS();
1994 		ether_input(ifp, m);
1995 		RELEASE_GLOBAL_LOCKS();
1996 		return;
1997 	}
1998 
1999 	bound = curlwp_bind();
2000 	bif = bridge_lookup_member_if(sc, ifp, &psref);
2001 	if (bif == NULL) {
2002 		curlwp_bindx(bound);
2003 		ACQUIRE_GLOBAL_LOCKS();
2004 		ether_input(ifp, m);
2005 		RELEASE_GLOBAL_LOCKS();
2006 		return;
2007 	}
2008 
2009 	eh = mtod(m, struct ether_header *);
2010 
2011 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
2012 		if (memcmp(etherbroadcastaddr,
2013 		    eh->ether_dhost, ETHER_ADDR_LEN) == 0)
2014 			m->m_flags |= M_BCAST;
2015 		else
2016 			m->m_flags |= M_MCAST;
2017 	}
2018 
2019 	/*
2020 	 * A 'fast' path for packets addressed to interfaces that are
2021 	 * part of this bridge.
2022 	 */
2023 	if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
2024 	    !bstp_state_before_learning(bif)) {
2025 		struct bridge_iflist *_bif;
2026 		struct ifnet *_ifp = NULL;
2027 		int s;
2028 		struct psref _psref;
2029 
2030 		s = pserialize_read_enter();
2031 		BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
2032 			/* It is destined for us. */
2033 			if (bridge_ourether(_bif, eh, 0)) {
2034 				bridge_acquire_member(sc, _bif, &_psref);
2035 				pserialize_read_exit(s);
2036 				if (_bif->bif_flags & IFBIF_LEARNING)
2037 					(void) bridge_rtupdate(sc,
2038 					    eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
2039 				m_set_rcvif(m, _bif->bif_ifp);
2040 				_ifp = _bif->bif_ifp;
2041 				bridge_release_member(sc, _bif, &_psref);
2042 				goto out;
2043 			}
2044 
2045 			/* We just received a packet that we sent out. */
2046 			if (bridge_ourether(_bif, eh, 1))
2047 				break;
2048 		}
2049 		pserialize_read_exit(s);
2050 out:
2051 
2052 		if (_bif != NULL) {
2053 			bridge_release_member(sc, bif, &psref);
2054 			curlwp_bindx(bound);
2055 			if (_ifp != NULL) {
2056 				m->m_flags &= ~M_PROMISC;
2057 				ACQUIRE_GLOBAL_LOCKS();
2058 				ether_input(_ifp, m);
2059 				RELEASE_GLOBAL_LOCKS();
2060 			} else
2061 				m_freem(m);
2062 			return;
2063 		}
2064 	}
2065 
2066 	/* Tap off 802.1D packets; they do not get forwarded. */
2067 	if (bif->bif_flags & IFBIF_STP &&
2068 	    memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
2069 		bstp_input(sc, bif, m);
2070 		bridge_release_member(sc, bif, &psref);
2071 		curlwp_bindx(bound);
2072 		return;
2073 	}
2074 
2075 	/*
2076 	 * A normal switch would discard the packet here, but that's not what
2077 	 * we've done historically. This also prevents some obnoxious behaviour.
2078 	 */
2079 	if (bstp_state_before_learning(bif)) {
2080 		bridge_release_member(sc, bif, &psref);
2081 		curlwp_bindx(bound);
2082 		ACQUIRE_GLOBAL_LOCKS();
2083 		ether_input(ifp, m);
2084 		RELEASE_GLOBAL_LOCKS();
2085 		return;
2086 	}
2087 
2088 	bridge_release_member(sc, bif, &psref);
2089 
2090 	bridge_forward(sc, m);
2091 
2092 	curlwp_bindx(bound);
2093 }
2094 
2095 /*
2096  * bridge_broadcast:
2097  *
2098  *	Send a frame to all interfaces that are members of
2099  *	the bridge, except for the one on which the packet
2100  *	arrived.
2101  */
2102 static void
2103 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2104     struct mbuf *m)
2105 {
2106 	struct bridge_iflist *bif;
2107 	struct mbuf *mc;
2108 	struct ifnet *dst_if;
2109 	bool bmcast;
2110 	int s;
2111 	DECLARE_LOCK_VARIABLE;
2112 
2113 	bmcast = m->m_flags & (M_BCAST|M_MCAST);
2114 
2115 	s = pserialize_read_enter();
2116 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
2117 		struct psref psref;
2118 
2119 		bridge_acquire_member(sc, bif, &psref);
2120 		pserialize_read_exit(s);
2121 
2122 		dst_if = bif->bif_ifp;
2123 
2124 		if (bif->bif_flags & IFBIF_STP) {
2125 			switch (bif->bif_state) {
2126 			case BSTP_IFSTATE_BLOCKING:
2127 			case BSTP_IFSTATE_DISABLED:
2128 				goto next;
2129 			}
2130 		}
2131 
2132 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2133 			goto next;
2134 
2135 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2136 			goto next;
2137 
2138 		if (dst_if != src_if) {
2139 			mc = m_copypacket(m, M_DONTWAIT);
2140 			if (mc == NULL) {
2141 				if_statinc(&sc->sc_if, if_oerrors);
2142 				goto next;
2143 			}
2144 			/*
2145 			 * Before enqueueing this packet to the destination
2146 			 * interface, clear any in-bound checksum flags to
2147 			 * prevent them from being misused as out-bound flags.
2148 			 */
2149 			mc->m_pkthdr.csum_flags = 0;
2150 
2151 			ACQUIRE_GLOBAL_LOCKS();
2152 			bridge_enqueue(sc, dst_if, mc, 1);
2153 			RELEASE_GLOBAL_LOCKS();
2154 		}
2155 
2156 		if (bmcast) {
2157 			mc = m_copypacket(m, M_DONTWAIT);
2158 			if (mc == NULL) {
2159 				if_statinc(&sc->sc_if, if_oerrors);
2160 				goto next;
2161 			}
2162 			/*
2163 			 * Before enqueueing this packet to the destination
2164 			 * interface, clear any in-bound checksum flags to
2165 			 * prevent them from being misused as out-bound flags.
2166 			 */
2167 			mc->m_pkthdr.csum_flags = 0;
2168 
2169 			m_set_rcvif(mc, dst_if);
2170 			mc->m_flags &= ~M_PROMISC;
2171 
2172 			ACQUIRE_GLOBAL_LOCKS();
2173 			ether_input(dst_if, mc);
2174 			RELEASE_GLOBAL_LOCKS();
2175 		}
2176 next:
2177 		s = pserialize_read_enter();
2178 		bridge_release_member(sc, bif, &psref);
2179 	}
2180 	pserialize_read_exit(s);
2181 
2182 	m_freem(m);
2183 }
2184 
2185 static int
2186 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2187     struct bridge_rtnode **brtp)
2188 {
2189 	struct bridge_rtnode *brt;
2190 	int error;
2191 
2192 	if (sc->sc_brtcnt >= sc->sc_brtmax)
2193 		return ENOSPC;
2194 
2195 	/*
2196 	 * Allocate a new bridge forwarding node, and
2197 	 * initialize the expiration time and Ethernet
2198 	 * address.
2199 	 */
2200 	brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2201 	if (brt == NULL)
2202 		return ENOMEM;
2203 
2204 	memset(brt, 0, sizeof(*brt));
2205 	brt->brt_expire = time_uptime + sc->sc_brttimeout;
2206 	brt->brt_flags = IFBAF_DYNAMIC;
2207 	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2208 	PSLIST_ENTRY_INIT(brt, brt_list);
2209 	PSLIST_ENTRY_INIT(brt, brt_hash);
2210 
2211 	BRIDGE_RT_LOCK(sc);
2212 	error = bridge_rtnode_insert(sc, brt);
2213 	BRIDGE_RT_UNLOCK(sc);
2214 
2215 	if (error != 0) {
2216 		pool_put(&bridge_rtnode_pool, brt);
2217 		return error;
2218 	}
2219 
2220 	*brtp = brt;
2221 	return 0;
2222 }
2223 
2224 /*
2225  * bridge_rtupdate:
2226  *
2227  *	Add a bridge routing entry.
2228  */
2229 static int
2230 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2231     struct ifnet *dst_if, int setflags, uint8_t flags)
2232 {
2233 	struct bridge_rtnode *brt;
2234 	int s;
2235 
2236 again:
2237 	/*
2238 	 * A route for this destination might already exist.  If so,
2239 	 * update it, otherwise create a new one.
2240 	 */
2241 	s = pserialize_read_enter();
2242 	brt = bridge_rtnode_lookup(sc, dst);
2243 
2244 	if (brt != NULL) {
2245 		brt->brt_ifp = dst_if;
2246 		if (setflags) {
2247 			brt->brt_flags = flags;
2248 			if (flags & IFBAF_STATIC)
2249 				brt->brt_expire = 0;
2250 			else
2251 				brt->brt_expire = time_uptime + sc->sc_brttimeout;
2252 		} else {
2253 			if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2254 				brt->brt_expire = time_uptime + sc->sc_brttimeout;
2255 		}
2256 	}
2257 	pserialize_read_exit(s);
2258 
2259 	if (brt == NULL) {
2260 		int r;
2261 
2262 		r = bridge_rtalloc(sc, dst, &brt);
2263 		if (r != 0)
2264 			return r;
2265 		goto again;
2266 	}
2267 
2268 	return 0;
2269 }
2270 
2271 /*
2272  * bridge_rtlookup:
2273  *
2274  *	Lookup the destination interface for an address.
2275  */
2276 static struct ifnet *
2277 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2278 {
2279 	struct bridge_rtnode *brt;
2280 	struct ifnet *ifs = NULL;
2281 	int s;
2282 
2283 	s = pserialize_read_enter();
2284 	brt = bridge_rtnode_lookup(sc, addr);
2285 	if (brt != NULL)
2286 		ifs = brt->brt_ifp;
2287 	pserialize_read_exit(s);
2288 
2289 	return ifs;
2290 }
2291 
2292 typedef bool (*bridge_iterate_cb_t)
2293     (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2294 
2295 /*
2296  * bridge_rtlist_iterate_remove:
2297  *
2298  *	It iterates on sc->sc_rtlist and removes rtnodes of it which func
2299  *	callback judges to remove. Removals of rtnodes are done in a manner
2300  *	of pserialize. To this end, all kmem_* operations are placed out of
2301  *	mutexes.
2302  */
2303 static void
2304 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2305 {
2306 	struct bridge_rtnode *brt;
2307 	struct bridge_rtnode **brt_list;
2308 	int i, count;
2309 
2310 retry:
2311 	count = sc->sc_brtcnt;
2312 	if (count == 0)
2313 		return;
2314 	brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2315 
2316 	BRIDGE_RT_LOCK(sc);
2317 	if (__predict_false(sc->sc_brtcnt > count)) {
2318 		/* The rtnodes increased, we need more memory */
2319 		BRIDGE_RT_UNLOCK(sc);
2320 		kmem_free(brt_list, sizeof(*brt_list) * count);
2321 		goto retry;
2322 	}
2323 
2324 	i = 0;
2325 	/*
2326 	 * We don't need to use a _SAFE variant here because we know
2327 	 * that a removed item keeps its next pointer as-is thanks to
2328 	 * pslist(9) and isn't freed in the loop.
2329 	 */
2330 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2331 		bool need_break = false;
2332 		if (func(sc, brt, &need_break, arg)) {
2333 			bridge_rtnode_remove(sc, brt);
2334 			brt_list[i++] = brt;
2335 		}
2336 		if (need_break)
2337 			break;
2338 	}
2339 
2340 	if (i > 0)
2341 		BRIDGE_RT_PSZ_PERFORM(sc);
2342 	BRIDGE_RT_UNLOCK(sc);
2343 
2344 	while (--i >= 0)
2345 		bridge_rtnode_destroy(brt_list[i]);
2346 
2347 	kmem_free(brt_list, sizeof(*brt_list) * count);
2348 }
2349 
2350 static bool
2351 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2352     bool *need_break, void *arg)
2353 {
2354 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2355 		/* Take into account of the subsequent removal */
2356 		if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2357 			*need_break = true;
2358 		return true;
2359 	} else
2360 		return false;
2361 }
2362 
2363 static void
2364 bridge_rttrim0(struct bridge_softc *sc)
2365 {
2366 	bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2367 }
2368 
2369 /*
2370  * bridge_rttrim:
2371  *
2372  *	Trim the routine table so that we have a number
2373  *	of routing entries less than or equal to the
2374  *	maximum number.
2375  */
2376 static void
2377 bridge_rttrim(struct bridge_softc *sc)
2378 {
2379 
2380 	/* Make sure we actually need to do this. */
2381 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2382 		return;
2383 
2384 	/* Force an aging cycle; this might trim enough addresses. */
2385 	bridge_rtage(sc);
2386 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2387 		return;
2388 
2389 	bridge_rttrim0(sc);
2390 
2391 	return;
2392 }
2393 
2394 /*
2395  * bridge_timer:
2396  *
2397  *	Aging timer for the bridge.
2398  */
2399 static void
2400 bridge_timer(void *arg)
2401 {
2402 	struct bridge_softc *sc = arg;
2403 
2404 	workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2405 }
2406 
2407 static void
2408 bridge_rtage_work(struct work *wk, void *arg)
2409 {
2410 	struct bridge_softc *sc = arg;
2411 
2412 	KASSERT(wk == &sc->sc_rtage_wk);
2413 
2414 	bridge_rtage(sc);
2415 
2416 	if (sc->sc_if.if_flags & IFF_RUNNING)
2417 		callout_reset(&sc->sc_brcallout,
2418 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2419 }
2420 
2421 static bool
2422 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2423     bool *need_break, void *arg)
2424 {
2425 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2426 	    time_uptime >= brt->brt_expire)
2427 		return true;
2428 	else
2429 		return false;
2430 }
2431 
2432 /*
2433  * bridge_rtage:
2434  *
2435  *	Perform an aging cycle.
2436  */
2437 static void
2438 bridge_rtage(struct bridge_softc *sc)
2439 {
2440 	bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2441 }
2442 
2443 
2444 static bool
2445 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2446     bool *need_break, void *arg)
2447 {
2448 	int full = *(int*)arg;
2449 
2450 	if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2451 		return true;
2452 	else
2453 		return false;
2454 }
2455 
2456 /*
2457  * bridge_rtflush:
2458  *
2459  *	Remove all dynamic addresses from the bridge.
2460  */
2461 static void
2462 bridge_rtflush(struct bridge_softc *sc, int full)
2463 {
2464 	bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2465 }
2466 
2467 /*
2468  * bridge_rtdaddr:
2469  *
2470  *	Remove an address from the table.
2471  */
2472 static int
2473 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2474 {
2475 	struct bridge_rtnode *brt;
2476 
2477 	BRIDGE_RT_LOCK(sc);
2478 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2479 		BRIDGE_RT_UNLOCK(sc);
2480 		return ENOENT;
2481 	}
2482 	bridge_rtnode_remove(sc, brt);
2483 	BRIDGE_RT_PSZ_PERFORM(sc);
2484 	BRIDGE_RT_UNLOCK(sc);
2485 
2486 	bridge_rtnode_destroy(brt);
2487 
2488 	return 0;
2489 }
2490 
2491 /*
2492  * bridge_rtdelete:
2493  *
2494  *	Delete routes to a speicifc member interface.
2495  */
2496 static void
2497 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2498 {
2499 	struct bridge_rtnode *brt;
2500 
2501 	/* XXX pserialize_perform for each entry is slow */
2502 again:
2503 	BRIDGE_RT_LOCK(sc);
2504 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2505 		if (brt->brt_ifp == ifp)
2506 			break;
2507 	}
2508 	if (brt == NULL) {
2509 		BRIDGE_RT_UNLOCK(sc);
2510 		return;
2511 	}
2512 	bridge_rtnode_remove(sc, brt);
2513 	BRIDGE_RT_PSZ_PERFORM(sc);
2514 	BRIDGE_RT_UNLOCK(sc);
2515 
2516 	bridge_rtnode_destroy(brt);
2517 
2518 	goto again;
2519 }
2520 
2521 /*
2522  * bridge_rtable_init:
2523  *
2524  *	Initialize the route table for this bridge.
2525  */
2526 static void
2527 bridge_rtable_init(struct bridge_softc *sc)
2528 {
2529 	int i;
2530 
2531 	sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2532 	    KM_SLEEP);
2533 
2534 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2535 		PSLIST_INIT(&sc->sc_rthash[i]);
2536 
2537 	sc->sc_rthash_key = cprng_fast32();
2538 
2539 	PSLIST_INIT(&sc->sc_rtlist);
2540 
2541 	sc->sc_rtlist_psz = pserialize_create();
2542 	sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2543 }
2544 
2545 /*
2546  * bridge_rtable_fini:
2547  *
2548  *	Deconstruct the route table for this bridge.
2549  */
2550 static void
2551 bridge_rtable_fini(struct bridge_softc *sc)
2552 {
2553 
2554 	kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2555 	mutex_obj_free(sc->sc_rtlist_lock);
2556 	pserialize_destroy(sc->sc_rtlist_psz);
2557 }
2558 
2559 /*
2560  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2561  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2562  */
2563 #define	mix(a, b, c)							\
2564 do {									\
2565 	a -= b; a -= c; a ^= (c >> 13);					\
2566 	b -= c; b -= a; b ^= (a << 8);					\
2567 	c -= a; c -= b; c ^= (b >> 13);					\
2568 	a -= b; a -= c; a ^= (c >> 12);					\
2569 	b -= c; b -= a; b ^= (a << 16);					\
2570 	c -= a; c -= b; c ^= (b >> 5);					\
2571 	a -= b; a -= c; a ^= (c >> 3);					\
2572 	b -= c; b -= a; b ^= (a << 10);					\
2573 	c -= a; c -= b; c ^= (b >> 15);					\
2574 } while (/*CONSTCOND*/0)
2575 
2576 static inline uint32_t
2577 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2578 {
2579 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2580 
2581 	b += addr[5] << 8;
2582 	b += addr[4];
2583 	a += (uint32_t)addr[3] << 24;
2584 	a += addr[2] << 16;
2585 	a += addr[1] << 8;
2586 	a += addr[0];
2587 
2588 	mix(a, b, c);
2589 
2590 	return (c & BRIDGE_RTHASH_MASK);
2591 }
2592 
2593 #undef mix
2594 
2595 /*
2596  * bridge_rtnode_lookup:
2597  *
2598  *	Look up a bridge route node for the specified destination.
2599  */
2600 static struct bridge_rtnode *
2601 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2602 {
2603 	struct bridge_rtnode *brt;
2604 	uint32_t hash;
2605 	int dir;
2606 
2607 	hash = bridge_rthash(sc, addr);
2608 	BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash) {
2609 		dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2610 		if (dir == 0)
2611 			return brt;
2612 		if (dir > 0)
2613 			return NULL;
2614 	}
2615 
2616 	return NULL;
2617 }
2618 
2619 /*
2620  * bridge_rtnode_insert:
2621  *
2622  *	Insert the specified bridge node into the route table.  We
2623  *	assume the entry is not already in the table.
2624  */
2625 static int
2626 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2627 {
2628 	struct bridge_rtnode *lbrt, *prev = NULL;
2629 	uint32_t hash;
2630 
2631 	KASSERT(BRIDGE_RT_LOCKED(sc));
2632 
2633 	hash = bridge_rthash(sc, brt->brt_addr);
2634 	BRIDGE_RTHASH_WRITER_FOREACH(lbrt, sc, hash) {
2635 		int dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2636 		if (dir == 0)
2637 			return EEXIST;
2638 		if (dir > 0)
2639 			break;
2640 		prev = lbrt;
2641 	}
2642 	if (prev == NULL)
2643 		BRIDGE_RTHASH_WRITER_INSERT_HEAD(sc, hash, brt);
2644 	else
2645 		BRIDGE_RTHASH_WRITER_INSERT_AFTER(prev, brt);
2646 
2647 	BRIDGE_RTLIST_WRITER_INSERT_HEAD(sc, brt);
2648 	sc->sc_brtcnt++;
2649 
2650 	return 0;
2651 }
2652 
2653 /*
2654  * bridge_rtnode_remove:
2655  *
2656  *	Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2657  */
2658 static void
2659 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2660 {
2661 
2662 	KASSERT(BRIDGE_RT_LOCKED(sc));
2663 
2664 	BRIDGE_RTHASH_WRITER_REMOVE(brt);
2665 	BRIDGE_RTLIST_WRITER_REMOVE(brt);
2666 	sc->sc_brtcnt--;
2667 }
2668 
2669 /*
2670  * bridge_rtnode_destroy:
2671  *
2672  *	Destroy a bridge rtnode.
2673  */
2674 static void
2675 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2676 {
2677 
2678 	PSLIST_ENTRY_DESTROY(brt, brt_list);
2679 	PSLIST_ENTRY_DESTROY(brt, brt_hash);
2680 	pool_put(&bridge_rtnode_pool, brt);
2681 }
2682 
2683 extern pfil_head_t *inet_pfil_hook;                 /* XXX */
2684 extern pfil_head_t *inet6_pfil_hook;                /* XXX */
2685 
2686 /*
2687  * Send bridge packets through IPF if they are one of the types IPF can deal
2688  * with, or if they are ARP or REVARP.  (IPF will pass ARP and REVARP without
2689  * question.)
2690  */
2691 static int
2692 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2693 {
2694 	int snap, error;
2695 	struct ether_header *eh1, eh2;
2696 	struct llc llc1;
2697 	uint16_t ether_type;
2698 
2699 	snap = 0;
2700 	error = -1;	/* Default error if not error == 0 */
2701 	eh1 = mtod(*mp, struct ether_header *);
2702 	ether_type = ntohs(eh1->ether_type);
2703 
2704 	/*
2705 	 * Check for SNAP/LLC.
2706 	 */
2707 	if (ether_type < ETHERMTU) {
2708 		struct llc *llc2 = (struct llc *)(eh1 + 1);
2709 
2710 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2711 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
2712 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
2713 		    llc2->llc_control == LLC_UI) {
2714 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
2715 			snap = 1;
2716 		}
2717 	}
2718 
2719 	/* drop VLAN traffic untagged by hardware offloading */
2720 	if (vlan_has_tag(*mp))
2721 		goto bad;
2722 
2723 	/*
2724 	 * If we're trying to filter bridge traffic, don't look at anything
2725 	 * other than IP and ARP traffic.  If the filter doesn't understand
2726 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
2727 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2728 	 * but of course we don't have an AppleTalk filter to begin with.
2729 	 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2730 	 * ARP traffic.)
2731 	 */
2732 	switch (ether_type) {
2733 		case ETHERTYPE_ARP:
2734 		case ETHERTYPE_REVARP:
2735 			return 0; /* Automatically pass */
2736 		case ETHERTYPE_IP:
2737 # ifdef INET6
2738 		case ETHERTYPE_IPV6:
2739 # endif /* INET6 */
2740 			break;
2741 		default:
2742 			goto bad;
2743 	}
2744 
2745 	/* Strip off the Ethernet header and keep a copy. */
2746 	m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2747 	m_adj(*mp, ETHER_HDR_LEN);
2748 
2749 	/* Strip off snap header, if present */
2750 	if (snap) {
2751 		m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2752 		m_adj(*mp, sizeof(struct llc));
2753 	}
2754 
2755 	/*
2756 	 * Check basic packet sanity and run IPF through pfil.
2757 	 */
2758 	KASSERT(!cpu_intr_p());
2759 	switch (ether_type)
2760 	{
2761 	case ETHERTYPE_IP :
2762 		error = bridge_ip_checkbasic(mp);
2763 		if (error == 0)
2764 			error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2765 		break;
2766 # ifdef INET6
2767 	case ETHERTYPE_IPV6 :
2768 		error = bridge_ip6_checkbasic(mp);
2769 		if (error == 0)
2770 			error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2771 		break;
2772 # endif
2773 	default :
2774 		error = 0;
2775 		break;
2776 	}
2777 
2778 	if (*mp == NULL)
2779 		return error;
2780 	if (error != 0)
2781 		goto bad;
2782 
2783 	error = -1;
2784 
2785 	/*
2786 	 * Finally, put everything back the way it was and return
2787 	 */
2788 	if (snap) {
2789 		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2790 		if (*mp == NULL)
2791 			return error;
2792 		bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2793 	}
2794 
2795 	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2796 	if (*mp == NULL)
2797 		return error;
2798 	bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2799 
2800 	return 0;
2801 
2802     bad:
2803 	m_freem(*mp);
2804 	*mp = NULL;
2805 	return error;
2806 }
2807 
2808 /*
2809  * Perform basic checks on header size since
2810  * IPF assumes ip_input has already processed
2811  * it for it.  Cut-and-pasted from ip_input.c.
2812  * Given how simple the IPv6 version is,
2813  * does the IPv4 version really need to be
2814  * this complicated?
2815  *
2816  * XXX Should we update ipstat here, or not?
2817  * XXX Right now we update ipstat but not
2818  * XXX csum_counter.
2819  */
2820 static int
2821 bridge_ip_checkbasic(struct mbuf **mp)
2822 {
2823 	struct mbuf *m = *mp;
2824 	struct ip *ip;
2825 	int len, hlen;
2826 
2827 	if (*mp == NULL)
2828 		return -1;
2829 
2830 	if (M_GET_ALIGNED_HDR(&m, struct ip, true) != 0) {
2831 		/* XXXJRT new stat, please */
2832 		ip_statinc(IP_STAT_TOOSMALL);
2833 		goto bad;
2834 	}
2835 	ip = mtod(m, struct ip *);
2836 	if (ip == NULL) goto bad;
2837 
2838 	if (ip->ip_v != IPVERSION) {
2839 		ip_statinc(IP_STAT_BADVERS);
2840 		goto bad;
2841 	}
2842 	hlen = ip->ip_hl << 2;
2843 	if (hlen < sizeof(struct ip)) { /* minimum header length */
2844 		ip_statinc(IP_STAT_BADHLEN);
2845 		goto bad;
2846 	}
2847 	if (hlen > m->m_len) {
2848 		if ((m = m_pullup(m, hlen)) == 0) {
2849 			ip_statinc(IP_STAT_BADHLEN);
2850 			goto bad;
2851 		}
2852 		ip = mtod(m, struct ip *);
2853 		if (ip == NULL) goto bad;
2854 	}
2855 
2856 	switch (m->m_pkthdr.csum_flags &
2857 	        ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2858 	         M_CSUM_IPv4_BAD)) {
2859 	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2860 		/* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2861 		goto bad;
2862 
2863 	case M_CSUM_IPv4:
2864 		/* Checksum was okay. */
2865 		/* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2866 		break;
2867 
2868 	default:
2869 		/* Must compute it ourselves. */
2870 		/* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2871 		if (in_cksum(m, hlen) != 0)
2872 			goto bad;
2873 		break;
2874 	}
2875 
2876 	/* Retrieve the packet length. */
2877 	len = ntohs(ip->ip_len);
2878 
2879 	/*
2880 	 * Check for additional length bogosity
2881 	 */
2882 	if (len < hlen) {
2883 		ip_statinc(IP_STAT_BADLEN);
2884 		goto bad;
2885 	}
2886 
2887 	/*
2888 	 * Check that the amount of data in the buffers
2889 	 * is as at least much as the IP header would have us expect.
2890 	 * Drop packet if shorter than we expect.
2891 	 */
2892 	if (m->m_pkthdr.len < len) {
2893 		ip_statinc(IP_STAT_TOOSHORT);
2894 		goto bad;
2895 	}
2896 
2897 	/* Checks out, proceed */
2898 	*mp = m;
2899 	return 0;
2900 
2901     bad:
2902 	*mp = m;
2903 	return -1;
2904 }
2905 
2906 # ifdef INET6
2907 /*
2908  * Same as above, but for IPv6.
2909  * Cut-and-pasted from ip6_input.c.
2910  * XXX Should we update ip6stat, or not?
2911  */
2912 static int
2913 bridge_ip6_checkbasic(struct mbuf **mp)
2914 {
2915 	struct mbuf *m = *mp;
2916 	struct ip6_hdr *ip6;
2917 
2918 	/*
2919 	 * If the IPv6 header is not aligned, slurp it up into a new
2920 	 * mbuf with space for link headers, in the event we forward
2921 	 * it.  Otherwise, if it is aligned, make sure the entire base
2922 	 * IPv6 header is in the first mbuf of the chain.
2923 	 */
2924 	if (M_GET_ALIGNED_HDR(&m, struct ip6_hdr, true) != 0) {
2925 		struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2926 		/* XXXJRT new stat, please */
2927 		ip6_statinc(IP6_STAT_TOOSMALL);
2928 		in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2929 		goto bad;
2930 	}
2931 
2932 	ip6 = mtod(m, struct ip6_hdr *);
2933 
2934 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2935 		ip6_statinc(IP6_STAT_BADVERS);
2936 		in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2937 		goto bad;
2938 	}
2939 
2940 	/* Checks out, proceed */
2941 	*mp = m;
2942 	return 0;
2943 
2944     bad:
2945 	*mp = m;
2946 	return -1;
2947 }
2948 # endif /* INET6 */
2949