1 /*	$NetBSD: if_bridge.c,v 1.191 2024/07/05 04:31:53 rin Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by Jason L. Wright
53  * 4. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66  * POSSIBILITY OF SUCH DAMAGE.
67  *
68  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69  */
70 
71 /*
72  * Network interface bridge support.
73  *
74  * TODO:
75  *
76  *	- Currently only supports Ethernet-like interfaces (Ethernet,
77  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
78  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogeneous bridges).
80  */
81 
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.191 2024/07/05 04:31:53 rin Exp $");
84 
85 #ifdef _KERNEL_OPT
86 #include "opt_inet.h"
87 #include "opt_net_mpsafe.h"
88 #endif /* _KERNEL_OPT */
89 
90 #include <sys/param.h>
91 #include <sys/kernel.h>
92 #include <sys/mbuf.h>
93 #include <sys/queue.h>
94 #include <sys/socket.h>
95 #include <sys/socketvar.h> /* for softnet_lock */
96 #include <sys/sockio.h>
97 #include <sys/systm.h>
98 #include <sys/proc.h>
99 #include <sys/pool.h>
100 #include <sys/kauth.h>
101 #include <sys/cpu.h>
102 #include <sys/cprng.h>
103 #include <sys/mutex.h>
104 #include <sys/kmem.h>
105 
106 #include <net/bpf.h>
107 #include <net/if.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 #include <net/if_llc.h>
111 
112 #include <net/if_ether.h>
113 #include <net/if_bridgevar.h>
114 #include <net/ether_sw_offload.h>
115 
116 /* Used for bridge_ip[6]_checkbasic */
117 #include <netinet/in.h>
118 #include <netinet/in_systm.h>
119 #include <netinet/ip.h>
120 #include <netinet/ip_var.h>
121 #include <netinet/ip_private.h>		/* XXX */
122 #include <netinet/ip6.h>
123 #include <netinet6/in6_var.h>
124 #include <netinet6/ip6_var.h>
125 #include <netinet6/ip6_private.h>	/* XXX */
126 
127 /*
128  * Size of the route hash table.  Must be a power of two.
129  */
130 #ifndef BRIDGE_RTHASH_SIZE
131 #define	BRIDGE_RTHASH_SIZE		1024
132 #endif
133 
134 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
135 
136 #include "carp.h"
137 #if NCARP > 0
138 #include <netinet/in.h>
139 #include <netinet/in_var.h>
140 #include <netinet/ip_carp.h>
141 #endif
142 
143 #include "ioconf.h"
144 
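/*
 * The SIOCGDRVSPEC/SIOCSDRVSPEC translation path (BC_F_XLATEIN and
 * BC_F_XLATEOUT below) accesses both structures through the ifbifconf
 * member, so struct ifbifconf and struct ifbaconf must share the same
 * layout; the assertions below guarantee this.
 */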
145 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
146 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
147 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
148 
149 /*
150  * Maximum number of addresses to cache.
151  */
152 #ifndef BRIDGE_RTABLE_MAX
153 #define	BRIDGE_RTABLE_MAX		100
154 #endif
155 
156 /*
157  * Spanning tree defaults.
158  */
159 #define	BSTP_DEFAULT_MAX_AGE		(20 * 256)
160 #define	BSTP_DEFAULT_HELLO_TIME		(2 * 256)
161 #define	BSTP_DEFAULT_FORWARD_DELAY	(15 * 256)
162 #define	BSTP_DEFAULT_HOLD_TIME		(1 * 256)
163 #define	BSTP_DEFAULT_BRIDGE_PRIORITY	0x8000
164 #define	BSTP_DEFAULT_PORT_PRIORITY	0x80
165 #define	BSTP_DEFAULT_PATH_COST		55
166 
167 /*
168  * Timeout (in seconds) for entries learned dynamically.
169  */
170 #ifndef BRIDGE_RTABLE_TIMEOUT
171 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
172 #endif
173 
174 /*
175  * Number of seconds between walks of the route list.
176  */
177 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
178 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
179 #endif
180 
181 #define BRIDGE_RT_LOCK(_sc)	mutex_enter((_sc)->sc_rtlist_lock)
182 #define BRIDGE_RT_UNLOCK(_sc)	mutex_exit((_sc)->sc_rtlist_lock)
183 #define BRIDGE_RT_LOCKED(_sc)	mutex_owned((_sc)->sc_rtlist_lock)
184 
185 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
186 				pserialize_perform((_sc)->sc_rtlist_psz)
187 
188 #define BRIDGE_RT_RENTER(__s)	do { __s = pserialize_read_enter(); } while (0)
189 #define BRIDGE_RT_REXIT(__s)	do { pserialize_read_exit(__s); } while (0)
190 
191 #define BRIDGE_RTLIST_READER_FOREACH(_brt, _sc)			\
192 	PSLIST_READER_FOREACH((_brt), &((_sc)->sc_rtlist),		\
193 	    struct bridge_rtnode, brt_list)
194 #define BRIDGE_RTLIST_WRITER_FOREACH(_brt, _sc)			\
195 	PSLIST_WRITER_FOREACH((_brt), &((_sc)->sc_rtlist),		\
196 	    struct bridge_rtnode, brt_list)
197 #define BRIDGE_RTLIST_WRITER_INSERT_HEAD(_sc, _brt)			\
198 	PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rtlist, brt, brt_list)
199 #define BRIDGE_RTLIST_WRITER_REMOVE(_brt)				\
200 	PSLIST_WRITER_REMOVE((_brt), brt_list)
201 
202 #define BRIDGE_RTHASH_READER_FOREACH(_brt, _sc, _hash)			\
203 	PSLIST_READER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)],	\
204 	    struct bridge_rtnode, brt_hash)
205 #define BRIDGE_RTHASH_WRITER_FOREACH(_brt, _sc, _hash)			\
206 	PSLIST_WRITER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)],	\
207 	    struct bridge_rtnode, brt_hash)
208 #define BRIDGE_RTHASH_WRITER_INSERT_HEAD(_sc, _hash, _brt)		\
209 	PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rthash[(_hash)], brt, brt_hash)
210 #define BRIDGE_RTHASH_WRITER_INSERT_AFTER(_brt, _new)			\
211 	PSLIST_WRITER_INSERT_AFTER((_brt), (_new), brt_hash)
212 #define BRIDGE_RTHASH_WRITER_REMOVE(_brt)				\
213 	PSLIST_WRITER_REMOVE((_brt), brt_hash)
214 
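/*
 * When the kernel is not built with NET_MPSAFE, the bridge paths must
 * take the global locks (KERNEL_LOCK, softnet_lock, splsoftnet) around
 * calls into the rest of the network stack; with NET_MPSAFE these
 * macros are no-ops.
 */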
215 #ifdef NET_MPSAFE
216 #define DECLARE_LOCK_VARIABLE
217 #define ACQUIRE_GLOBAL_LOCKS()	do { } while (0)
218 #define RELEASE_GLOBAL_LOCKS()	do { } while (0)
219 #else
220 #define DECLARE_LOCK_VARIABLE	int __s
221 #define ACQUIRE_GLOBAL_LOCKS()	do {					\
222 					KERNEL_LOCK(1, NULL);		\
223 					mutex_enter(softnet_lock);	\
224 					__s = splsoftnet();		\
225 				} while (0)
226 #define RELEASE_GLOBAL_LOCKS()	do {					\
227 					splx(__s);			\
228 					mutex_exit(softnet_lock);	\
229 					KERNEL_UNLOCK_ONE(NULL);	\
230 				} while (0)
231 #endif
232 
233 struct psref_class *bridge_psref_class __read_mostly;
234 
235 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
236 
237 static struct pool bridge_rtnode_pool;
238 
239 static int	bridge_clone_create(struct if_clone *, int);
240 static int	bridge_clone_destroy(struct ifnet *);
241 
242 static int	bridge_ioctl(struct ifnet *, u_long, void *);
243 static int	bridge_init(struct ifnet *);
244 static void	bridge_stop(struct ifnet *, int);
245 static void	bridge_start(struct ifnet *);
246 static void	bridge_ifdetach(void *);
247 
248 static void	bridge_input(struct ifnet *, struct mbuf *);
249 static void	bridge_forward(struct bridge_softc *, struct mbuf *);
250 
251 static void	bridge_timer(void *);
252 
253 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
254 				 struct mbuf *);
255 
256 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
257 				struct ifnet *, int, uint8_t);
258 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
259 static void	bridge_rttrim(struct bridge_softc *);
260 static void	bridge_rtage(struct bridge_softc *);
261 static void	bridge_rtage_work(struct work *, void *);
262 static void	bridge_rtflush(struct bridge_softc *, int);
263 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
264 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
265 
266 static void	bridge_rtable_init(struct bridge_softc *);
267 static void	bridge_rtable_fini(struct bridge_softc *);
268 
269 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
270 						  const uint8_t *);
271 static int	bridge_rtnode_insert(struct bridge_softc *,
272 				     struct bridge_rtnode *);
273 static void	bridge_rtnode_remove(struct bridge_softc *,
274 				     struct bridge_rtnode *);
275 static void	bridge_rtnode_destroy(struct bridge_rtnode *);
276 
277 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
278 						  const char *name,
279 						  struct psref *);
280 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
281 						     struct ifnet *ifp,
282 						     struct psref *);
283 static void	bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
284                                       struct psref *);
285 static void	bridge_delete_member(struct bridge_softc *,
286 				     struct bridge_iflist *);
287 static void	bridge_acquire_member(struct bridge_softc *sc,
288                                       struct bridge_iflist *,
289                                       struct psref *);
290 
291 static int	bridge_ioctl_add(struct bridge_softc *, void *);
292 static int	bridge_ioctl_del(struct bridge_softc *, void *);
293 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
294 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
295 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
296 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
297 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
298 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
299 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
300 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
301 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
302 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
303 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
304 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
305 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
306 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
307 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
308 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
309 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
310 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
311 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
312 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
313 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
314 static int	bridge_ioctl_gfilt(struct bridge_softc *, void *);
315 static int	bridge_ioctl_sfilt(struct bridge_softc *, void *);
316 static int	bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
317 static int	bridge_ip_checkbasic(struct mbuf **mp);
318 # ifdef INET6
319 static int	bridge_ip6_checkbasic(struct mbuf **mp);
320 # endif /* INET6 */
321 
322 struct bridge_control {
323 	int	(*bc_func)(struct bridge_softc *, void *);
324 	int	bc_argsize;
325 	int	bc_flags;
326 };
327 
328 #define	BC_F_COPYIN		0x01	/* copy arguments in */
329 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
330 #define	BC_F_SUSER		0x04	/* do super-user check */
331 #define BC_F_XLATEIN		0x08	/* xlate arguments in */
332 #define BC_F_XLATEOUT		0x10	/* xlate arguments out */
333 
334 static const struct bridge_control bridge_control_table[] = {
335 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
336 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
337 
338 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
339 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
340 
341 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
342 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
343 
344 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
345 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
346 
347 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
348 
349 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
350 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
351 
352 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
353 
354 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
355 
356 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
357 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
358 
359 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
360 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
361 
362 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
363 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
364 
365 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
366 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
367 
368 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
369 
370 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
371 
372 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
373 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
374 
375 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
376 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
377 };
378 
379 static const int bridge_control_table_size = __arraycount(bridge_control_table);
380 
381 static struct if_clone bridge_cloner =
382     IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
383 
384 /*
385  * bridgeattach:
386  *
387  *	Pseudo-device attach routine.
388  */
389 void
390 bridgeattach(int n)
391 {
392 
393 	pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
394 	    0, 0, 0, "brtpl", NULL, IPL_NET);
395 
396 	bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
397 
398 	if_clone_attach(&bridge_cloner);
399 }
400 
401 /*
402  * bridge_clone_create:
403  *
404  *	Create a new bridge instance.
405  */
406 static int
407 bridge_clone_create(struct if_clone *ifc, int unit)
408 {
409 	struct bridge_softc *sc;
410 	struct ifnet *ifp;
411 	int error;
412 
413 	sc = kmem_zalloc(sizeof(*sc),  KM_SLEEP);
414 	ifp = &sc->sc_if;
415 
416 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
417 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
418 	sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
419 	sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
420 	sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
421 	sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
422 	sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
423 	sc->sc_filter_flags = 0;
424 
425 	/* Initialize our routing table. */
426 	bridge_rtable_init(sc);
427 
428 	error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
429 	    bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
430 	if (error)
431 		panic("%s: workqueue_create %d\n", __func__, error);
432 
433 	callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
434 	callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
435 
436 	mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
437 	PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
438 	sc->sc_iflist_psref.bip_psz = pserialize_create();
439 
440 	if_initname(ifp, ifc->ifc_name, unit);
441 	ifp->if_softc = sc;
442 #ifdef NET_MPSAFE
443 	ifp->if_extflags = IFEF_MPSAFE;
444 #endif
445 	ifp->if_mtu = ETHERMTU;
446 	ifp->if_ioctl = bridge_ioctl;
447 	ifp->if_output = bridge_output;
448 	ifp->if_start = bridge_start;
449 	ifp->if_stop = bridge_stop;
450 	ifp->if_init = bridge_init;
451 	ifp->if_type = IFT_BRIDGE;
452 	ifp->if_addrlen = 0;
453 	ifp->if_dlt = DLT_EN10MB;
454 	ifp->if_hdrlen = ETHER_HDR_LEN;
455 	if_initialize(ifp);
456 
457 	/*
458 	 * Set the link state to down.
459 	 * When interfaces are added the link state will reflect
460 	 * the best link state of the combined interfaces.
461 	 */
462 	ifp->if_link_state = LINK_STATE_DOWN;
463 
464 	if_alloc_sadl(ifp);
465 	if_register(ifp);
466 
467 	return 0;
468 }
469 
470 /*
471  * bridge_clone_destroy:
472  *
473  *	Destroy a bridge instance.
474  */
475 static int
476 bridge_clone_destroy(struct ifnet *ifp)
477 {
478 	struct bridge_softc *sc = ifp->if_softc;
479 	struct bridge_iflist *bif;
480 
481 	if ((ifp->if_flags & IFF_RUNNING) != 0)
482 		bridge_stop(ifp, 1);
483 
484 	BRIDGE_LOCK(sc);
485 	for (;;) {
486 		bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
487 		    bif_next);
488 		if (bif == NULL)
489 			break;
490 		bridge_delete_member(sc, bif);
491 	}
492 	PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
493 	BRIDGE_UNLOCK(sc);
494 
495 	if_detach(ifp);
496 
497 	/* Tear down the routing table. */
498 	bridge_rtable_fini(sc);
499 
500 	pserialize_destroy(sc->sc_iflist_psref.bip_psz);
501 	mutex_destroy(&sc->sc_iflist_psref.bip_lock);
502 	callout_destroy(&sc->sc_brcallout);
503 	callout_destroy(&sc->sc_bstpcallout);
504 	workqueue_destroy(sc->sc_rtage_wq);
505 	kmem_free(sc, sizeof(*sc));
506 
507 	return 0;
508 }
509 
510 /*
511  * bridge_ioctl:
512  *
513  *	Handle a control request from the operator.
514  */
515 static int
516 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
517 {
518 	struct bridge_softc *sc = ifp->if_softc;
519 	struct lwp *l = curlwp;	/* XXX */
520 	union {
521 		struct ifbreq ifbreq;
522 		struct ifbifconf ifbifconf;
523 		struct ifbareq ifbareq;
524 		struct ifbaconf ifbaconf;
525 		struct ifbrparam ifbrparam;
526 	} args;
527 	struct ifdrv *ifd = (struct ifdrv *) data;
528 	const struct bridge_control *bc = NULL; /* XXXGCC */
529 	int error = 0;
530 
531 	/* Authorize command before calling splsoftnet(). */
532 	switch (cmd) {
533 	case SIOCGDRVSPEC:
534 	case SIOCSDRVSPEC:
535 		if (ifd->ifd_cmd >= bridge_control_table_size
536 		    || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
537 			error = EINVAL;
538 			return error;
539 		}
540 
541 		/* We only care about BC_F_SUSER at this point. */
542 		if ((bc->bc_flags & BC_F_SUSER) == 0)
543 			break;
544 
545 		error = kauth_authorize_network(l->l_cred,
546 		    KAUTH_NETWORK_INTERFACE_BRIDGE,
547 		    cmd == SIOCGDRVSPEC ?
548 		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
549 		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
550 		     ifd, NULL, NULL);
551 		if (error)
552 			return error;
553 
554 		break;
555 	}
556 
557 	const int s = splsoftnet();
558 
559 	switch (cmd) {
560 	case SIOCGDRVSPEC:
561 	case SIOCSDRVSPEC:
562 		KASSERT(bc != NULL);
563 		if (cmd == SIOCGDRVSPEC &&
564 		    (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
565 			error = EINVAL;
566 			break;
567 		}
568 		else if (cmd == SIOCSDRVSPEC &&
569 		    (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
570 			error = EINVAL;
571 			break;
572 		}
573 
574 		/* BC_F_SUSER is checked above, before splsoftnet(). */
575 
576 		if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
577 		    && (ifd->ifd_len != bc->bc_argsize
578 			|| ifd->ifd_len > sizeof(args))) {
579 			error = EINVAL;
580 			break;
581 		}
582 
583 		memset(&args, 0, sizeof(args));
584 		if (bc->bc_flags & BC_F_COPYIN) {
585 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
586 			if (error)
587 				break;
588 		} else if (bc->bc_flags & BC_F_XLATEIN) {
589 			args.ifbifconf.ifbic_len = ifd->ifd_len;
590 			args.ifbifconf.ifbic_buf = ifd->ifd_data;
591 		}
592 
593 		error = (*bc->bc_func)(sc, &args);
594 		if (error)
595 			break;
596 
597 		if (bc->bc_flags & BC_F_COPYOUT) {
598 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
599 		} else if (bc->bc_flags & BC_F_XLATEOUT) {
600 			ifd->ifd_len = args.ifbifconf.ifbic_len;
601 			ifd->ifd_data = args.ifbifconf.ifbic_buf;
602 		}
603 		break;
604 
605 	case SIOCSIFFLAGS:
606 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
607 			break;
608 		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
609 		case IFF_RUNNING:
610 			/*
611 			 * If interface is marked down and it is running,
612 			 * then stop and disable it.
613 			 */
614 			if_stop(ifp, 1);
615 			break;
616 		case IFF_UP:
617 			/*
618 			 * If interface is marked up and it is stopped, then
619 			 * start it.
620 			 */
621 			error = if_init(ifp);
622 			break;
623 		default:
624 			break;
625 		}
626 		break;
627 
628 	case SIOCSIFMTU:
629 		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
630 			error = 0;
631 		break;
632 
633 	case SIOCGIFCAP:
634 	    {
635 		struct ifcapreq *ifcr = (struct ifcapreq *)data;
636 		ifcr->ifcr_capabilities = sc->sc_capenable;
637 		ifcr->ifcr_capenable = sc->sc_capenable;
638 		break;
639 	    }
640 
641 	default:
642 		error = ifioctl_common(ifp, cmd, data);
643 		break;
644 	}
645 
646 	splx(s);
647 
648 	return error;
649 }
650 
651 /*
652  * bridge_lookup_member:
653  *
654  *	Look up a bridge member interface.
655  */
656 static struct bridge_iflist *
657 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
658 {
659 	struct bridge_iflist *bif;
660 	struct ifnet *ifp;
661 	int s;
662 
663 	BRIDGE_PSZ_RENTER(s);
664 
665 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
666 		ifp = bif->bif_ifp;
667 		if (strcmp(ifp->if_xname, name) == 0)
668 			break;
669 	}
670 	if (bif != NULL)
671 		bridge_acquire_member(sc, bif, psref);
672 
673 	BRIDGE_PSZ_REXIT(s);
674 
675 	return bif;
676 }
677 
678 /*
679  * bridge_lookup_member_if:
680  *
681  *	Look up a bridge member interface by ifnet*.
682  */
683 static struct bridge_iflist *
684 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
685     struct psref *psref)
686 {
687 	struct bridge_iflist *bif;
688 	int s;
689 
690 	BRIDGE_PSZ_RENTER(s);
691 
692 	bif = member_ifp->if_bridgeif;
693 	if (bif != NULL) {
694 		psref_acquire(psref, &bif->bif_psref,
695 		    bridge_psref_class);
696 	}
697 
698 	BRIDGE_PSZ_REXIT(s);
699 
700 	return bif;
701 }
702 
703 static void
704 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
705     struct psref *psref)
706 {
707 
708 	psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
709 }
710 
711 /*
712  * bridge_release_member:
713  *
714  *	Release the specified member interface.
715  */
716 static void
717 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
718     struct psref *psref)
719 {
720 
721 	psref_release(psref, &bif->bif_psref, bridge_psref_class);
722 }
723 
724 /*
725  * bridge_delete_member:
726  *
727  *	Delete the specified member interface.
728  */
729 static void
730 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
731 {
732 	struct ifnet *ifs = bif->bif_ifp;
733 
734 	KASSERT(BRIDGE_LOCKED(sc));
735 
736 	ifs->_if_input = ether_input;
737 	ifs->if_bridge = NULL;
738 	ifs->if_bridgeif = NULL;
739 
740 	PSLIST_WRITER_REMOVE(bif, bif_next);
741 	BRIDGE_PSZ_PERFORM(sc);
742 
743 	if_linkstate_change_disestablish(ifs,
744 	    bif->bif_linkstate_hook, BRIDGE_LOCK_OBJ(sc));
745 	ether_ifdetachhook_disestablish(ifs,
746 	    bif->bif_ifdetach_hook, BRIDGE_LOCK_OBJ(sc));
747 
748 	BRIDGE_UNLOCK(sc);
749 
750 	switch (ifs->if_type) {
751 	case IFT_ETHER:
752 	case IFT_L2TP:
753 		/*
754 		 * Take the interface out of promiscuous mode.
755 		 * Don't call it while holding a spin lock.
756 		 */
757 		(void) ifpromisc(ifs, 0);
758 		IFNET_LOCK(ifs);
759 		(void) ether_disable_vlan_mtu(ifs);
760 		IFNET_UNLOCK(ifs);
761 		break;
762 	default:
763 #ifdef DIAGNOSTIC
764 		panic("%s: impossible", __func__);
765 #endif
766 		break;
767 	}
768 
769 	psref_target_destroy(&bif->bif_psref, bridge_psref_class);
770 
771 	PSLIST_ENTRY_DESTROY(bif, bif_next);
772 	kmem_free(bif, sizeof(*bif));
773 
774 	BRIDGE_LOCK(sc);
775 }
776 
777 /*
778  * bridge_calc_csum_flags:
779  *
780  *	Calculate the checksum flags and capabilities supported by all
member interfaces (the bitwise AND across every member).
781  */
782 void
783 bridge_calc_csum_flags(struct bridge_softc *sc)
784 {
785 	struct bridge_iflist *bif;
786 	struct ifnet *ifs = NULL;
787 	int flags = ~0;
788 	int capenable = ~0;
789 
790 	BRIDGE_LOCK(sc);
791 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
792 		ifs = bif->bif_ifp;
793 		flags &= ifs->if_csum_flags_tx;
794 		capenable &= ifs->if_capenable;
795 	}
796 	sc->sc_csum_flags_tx = flags;
797 	sc->sc_capenable = (ifs != NULL) ? capenable : 0;
798 	BRIDGE_UNLOCK(sc);
799 }
800 
801 /*
802  * bridge_calc_link_state:
803  *
804  *	Calculate the link state based on each member interface.
805  */
806 static void
807 bridge_calc_link_state(void *xsc)
808 {
809 	struct bridge_softc *sc = xsc;
810 	struct bridge_iflist *bif;
811 	struct ifnet *ifs;
812 	int link_state = LINK_STATE_DOWN;
813 
814 	BRIDGE_LOCK(sc);
815 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
816 		ifs = bif->bif_ifp;
817 		if (ifs->if_link_state == LINK_STATE_UP) {
818 			link_state = LINK_STATE_UP;
819 			break;
820 		}
821 		if (ifs->if_link_state == LINK_STATE_UNKNOWN)
822 			link_state = LINK_STATE_UNKNOWN;
823 	}
824 	if_link_state_change(&sc->sc_if, link_state);
825 	BRIDGE_UNLOCK(sc);
826 }
827 
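/*
 * bridge_ioctl_add:
 *
 *	Handle BRDGADD: add the named interface as a bridge member,
 *	placing it into promiscuous mode and hooking its input path.
 */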
828 static int
829 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
830 {
831 	struct ifbreq *req = arg;
832 	struct bridge_iflist *bif = NULL;
833 	struct ifnet *ifs;
834 	int error = 0;
835 	struct psref psref;
836 
837 	ifs = if_get(req->ifbr_ifsname, &psref);
838 	if (ifs == NULL)
839 		return ENOENT;
840 
841 	if (ifs->if_bridge == sc) {
842 		error = EEXIST;
843 		goto out;
844 	}
845 
846 	if (ifs->if_bridge != NULL) {
847 		error = EBUSY;
848 		goto out;
849 	}
850 
851 	if (ifs->_if_input != ether_input) {
852 		error = EINVAL;
853 		goto out;
854 	}
855 
856 	/* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
857 	if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
858 		error = EINVAL;
859 		goto out;
860 	}
861 
862 	bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
863 
864 	switch (ifs->if_type) {
865 	case IFT_ETHER:
866 		if (sc->sc_if.if_mtu != ifs->if_mtu) {
867 			/* Change MTU of added interface to bridge MTU */
868 			struct ifreq ifr;
869 			memset(&ifr, 0, sizeof(ifr));
870 			ifr.ifr_mtu = sc->sc_if.if_mtu;
871 			IFNET_LOCK(ifs);
872 			error = if_ioctl(ifs, SIOCSIFMTU, &ifr);
873 			IFNET_UNLOCK(ifs);
874 			if (error != 0)
875 				goto out;
876 		}
877 		/* FALLTHROUGH */
878 	case IFT_L2TP:
879 		IFNET_LOCK(ifs);
880 		error = ether_enable_vlan_mtu(ifs);
881 		IFNET_UNLOCK(ifs);
882 		if (error > 0)
883 			goto out;
884 		/*
885 		 * Place the interface into promiscuous mode.
886 		 */
887 		error = ifpromisc(ifs, 1);
888 		if (error)
889 			goto out;
890 		break;
891 	default:
892 		error = EINVAL;
893 		goto out;
894 	}
895 
896 	bif->bif_ifp = ifs;
897 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
898 	bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
899 	bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
900 	bif->bif_linkstate_hook = if_linkstate_change_establish(ifs,
901 	    bridge_calc_link_state, sc);
902 	PSLIST_ENTRY_INIT(bif, bif_next);
903 	psref_target_init(&bif->bif_psref, bridge_psref_class);
904 
905 	BRIDGE_LOCK(sc);
906 
907 	ifs->if_bridge = sc;
908 	ifs->if_bridgeif = bif;
909 	PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
910 	ifs->_if_input = bridge_input;
911 
912 	BRIDGE_UNLOCK(sc);
913 
914 	bif->bif_ifdetach_hook = ether_ifdetachhook_establish(ifs,
915 	    bridge_ifdetach, (void *)ifs);
916 
917 	bridge_calc_csum_flags(sc);
918 	bridge_calc_link_state(sc);
919 
920 	if (sc->sc_if.if_flags & IFF_RUNNING)
921 		bstp_initialization(sc);
922 	else
923 		bstp_stop(sc);
924 
925 out:
926 	if_put(ifs, &psref);
927 	if (error) {
928 		if (bif != NULL)
929 			kmem_free(bif, sizeof(*bif));
930 	}
931 	return error;
932 }
933 
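/*
 * bridge_ioctl_del:
 *
 *	Handle BRDGDEL: remove the named interface from the bridge and
 *	delete any routes learned on it.
 */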
934 static int
935 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
936 {
937 	struct ifbreq *req = arg;
938 	const char *name = req->ifbr_ifsname;
939 	struct bridge_iflist *bif;
940 	struct ifnet *ifs;
941 
942 	BRIDGE_LOCK(sc);
943 
944 	/*
945 	 * Don't use bridge_lookup_member. We want to get a member
946 	 * with bif_refs == 0.
947 	 */
948 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
949 		ifs = bif->bif_ifp;
950 		if (strcmp(ifs->if_xname, name) == 0)
951 			break;
952 	}
953 
954 	if (bif == NULL) {
955 		BRIDGE_UNLOCK(sc);
956 		return ENOENT;
957 	}
958 
959 	bridge_delete_member(sc, bif);
960 
961 	BRIDGE_UNLOCK(sc);
962 
963 	bridge_rtdelete(sc, ifs);
964 	bridge_calc_csum_flags(sc);
965 	bridge_calc_link_state(sc);
966 
967 	if (sc->sc_if.if_flags & IFF_RUNNING)
968 		bstp_initialization(sc);
969 
970 	return 0;
971 }
972 
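/*
 * bridge_ioctl_gifflags:
 *
 *	Handle BRDGGIFFLGS: report the flags, STP state, priority and
 *	path cost of a member interface.
 */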
973 static int
974 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
975 {
976 	struct ifbreq *req = arg;
977 	struct bridge_iflist *bif;
978 	struct psref psref;
979 
980 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
981 	if (bif == NULL)
982 		return ENOENT;
983 
984 	req->ifbr_ifsflags = bif->bif_flags;
985 	req->ifbr_state = bif->bif_state;
986 	req->ifbr_priority = bif->bif_priority;
987 	req->ifbr_path_cost = bif->bif_path_cost;
988 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
989 
990 	bridge_release_member(sc, bif, &psref);
991 
992 	return 0;
993 }
994 
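/*
 * bridge_ioctl_sifflags:
 *
 *	Handle BRDGSIFFLGS: set the flags of a member interface.
 *	IFBIF_STP is only allowed on interface types that can run
 *	spanning tree.
 */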
995 static int
996 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
997 {
998 	struct ifbreq *req = arg;
999 	struct bridge_iflist *bif;
1000 	struct psref psref;
1001 
1002 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1003 	if (bif == NULL)
1004 		return ENOENT;
1005 
1006 	if (req->ifbr_ifsflags & IFBIF_STP) {
1007 		switch (bif->bif_ifp->if_type) {
1008 		case IFT_ETHER:
1009 		case IFT_L2TP:
1010 			/* These can do spanning tree. */
1011 			break;
1012 
1013 		default:
1014 			/* Nothing else can. */
1015 			bridge_release_member(sc, bif, &psref);
1016 			return EINVAL;
1017 		}
1018 	}
1019 
1020 	bif->bif_flags = req->ifbr_ifsflags;
1021 
1022 	bridge_release_member(sc, bif, &psref);
1023 
1024 	if (sc->sc_if.if_flags & IFF_RUNNING)
1025 		bstp_initialization(sc);
1026 
1027 	return 0;
1028 }
1029 
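/*
 * bridge_ioctl_scache:
 *
 *	Handle BRDGSCACHE: set the maximum number of cached addresses
 *	and trim the route table down to the new limit.
 */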
1030 static int
1031 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1032 {
1033 	struct ifbrparam *param = arg;
1034 
1035 	sc->sc_brtmax = param->ifbrp_csize;
1036 	bridge_rttrim(sc);
1037 
1038 	return 0;
1039 }
1040 
1041 static int
1042 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1043 {
1044 	struct ifbrparam *param = arg;
1045 
1046 	param->ifbrp_csize = sc->sc_brtmax;
1047 
1048 	return 0;
1049 }
1050 
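/*
 * bridge_ioctl_gifs:
 *
 *	Handle BRDGGIFS/OBRDGGIFS: copy the list of member interfaces
 *	out to the caller, reporting the required buffer size if the
 *	supplied buffer is too small.
 */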
1051 static int
1052 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1053 {
1054 	struct ifbifconf *bifc = arg;
1055 	struct bridge_iflist *bif;
1056 	struct ifbreq *breqs;
1057 	int i, count, error = 0;
1058 
1059 retry:
1060 	BRIDGE_LOCK(sc);
1061 	count = 0;
1062 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1063 		count++;
1064 	BRIDGE_UNLOCK(sc);
1065 
1066 	if (count == 0) {
1067 		bifc->ifbic_len = 0;
1068 		return 0;
1069 	}
1070 
1071 	if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1072 		/* Tell the caller that a larger buffer is needed */
1073 		bifc->ifbic_len = sizeof(*breqs) * count;
1074 		return 0;
1075 	}
1076 
1077 	breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1078 
1079 	BRIDGE_LOCK(sc);
1080 
1081 	i = 0;
1082 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1083 		i++;
1084 	if (i > count) {
1085 		/*
1086 		 * The number of members has been increased.
1087 		 * We need more memory!
1088 		 */
1089 		BRIDGE_UNLOCK(sc);
1090 		kmem_free(breqs, sizeof(*breqs) * count);
1091 		goto retry;
1092 	}
1093 
1094 	i = 0;
1095 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1096 		struct ifbreq *breq = &breqs[i++];
1097 		memset(breq, 0, sizeof(*breq));
1098 
1099 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1100 		    sizeof(breq->ifbr_ifsname));
1101 		breq->ifbr_ifsflags = bif->bif_flags;
1102 		breq->ifbr_state = bif->bif_state;
1103 		breq->ifbr_priority = bif->bif_priority;
1104 		breq->ifbr_path_cost = bif->bif_path_cost;
1105 		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1106 	}
1107 
1108 	/* Don't call copyout while holding the mutex */
1109 	BRIDGE_UNLOCK(sc);
1110 
1111 	for (i = 0; i < count; i++) {
1112 		error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1113 		if (error)
1114 			break;
1115 	}
1116 	bifc->ifbic_len = sizeof(*breqs) * i;
1117 
1118 	kmem_free(breqs, sizeof(*breqs) * count);
1119 
1120 	return error;
1121 }
1122 
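/*
 * bridge_ioctl_rts:
 *
 *	Handle BRDGRTS/OBRDGRTS: copy the learned address (route) table
 *	out to the caller.
 */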
1123 static int
1124 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1125 {
1126 	struct ifbaconf *bac = arg;
1127 	struct bridge_rtnode *brt;
1128 	struct ifbareq bareq;
1129 	int count = 0, error = 0, len;
1130 
1131 	if (bac->ifbac_len == 0)
1132 		return 0;
1133 
1134 	BRIDGE_RT_LOCK(sc);
1135 
1136 	/* The passed buffer is too small; report the required size. */
1137 	if (bac->ifbac_len < (sizeof(bareq) * sc->sc_brtcnt)) {
1138 		count = sc->sc_brtcnt;
1139 		goto out;
1140 	}
1141 
1142 	len = bac->ifbac_len;
1143 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
1144 		if (len < sizeof(bareq))
1145 			goto out;
1146 		memset(&bareq, 0, sizeof(bareq));
1147 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1148 		    sizeof(bareq.ifba_ifsname));
1149 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1150 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1151 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1152 		} else
1153 			bareq.ifba_expire = 0;
1154 		bareq.ifba_flags = brt->brt_flags;
1155 
1156 		error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1157 		if (error)
1158 			goto out;
1159 		count++;
1160 		len -= sizeof(bareq);
1161 	}
1162 out:
1163 	BRIDGE_RT_UNLOCK(sc);
1164 
1165 	bac->ifbac_len = sizeof(bareq) * count;
1166 	return error;
1167 }
1168 
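/*
 * bridge_ioctl_saddr:
 *
 *	Handle BRDGSADDR: add or update an entry for the given address
 *	on the named member interface with the requested flags.
 */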
1169 static int
1170 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1171 {
1172 	struct ifbareq *req = arg;
1173 	struct bridge_iflist *bif;
1174 	int error;
1175 	struct psref psref;
1176 
1177 	bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1178 	if (bif == NULL)
1179 		return ENOENT;
1180 
1181 	error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1182 	    req->ifba_flags);
1183 
1184 	bridge_release_member(sc, bif, &psref);
1185 
1186 	return error;
1187 }
1188 
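/*
 * bridge_ioctl_sto:
 *
 *	Handle BRDGSTO: set the timeout (in seconds) for dynamically
 *	learned addresses.
 */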
1189 static int
1190 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1191 {
1192 	struct ifbrparam *param = arg;
1193 
1194 	sc->sc_brttimeout = param->ifbrp_ctime;
1195 
1196 	return 0;
1197 }
1198 
1199 static int
1200 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1201 {
1202 	struct ifbrparam *param = arg;
1203 
1204 	param->ifbrp_ctime = sc->sc_brttimeout;
1205 
1206 	return 0;
1207 }
1208 
1209 static int
1210 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1211 {
1212 	struct ifbareq *req = arg;
1213 
1214 	return (bridge_rtdaddr(sc, req->ifba_dst));
1215 }
1216 
1217 static int
1218 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1219 {
1220 	struct ifbreq *req = arg;
1221 
1222 	bridge_rtflush(sc, req->ifbr_ifsflags);
1223 
1224 	return 0;
1225 }
1226 
1227 static int
1228 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1229 {
1230 	struct ifbrparam *param = arg;
1231 
1232 	param->ifbrp_prio = sc->sc_bridge_priority;
1233 
1234 	return 0;
1235 }
1236 
1237 static int
1238 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1239 {
1240 	struct ifbrparam *param = arg;
1241 
1242 	sc->sc_bridge_priority = param->ifbrp_prio;
1243 
1244 	if (sc->sc_if.if_flags & IFF_RUNNING)
1245 		bstp_initialization(sc);
1246 
1247 	return 0;
1248 }
1249 
1250 static int
1251 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1252 {
1253 	struct ifbrparam *param = arg;
1254 
1255 	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1256 
1257 	return 0;
1258 }
1259 
1260 static int
1261 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1262 {
1263 	struct ifbrparam *param = arg;
1264 
1265 	if (param->ifbrp_hellotime == 0)
1266 		return EINVAL;
1267 	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1268 
1269 	if (sc->sc_if.if_flags & IFF_RUNNING)
1270 		bstp_initialization(sc);
1271 
1272 	return 0;
1273 }
1274 
1275 static int
1276 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1277 {
1278 	struct ifbrparam *param = arg;
1279 
1280 	param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1281 
1282 	return 0;
1283 }
1284 
1285 static int
1286 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1287 {
1288 	struct ifbrparam *param = arg;
1289 
1290 	if (param->ifbrp_fwddelay == 0)
1291 		return EINVAL;
1292 	sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1293 
1294 	if (sc->sc_if.if_flags & IFF_RUNNING)
1295 		bstp_initialization(sc);
1296 
1297 	return 0;
1298 }
1299 
1300 static int
1301 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1302 {
1303 	struct ifbrparam *param = arg;
1304 
1305 	param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1306 
1307 	return 0;
1308 }
1309 
1310 static int
1311 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1312 {
1313 	struct ifbrparam *param = arg;
1314 
1315 	if (param->ifbrp_maxage == 0)
1316 		return EINVAL;
1317 	sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1318 
1319 	if (sc->sc_if.if_flags & IFF_RUNNING)
1320 		bstp_initialization(sc);
1321 
1322 	return 0;
1323 }
1324 
1325 static int
1326 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1327 {
1328 	struct ifbreq *req = arg;
1329 	struct bridge_iflist *bif;
1330 	struct psref psref;
1331 
1332 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1333 	if (bif == NULL)
1334 		return ENOENT;
1335 
1336 	bif->bif_priority = req->ifbr_priority;
1337 
1338 	if (sc->sc_if.if_flags & IFF_RUNNING)
1339 		bstp_initialization(sc);
1340 
1341 	bridge_release_member(sc, bif, &psref);
1342 
1343 	return 0;
1344 }
1345 
1346 static int
1347 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1348 {
1349 	struct ifbrparam *param = arg;
1350 
1351 	param->ifbrp_filter = sc->sc_filter_flags;
1352 
1353 	return 0;
1354 }
1355 
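/*
 * bridge_ioctl_sfilt:
 *
 *	Handle BRDGSFILT: enable or disable packet filtering on the
 *	bridge, adding or removing the pfil hook as needed.
 */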
1356 static int
1357 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1358 {
1359 	struct ifbrparam *param = arg;
1360 	uint32_t nflags, oflags;
1361 
1362 	if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1363 		return EINVAL;
1364 
1365 	nflags = param->ifbrp_filter;
1366 	oflags = sc->sc_filter_flags;
1367 
1368 	if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1369 		pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1370 			sc->sc_if.if_pfil);
1371 	}
1372 	if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1373 		pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1374 			sc->sc_if.if_pfil);
1375 	}
1376 
1377 	sc->sc_filter_flags = nflags;
1378 
1379 	return 0;
1380 }
1381 
1382 static int
1383 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1384 {
1385 	struct ifbreq *req = arg;
1386 	struct bridge_iflist *bif;
1387 	struct psref psref;
1388 
1389 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1390 	if (bif == NULL)
1391 		return ENOENT;
1392 
1393 	bif->bif_path_cost = req->ifbr_path_cost;
1394 
1395 	if (sc->sc_if.if_flags & IFF_RUNNING)
1396 		bstp_initialization(sc);
1397 
1398 	bridge_release_member(sc, bif, &psref);
1399 
1400 	return 0;
1401 }
1402 
1403 /*
1404  * bridge_ifdetach:
1405  *
1406  *	Detach an interface from a bridge.  Called when a member
1407  *	interface is detaching.
1408  */
1409 static void
1410 bridge_ifdetach(void *xifs)
1411 {
1412 	struct ifnet *ifs;
1413 	struct bridge_softc *sc;
1414 	struct ifbreq breq;
1415 
1416 	ifs = (struct ifnet *)xifs;
1417 	sc = ifs->if_bridge;
1418 
1419 	/* ioctl_lock should prevent this from happening */
1420 	KASSERT(sc != NULL);
1421 
1422 	memset(&breq, 0, sizeof(breq));
1423 	strlcpy(breq.ifbr_ifsname, ifs->if_xname, sizeof(breq.ifbr_ifsname));
1424 
1425 	(void) bridge_ioctl_del(sc, &breq);
1426 }
1427 
1428 /*
1429  * bridge_init:
1430  *
1431  *	Initialize a bridge interface.
1432  */
1433 static int
1434 bridge_init(struct ifnet *ifp)
1435 {
1436 	struct bridge_softc *sc = ifp->if_softc;
1437 
1438 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1439 
1440 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1441 	    bridge_timer, sc);
1442 	bstp_initialization(sc);
1443 
1444 	ifp->if_flags |= IFF_RUNNING;
1445 	return 0;
1446 }
1447 
1448 /*
1449  * bridge_stop:
1450  *
1451  *	Stop the bridge interface.
1452  */
1453 static void
1454 bridge_stop(struct ifnet *ifp, int disable)
1455 {
1456 	struct bridge_softc *sc = ifp->if_softc;
1457 
1458 	KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1459 	ifp->if_flags &= ~IFF_RUNNING;
1460 
1461 	callout_halt(&sc->sc_brcallout, NULL);
1462 	workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1463 	bstp_stop(sc);
1464 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1465 }
1466 
1467 /*
1468  * bridge_enqueue:
1469  *
1470  *	Enqueue a packet on a bridge member interface.
1471  */
1472 void
1473 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1474     int runfilt)
1475 {
1476 	int len, error;
1477 	short mflags;
1478 
1479 	if (runfilt) {
1480 		if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1481 		    dst_ifp, PFIL_OUT) != 0) {
1482 			m_freem(m);
1483 			return;
1484 		}
1485 		if (m == NULL)
1486 			return;
1487 	}
1488 
1489 #ifdef ALTQ
1490 	KERNEL_LOCK(1, NULL);
1491 	/*
1492 	 * If ALTQ is enabled on the member interface, do
1493 	 * classification; the queueing discipline might
1494 	 * not require classification, but might require
1495 	 * the address family/header pointer in the pktattr.
1496 	 */
1497 	if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1498 		/* XXX IFT_ETHER */
1499 		altq_etherclassify(&dst_ifp->if_snd, m);
1500 	}
1501 	KERNEL_UNLOCK_ONE(NULL);
1502 #endif /* ALTQ */
1503 
1504 	if (vlan_has_tag(m) &&
1505 	    !vlan_is_hwtag_enabled(dst_ifp)) {
1506 		(void)ether_inject_vlantag(&m, ETHERTYPE_VLAN,
1507 		    vlan_get_tag(m));
1508 		if (m == NULL) {
1509 			if_statinc(&sc->sc_if, if_oerrors);
1510 			return;
1511 		}
1512 	}
1513 
1514 	len = m->m_pkthdr.len;
1515 	mflags = m->m_flags;
1516 
1517 	error = if_transmit_lock(dst_ifp, m);
1518 	if (error) {
1519 		/* mbuf is already freed */
1520 		if_statinc(&sc->sc_if, if_oerrors);
1521 		return;
1522 	}
1523 
1524 	net_stat_ref_t nsr = IF_STAT_GETREF(&sc->sc_if);
1525 	if_statinc_ref(&sc->sc_if, nsr, if_opackets);
1526 	if_statadd_ref(&sc->sc_if, nsr, if_obytes, len);
1527 	if (mflags & M_MCAST)
1528 		if_statinc_ref(&sc->sc_if, nsr, if_omcasts);
1529 	IF_STAT_PUTREF(&sc->sc_if);
1530 }
1531 
1532 /*
1533  * bridge_output:
1534  *
1535  *	Send output from a bridge member interface.  This
1536  *	performs the bridging function for locally originated
1537  *	packets.
1538  *
1539  *	The mbuf has the Ethernet header already attached.  We must
1540  *	enqueue or free the mbuf before returning.
1541  */
1542 int
1543 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1544     const struct rtentry *rt)
1545 {
1546 	struct ether_header *eh;
1547 	struct ifnet *dst_if;
1548 	struct bridge_softc *sc;
1549 	struct mbuf *n;
1550 	int s;
1551 
1552 	/*
1553 	 * bridge_output() is called from ether_output(); furthermore, the
1554 	 * ifp argument doesn't point to bridge(4), so don't assert
1555 	 * IFEF_MPSAFE here.
1556 	 */
1557 
1558 	KASSERT(m->m_len >= ETHER_HDR_LEN);
1559 
1560 	eh = mtod(m, struct ether_header *);
1561 	sc = ifp->if_bridge;
1562 
1563 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1564 		if (memcmp(etherbroadcastaddr,
1565 		    eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1566 			m->m_flags |= M_BCAST;
1567 		else
1568 			m->m_flags |= M_MCAST;
1569 	}
1570 
1571 	/*
1572 	 * If the bridge is down but the original output interface is up,
1573 	 * go ahead and send out on that interface.  Otherwise, the packet
1574 	 * is dropped below.
1575 	 */
1576 	if (__predict_false(sc == NULL) ||
1577 	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1578 		dst_if = ifp;
1579 		goto unicast_asis;
1580 	}
1581 
1582 	/*
1583 	 * If the packet is a multicast, or we don't know a better way to
1584 	 * get there, send to all interfaces.
1585 	 */
1586 	if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1587 		dst_if = NULL;
1588 	else
1589 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1590 
1591 	/*
1592 	 * In general, we need to handle TX offload in software before
1593 	 * enqueueing a packet. However, we can send it as is in the
1594 	 * cases of unicast via (1) the source interface, or (2) an
1595 	 * interface which supports the specified offload options.
1596 	 * For multicast or broadcast, send it as is only if (3) all
1597 	 * the member interfaces support the specified options.
1598 	 */
1599 
1600 	/*
1601 	 * Unicast via the source interface.
1602 	 */
1603 	if (dst_if == ifp)
1604 		goto unicast_asis;
1605 
1606 	/*
1607 	 * Unicast via other interface.
1608 	 */
1609 	if (dst_if != NULL) {
1610 		KASSERT(m->m_flags & M_PKTHDR);
1611 		if (TX_OFFLOAD_SUPPORTED(dst_if->if_csum_flags_tx,
1612 		    m->m_pkthdr.csum_flags)) {
1613 			/*
1614 			 * Unicast via an interface which supports the
1615 			 * specified offload options.
1616 			 */
1617 			goto unicast_asis;
1618 		}
1619 
1620 		/*
1621 		 * Handle TX offload in software. For TSO, a packet is
1622 		 * split into multiple chunks. Thus, the return value of
1623 		 * ether_sw_offload_tx() is an mbuf queue consisting of them.
1624 		 */
1625 		m = ether_sw_offload_tx(ifp, m);
1626 		if (m == NULL)
1627 			return 0;
1628 
1629 		do {
1630 			n = m->m_nextpkt;
1631 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1632 				m_freem(m);
1633 			else
1634 				bridge_enqueue(sc, dst_if, m, 0);
1635 			m = n;
1636 		} while (m != NULL);
1637 
1638 		return 0;
1639 	}
1640 
1641 	/*
1642 	 * Multicast or broadcast.
1643 	 */
1644 	if (TX_OFFLOAD_SUPPORTED(sc->sc_csum_flags_tx,
1645 	    m->m_pkthdr.csum_flags)) {
1646 		/*
1647 		 * Specified TX offload options are supported by all
1648 		 * the member interfaces of this bridge.
1649 		 */
1650 		m->m_nextpkt = NULL;	/* XXX */
1651 	} else {
1652 		/*
1653 		 * Otherwise, handle TX offload in software.
1654 		 */
1655 		m = ether_sw_offload_tx(ifp, m);
1656 		if (m == NULL)
1657 			return 0;
1658 	}
1659 
1660 	do {
1661 		/* XXX Should call bridge_broadcast, but there are locking
1662 		 * issues which need resolving first. */
1663 		struct bridge_iflist *bif;
1664 		struct mbuf *mc;
1665 		bool used = false;
1666 
1667 		n = m->m_nextpkt;
1668 
1669 		BRIDGE_PSZ_RENTER(s);
1670 		BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1671 			struct psref psref;
1672 
1673 			bridge_acquire_member(sc, bif, &psref);
1674 			BRIDGE_PSZ_REXIT(s);
1675 
1676 			dst_if = bif->bif_ifp;
1677 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1678 				goto next;
1679 
1680 			/*
1681 			 * If this is not the original output interface,
1682 			 * and the interface is participating in spanning
1683 			 * tree, make sure the port is in a state that
1684 			 * allows forwarding.
1685 			 */
1686 			if (dst_if != ifp &&
1687 			    (bif->bif_flags & IFBIF_STP) != 0) {
1688 				switch (bif->bif_state) {
1689 				case BSTP_IFSTATE_BLOCKING:
1690 				case BSTP_IFSTATE_LISTENING:
1691 				case BSTP_IFSTATE_DISABLED:
1692 					goto next;
1693 				}
1694 			}
1695 
1696 			if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1697 			    bif_next) == NULL &&
1698 			    ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1699 			    dst_if == ifp))
1700 			{
1701 				used = true;
1702 				mc = m;
1703 			} else {
1704 				mc = m_copypacket(m, M_DONTWAIT);
1705 				if (mc == NULL) {
1706 					if_statinc(&sc->sc_if, if_oerrors);
1707 					goto next;
1708 				}
1709 			}
1710 
1711 			bridge_enqueue(sc, dst_if, mc, 0);
1712 
1713 			if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1714 			    dst_if != ifp)
1715 			{
1716 				if (PSLIST_READER_NEXT(bif,
1717 				    struct bridge_iflist, bif_next) == NULL)
1718 				{
1719 					used = true;
1720 					mc = m;
1721 				} else {
1722 					mc = m_copypacket(m, M_DONTWAIT);
1723 					if (mc == NULL) {
1724 						if_statinc(&sc->sc_if,
1725 						    if_oerrors);
1726 						goto next;
1727 					}
1728 				}
1729 
1730 				m_set_rcvif(mc, dst_if);
1731 				mc->m_flags &= ~M_PROMISC;
1732 
1733 				const int _s = splsoftnet();
1734 				KERNEL_LOCK_UNLESS_IFP_MPSAFE(dst_if);
1735 				ether_input(dst_if, mc);
1736 				KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(dst_if);
1737 				splx(_s);
1738 			}
1739 
1740 next:
1741 			BRIDGE_PSZ_RENTER(s);
1742 			bridge_release_member(sc, bif, &psref);
1743 
1744 			/* Guarantee we don't re-enter the loop as we already
1745 			 * decided we're at the end. */
1746 			if (used)
1747 				break;
1748 		}
1749 		BRIDGE_PSZ_REXIT(s);
1750 
1751 		if (!used)
1752 			m_freem(m);
1753 
1754 		m = n;
1755 	} while (m != NULL);
1756 	return 0;
1757 
1758 unicast_asis:
1759 	/*
1760 	 * XXX Spanning tree consideration here?
1761 	 */
1762 	if ((dst_if->if_flags & IFF_RUNNING) == 0)
1763 		m_freem(m);
1764 	else
1765 		bridge_enqueue(sc, dst_if, m, 0);
1766 	return 0;
1767 }
1768 
1769 /*
1770  * bridge_start:
1771  *
1772  *	Start output on a bridge.
1773  *
1774  *	NOTE: This routine should never be called in this implementation.
1775  */
1776 static void
1777 bridge_start(struct ifnet *ifp)
1778 {
1779 
1780 	printf("%s: bridge_start() called\n", ifp->if_xname);
1781 }
1782 
1783 /*
1784  * bridge_forward:
1785  *
1786  *	The forwarding function of the bridge.
1787  */
1788 static void
1789 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1790 {
1791 	struct bridge_iflist *bif;
1792 	struct ifnet *src_if, *dst_if;
1793 	struct ether_header *eh;
1794 	struct psref psref;
1795 	struct psref psref_src;
1796 	DECLARE_LOCK_VARIABLE;
1797 
1798 	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1799 		return;
1800 
1801 	src_if = m_get_rcvif_psref(m, &psref_src);
1802 	if (src_if == NULL) {
1803 		/* Interface is being destroyed? */
1804 		m_freem(m);
1805 		goto out;
1806 	}
1807 
1808 	if_statadd2(&sc->sc_if, if_ipackets, 1, if_ibytes, m->m_pkthdr.len);
1809 
1810 	/*
1811 	 * Look up the bridge_iflist.
1812 	 */
1813 	bif = bridge_lookup_member_if(sc, src_if, &psref);
1814 	if (bif == NULL) {
1815 		/* Interface is not a bridge member (anymore?) */
1816 		m_freem(m);
1817 		goto out;
1818 	}
1819 
1820 	if (bif->bif_flags & IFBIF_STP) {
1821 		switch (bif->bif_state) {
1822 		case BSTP_IFSTATE_BLOCKING:
1823 		case BSTP_IFSTATE_LISTENING:
1824 		case BSTP_IFSTATE_DISABLED:
1825 			m_freem(m);
1826 			bridge_release_member(sc, bif, &psref);
1827 			goto out;
1828 		}
1829 	}
1830 
1831 	eh = mtod(m, struct ether_header *);
1832 
1833 	/*
1834 	 * If the interface is learning, and the source
1835 	 * address is valid and not multicast, record
1836 	 * the address.
1837 	 */
1838 	if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1839 	    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1840 	    (eh->ether_shost[0] == 0 &&
1841 	     eh->ether_shost[1] == 0 &&
1842 	     eh->ether_shost[2] == 0 &&
1843 	     eh->ether_shost[3] == 0 &&
1844 	     eh->ether_shost[4] == 0 &&
1845 	     eh->ether_shost[5] == 0) == 0) {
1846 		(void) bridge_rtupdate(sc, eh->ether_shost,
1847 		    src_if, 0, IFBAF_DYNAMIC);
1848 	}
1849 
1850 	if ((bif->bif_flags & IFBIF_STP) != 0 &&
1851 	    bif->bif_state == BSTP_IFSTATE_LEARNING) {
1852 		m_freem(m);
1853 		bridge_release_member(sc, bif, &psref);
1854 		goto out;
1855 	}
1856 
1857 	bridge_release_member(sc, bif, &psref);
1858 
1859 	/*
1860 	 * At this point, the port either doesn't participate
1861 	 * in spanning tree or it is in the forwarding state.
1862 	 */
1863 
1864 	/*
1865 	 * If the packet is unicast, destined for someone on
1866 	 * "this" side of the bridge, drop it.
1867 	 */
1868 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1869 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1870 		if (src_if == dst_if) {
1871 			m_freem(m);
1872 			goto out;
1873 		}
1874 	} else {
1875 		/* ...forward it to all interfaces. */
1876 		if_statinc(&sc->sc_if, if_imcasts);
1877 		dst_if = NULL;
1878 	}
1879 
1880 	if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0) {
1881 		m_freem(m);
1882 		goto out;
1883 	}
1884 	if (m == NULL)
1885 		goto out;
1886 
1887 	if (dst_if == NULL) {
1888 		bridge_broadcast(sc, src_if, m);
1889 		goto out;
1890 	}
1891 
1892 	m_put_rcvif_psref(src_if, &psref_src);
1893 	src_if = NULL;
1894 
1895 	/*
1896 	 * At this point, we're dealing with a unicast frame
1897 	 * going to a different interface.
1898 	 */
1899 	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1900 		m_freem(m);
1901 		goto out;
1902 	}
1903 
1904 	bif = bridge_lookup_member_if(sc, dst_if, &psref);
1905 	if (bif == NULL) {
1906 		/* Not a member of the bridge (anymore?) */
1907 		m_freem(m);
1908 		goto out;
1909 	}
1910 
1911 	if (bif->bif_flags & IFBIF_STP) {
1912 		switch (bif->bif_state) {
1913 		case BSTP_IFSTATE_DISABLED:
1914 		case BSTP_IFSTATE_BLOCKING:
1915 			m_freem(m);
1916 			bridge_release_member(sc, bif, &psref);
1917 			goto out;
1918 		}
1919 	}
1920 
1921 	bridge_release_member(sc, bif, &psref);
1922 
1923 	/*
1924 	 * Before enqueueing this packet to the destination interface,
1925 	 * clear any in-bound checksum flags to prevent them from being
1926 	 * misused as out-bound flags.
1927 	 */
1928 	m->m_pkthdr.csum_flags = 0;
1929 
1930 	ACQUIRE_GLOBAL_LOCKS();
1931 	bridge_enqueue(sc, dst_if, m, 1);
1932 	RELEASE_GLOBAL_LOCKS();
1933 out:
1934 	if (src_if != NULL)
1935 		m_put_rcvif_psref(src_if, &psref_src);
1936 	return;
1937 }
1938 
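/*
 * bstp_state_before_learning:
 *
 *	Return true if the member's STP state (blocking, listening or
 *	disabled) means frames must not yet be learned from or forwarded.
 */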
1939 static bool
1940 bstp_state_before_learning(struct bridge_iflist *bif)
1941 {
1942 	if (bif->bif_flags & IFBIF_STP) {
1943 		switch (bif->bif_state) {
1944 		case BSTP_IFSTATE_BLOCKING:
1945 		case BSTP_IFSTATE_LISTENING:
1946 		case BSTP_IFSTATE_DISABLED:
1947 			return true;
1948 		}
1949 	}
1950 	return false;
1951 }
1952 
1953 static bool
1954 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1955 {
1956 	uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1957 
1958 	if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1959 #if NCARP > 0
1960 	    || (bif->bif_ifp->if_carp &&
1961 	        carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1962 #endif /* NCARP > 0 */
1963 	    )
1964 		return true;
1965 
1966 	return false;
1967 }
1968 
1969 /*
1970  * bridge_input:
1971  *
1972  *	Receive input from a member interface.  Queue the packet for
1973  *	bridging if it is not for us.
1974  */
1975 static void
1976 bridge_input(struct ifnet *ifp, struct mbuf *m)
1977 {
1978 	struct bridge_softc *sc = ifp->if_bridge;
1979 	struct bridge_iflist *bif;
1980 	struct ether_header *eh;
1981 	struct psref psref;
1982 	int bound;
1983 	DECLARE_LOCK_VARIABLE;
1984 
1985 	KASSERT(!cpu_intr_p());
1986 
1987 	if (__predict_false(sc == NULL) ||
1988 	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1989 		ACQUIRE_GLOBAL_LOCKS();
1990 		ether_input(ifp, m);
1991 		RELEASE_GLOBAL_LOCKS();
1992 		return;
1993 	}
1994 
1995 	bound = curlwp_bind();
1996 	bif = bridge_lookup_member_if(sc, ifp, &psref);
1997 	if (bif == NULL) {
1998 		curlwp_bindx(bound);
1999 		ACQUIRE_GLOBAL_LOCKS();
2000 		ether_input(ifp, m);
2001 		RELEASE_GLOBAL_LOCKS();
2002 		return;
2003 	}
2004 
2005 	eh = mtod(m, struct ether_header *);
2006 
2007 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
2008 		if (memcmp(etherbroadcastaddr,
2009 		    eh->ether_dhost, ETHER_ADDR_LEN) == 0)
2010 			m->m_flags |= M_BCAST;
2011 		else
2012 			m->m_flags |= M_MCAST;
2013 	}
2014 
2015 	/*
2016 	 * A 'fast' path for packets addressed to interfaces that are
2017 	 * part of this bridge.
2018 	 */
2019 	if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
2020 	    !bstp_state_before_learning(bif)) {
2021 		struct bridge_iflist *_bif;
2022 		struct ifnet *_ifp = NULL;
2023 		int s;
2024 		struct psref _psref;
2025 
2026 		BRIDGE_PSZ_RENTER(s);
2027 		BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
2028 			/* It is destined for us. */
2029 			if (bridge_ourether(_bif, eh, 0)) {
2030 				bridge_acquire_member(sc, _bif, &_psref);
2031 				BRIDGE_PSZ_REXIT(s);
2032 				if (_bif->bif_flags & IFBIF_LEARNING)
2033 					(void) bridge_rtupdate(sc,
2034 					    eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
2035 				m_set_rcvif(m, _bif->bif_ifp);
2036 				_ifp = _bif->bif_ifp;
2037 				bridge_release_member(sc, _bif, &_psref);
2038 				goto out;
2039 			}
2040 
2041 			/* We just received a packet that we sent out. */
2042 			if (bridge_ourether(_bif, eh, 1))
2043 				break;
2044 		}
2045 		BRIDGE_PSZ_REXIT(s);
2046 out:
2047 
2048 		if (_bif != NULL) {
2049 			bridge_release_member(sc, bif, &psref);
2050 			curlwp_bindx(bound);
2051 			if (_ifp != NULL) {
2052 				m->m_flags &= ~M_PROMISC;
2053 				ACQUIRE_GLOBAL_LOCKS();
2054 				ether_input(_ifp, m);
2055 				RELEASE_GLOBAL_LOCKS();
2056 			} else
2057 				m_freem(m);
2058 			return;
2059 		}
2060 	}
2061 
2062 	/* Tap off 802.1D packets; they do not get forwarded. */
2063 	if (bif->bif_flags & IFBIF_STP &&
2064 	    memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
2065 		bstp_input(sc, bif, m);
2066 		bridge_release_member(sc, bif, &psref);
2067 		curlwp_bindx(bound);
2068 		return;
2069 	}
2070 
2071 	/*
2072 	 * A normal switch would discard the packet here, but that's not what
2073 	 * we've done historically. This also prevents some obnoxious behaviour.
2074 	 */
2075 	if (bstp_state_before_learning(bif)) {
2076 		bridge_release_member(sc, bif, &psref);
2077 		curlwp_bindx(bound);
2078 		ACQUIRE_GLOBAL_LOCKS();
2079 		ether_input(ifp, m);
2080 		RELEASE_GLOBAL_LOCKS();
2081 		return;
2082 	}
2083 
2084 	bridge_release_member(sc, bif, &psref);
2085 
2086 	bridge_forward(sc, m);
2087 
2088 	curlwp_bindx(bound);
2089 }
2090 
2091 /*
2092  * bridge_broadcast:
2093  *
2094  *	Send a frame to all interfaces that are members of
2095  *	the bridge, except for the one on which the packet
2096  *	arrived.
2097  */
2098 static void
2099 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2100     struct mbuf *m)
2101 {
2102 	struct bridge_iflist *bif;
2103 	struct mbuf *mc;
2104 	struct ifnet *dst_if;
2105 	bool bmcast;
2106 	int s;
2107 	DECLARE_LOCK_VARIABLE;
2108 
2109 	bmcast = m->m_flags & (M_BCAST|M_MCAST);
2110 
2111 	BRIDGE_PSZ_RENTER(s);
2112 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
2113 		struct psref psref;
2114 
2115 		bridge_acquire_member(sc, bif, &psref);
2116 		BRIDGE_PSZ_REXIT(s);
2117 
2118 		dst_if = bif->bif_ifp;
2119 
2120 		if (bif->bif_flags & IFBIF_STP) {
2121 			switch (bif->bif_state) {
2122 			case BSTP_IFSTATE_BLOCKING:
2123 			case BSTP_IFSTATE_DISABLED:
2124 				goto next;
2125 			}
2126 		}
2127 
2128 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2129 			goto next;
2130 
2131 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2132 			goto next;
2133 
2134 		if (dst_if != src_if) {
2135 			mc = m_copypacket(m, M_DONTWAIT);
2136 			if (mc == NULL) {
2137 				if_statinc(&sc->sc_if, if_oerrors);
2138 				goto next;
2139 			}
2140 			/*
2141 			 * Before enqueueing this packet to the destination
2142 			 * interface, clear any in-bound checksum flags to
2143 			 * prevent them from being misused as out-bound flags.
2144 			 */
2145 			mc->m_pkthdr.csum_flags = 0;
2146 
2147 			ACQUIRE_GLOBAL_LOCKS();
2148 			bridge_enqueue(sc, dst_if, mc, 1);
2149 			RELEASE_GLOBAL_LOCKS();
2150 		}
2151 
2152 		if (bmcast) {
2153 			mc = m_copypacket(m, M_DONTWAIT);
2154 			if (mc == NULL) {
2155 				if_statinc(&sc->sc_if, if_oerrors);
2156 				goto next;
2157 			}
2158 			/*
2159 			 * Before enqueueing this packet to the destination
2160 			 * interface, clear any in-bound checksum flags to
2161 			 * prevent them from being misused as out-bound flags.
2162 			 */
2163 			mc->m_pkthdr.csum_flags = 0;
2164 
2165 			m_set_rcvif(mc, dst_if);
2166 			mc->m_flags &= ~M_PROMISC;
2167 
2168 			ACQUIRE_GLOBAL_LOCKS();
2169 			ether_input(dst_if, mc);
2170 			RELEASE_GLOBAL_LOCKS();
2171 		}
2172 next:
2173 		BRIDGE_PSZ_RENTER(s);
2174 		bridge_release_member(sc, bif, &psref);
2175 	}
2176 	BRIDGE_PSZ_REXIT(s);
2177 
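	/* Only copies were handed off above; free the original frame. */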
2178 	m_freem(m);
2179 }
2180 
2181 static int
2182 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2183     struct bridge_rtnode **brtp)
2184 {
2185 	struct bridge_rtnode *brt;
2186 	int error;
2187 
2188 	if (sc->sc_brtcnt >= sc->sc_brtmax)
2189 		return ENOSPC;
2190 
2191 	/*
2192 	 * Allocate a new bridge forwarding node, and
2193 	 * initialize the expiration time and Ethernet
2194 	 * address.
2195 	 */
2196 	brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2197 	if (brt == NULL)
2198 		return ENOMEM;
2199 
2200 	memset(brt, 0, sizeof(*brt));
2201 	brt->brt_expire = time_uptime + sc->sc_brttimeout;
2202 	brt->brt_flags = IFBAF_DYNAMIC;
2203 	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2204 	PSLIST_ENTRY_INIT(brt, brt_list);
2205 	PSLIST_ENTRY_INIT(brt, brt_hash);
2206 
2207 	BRIDGE_RT_LOCK(sc);
2208 	error = bridge_rtnode_insert(sc, brt);
2209 	BRIDGE_RT_UNLOCK(sc);
2210 
2211 	if (error != 0) {
2212 		pool_put(&bridge_rtnode_pool, brt);
2213 		return error;
2214 	}
2215 
2216 	*brtp = brt;
2217 	return 0;
2218 }
2219 
2220 /*
2221  * bridge_rtupdate:
2222  *
2223  *	Add a bridge routing entry.
2224  */
2225 static int
2226 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2227     struct ifnet *dst_if, int setflags, uint8_t flags)
2228 {
2229 	struct bridge_rtnode *brt;
2230 	int s;
2231 
2232 again:
2233 	/*
2234 	 * A route for this destination might already exist.  If so,
2235 	 * update it, otherwise create a new one.
2236 	 */
2237 	BRIDGE_RT_RENTER(s);
2238 	brt = bridge_rtnode_lookup(sc, dst);
2239 
2240 	if (brt != NULL) {
2241 		brt->brt_ifp = dst_if;
2242 		if (setflags) {
2243 			brt->brt_flags = flags;
2244 			if (flags & IFBAF_STATIC)
2245 				brt->brt_expire = 0;
2246 			else
2247 				brt->brt_expire = time_uptime + sc->sc_brttimeout;
2248 		} else {
2249 			if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2250 				brt->brt_expire = time_uptime + sc->sc_brttimeout;
2251 		}
2252 	}
2253 	BRIDGE_RT_REXIT(s);
2254 
2255 	if (brt == NULL) {
2256 		int r;
2257 
2258 		r = bridge_rtalloc(sc, dst, &brt);
2259 		if (r != 0)
2260 			return r;
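		/*
		 * bridge_rtalloc() only creates the entry; it does not set
		 * the destination interface.  Loop back so the update path
		 * above fills in brt_ifp (and the flags when requested).
		 */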
2261 		goto again;
2262 	}
2263 
2264 	return 0;
2265 }
2266 
2267 /*
2268  * bridge_rtlookup:
2269  *
2270  *	Lookup the destination interface for an address.
2271  */
2272 static struct ifnet *
2273 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2274 {
2275 	struct bridge_rtnode *brt;
2276 	struct ifnet *ifs = NULL;
2277 	int s;
2278 
2279 	BRIDGE_RT_RENTER(s);
2280 	brt = bridge_rtnode_lookup(sc, addr);
2281 	if (brt != NULL)
2282 		ifs = brt->brt_ifp;
2283 	BRIDGE_RT_REXIT(s);
2284 
2285 	return ifs;
2286 }
2287 
2288 typedef bool (*bridge_iterate_cb_t)
2289     (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2290 
2291 /*
2292  * bridge_rtlist_iterate_remove:
2293  *
2294  *	Iterate over sc->sc_rtlist and remove the rtnodes that the func
2295  *	callback judges should be removed.  Removals are done in a
2296  *	pserialize-safe manner; to this end, all kmem_* operations are
2297  *	kept outside the mutex.
2298  */
2299 static void
2300 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2301 {
2302 	struct bridge_rtnode *brt;
2303 	struct bridge_rtnode **brt_list;
2304 	int i, count;
2305 
2306 retry:
2307 	count = sc->sc_brtcnt;
2308 	if (count == 0)
2309 		return;
2310 	brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2311 
2312 	BRIDGE_RT_LOCK(sc);
2313 	if (__predict_false(sc->sc_brtcnt > count)) {
2314 		/* The number of rtnodes increased; we need more memory */
2315 		BRIDGE_RT_UNLOCK(sc);
2316 		kmem_free(brt_list, sizeof(*brt_list) * count);
2317 		goto retry;
2318 	}
2319 
2320 	i = 0;
2321 	/*
2322 	 * We don't need to use a _SAFE variant here because we know
2323 	 * that a removed item keeps its next pointer as-is thanks to
2324 	 * pslist(9) and isn't freed in the loop.
2325 	 */
2326 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2327 		bool need_break = false;
2328 		if (func(sc, brt, &need_break, arg)) {
2329 			bridge_rtnode_remove(sc, brt);
2330 			brt_list[i++] = brt;
2331 		}
2332 		if (need_break)
2333 			break;
2334 	}
2335 
2336 	if (i > 0)
2337 		BRIDGE_RT_PSZ_PERFORM(sc);
2338 	BRIDGE_RT_UNLOCK(sc);
2339 
2340 	while (--i >= 0)
2341 		bridge_rtnode_destroy(brt_list[i]);
2342 
2343 	kmem_free(brt_list, sizeof(*brt_list) * count);
2344 }
2345 
2346 static bool
2347 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2348     bool *need_break, void *arg)
2349 {
2350 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2351 		/* Take the subsequent removal into account */
2352 		if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2353 			*need_break = true;
2354 		return true;
2355 	} else
2356 		return false;
2357 }
2358 
2359 static void
2360 bridge_rttrim0(struct bridge_softc *sc)
2361 {
2362 	bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2363 }
2364 
2365 /*
2366  * bridge_rttrim:
2367  *
2368  *	Trim the routing table so that we have a number
2369  *	of routing entries less than or equal to the
2370  *	maximum number.
2371  */
2372 static void
2373 bridge_rttrim(struct bridge_softc *sc)
2374 {
2375 
2376 	/* Make sure we actually need to do this. */
2377 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2378 		return;
2379 
2380 	/* Force an aging cycle; this might trim enough addresses. */
2381 	bridge_rtage(sc);
2382 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2383 		return;
2384 
2385 	bridge_rttrim0(sc);
2386 
2387 	return;
2388 }
2389 
2390 /*
2391  * bridge_timer:
2392  *
2393  *	Aging timer for the bridge.
2394  */
2395 static void
2396 bridge_timer(void *arg)
2397 {
2398 	struct bridge_softc *sc = arg;
2399 
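	/*
	 * The aging pass sleeps (KM_SLEEP allocation and
	 * pserialize_perform(9)), which is not allowed in callout
	 * (softint) context, so hand the work off to a workqueue thread.
	 */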
2400 	workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2401 }
2402 
2403 static void
2404 bridge_rtage_work(struct work *wk, void *arg)
2405 {
2406 	struct bridge_softc *sc = arg;
2407 
2408 	KASSERT(wk == &sc->sc_rtage_wk);
2409 
2410 	bridge_rtage(sc);
2411 
2412 	if (sc->sc_if.if_flags & IFF_RUNNING)
2413 		callout_reset(&sc->sc_brcallout,
2414 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2415 }
2416 
2417 static bool
2418 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2419     bool *need_break, void *arg)
2420 {
2421 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2422 	    time_uptime >= brt->brt_expire)
2423 		return true;
2424 	else
2425 		return false;
2426 }
2427 
2428 /*
2429  * bridge_rtage:
2430  *
2431  *	Perform an aging cycle.
2432  */
2433 static void
2434 bridge_rtage(struct bridge_softc *sc)
2435 {
2436 	bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2437 }
2438 
2439 
2440 static bool
2441 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2442     bool *need_break, void *arg)
2443 {
2444 	int full = *(int*)arg;
2445 
2446 	if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2447 		return true;
2448 	else
2449 		return false;
2450 }
2451 
2452 /*
2453  * bridge_rtflush:
2454  *
2455  *	Remove all dynamic addresses from the bridge.
2456  */
2457 static void
2458 bridge_rtflush(struct bridge_softc *sc, int full)
2459 {
2460 	bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2461 }
2462 
2463 /*
2464  * bridge_rtdaddr:
2465  *
2466  *	Remove an address from the table.
2467  */
2468 static int
2469 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2470 {
2471 	struct bridge_rtnode *brt;
2472 
2473 	BRIDGE_RT_LOCK(sc);
2474 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2475 		BRIDGE_RT_UNLOCK(sc);
2476 		return ENOENT;
2477 	}
2478 	bridge_rtnode_remove(sc, brt);
2479 	BRIDGE_RT_PSZ_PERFORM(sc);
2480 	BRIDGE_RT_UNLOCK(sc);
2481 
2482 	bridge_rtnode_destroy(brt);
2483 
2484 	return 0;
2485 }
2486 
2487 /*
2488  * bridge_rtdelete:
2489  *
2490  *	Delete routes to a specific member interface.
2491  */
2492 static void
2493 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2494 {
2495 	struct bridge_rtnode *brt;
2496 
2497 	/* XXX pserialize_perform for each entry is slow */
2498 again:
2499 	BRIDGE_RT_LOCK(sc);
2500 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2501 		if (brt->brt_ifp == ifp)
2502 			break;
2503 	}
2504 	if (brt == NULL) {
2505 		BRIDGE_RT_UNLOCK(sc);
2506 		return;
2507 	}
2508 	bridge_rtnode_remove(sc, brt);
2509 	BRIDGE_RT_PSZ_PERFORM(sc);
2510 	BRIDGE_RT_UNLOCK(sc);
2511 
2512 	bridge_rtnode_destroy(brt);
2513 
2514 	goto again;
2515 }
2516 
2517 /*
2518  * bridge_rtable_init:
2519  *
2520  *	Initialize the route table for this bridge.
2521  */
2522 static void
2523 bridge_rtable_init(struct bridge_softc *sc)
2524 {
2525 	int i;
2526 
2527 	sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2528 	    KM_SLEEP);
2529 
2530 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2531 		PSLIST_INIT(&sc->sc_rthash[i]);
2532 
2533 	sc->sc_rthash_key = cprng_fast32();
2534 
2535 	PSLIST_INIT(&sc->sc_rtlist);
2536 
2537 	sc->sc_rtlist_psz = pserialize_create();
2538 	sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2539 }
2540 
2541 /*
2542  * bridge_rtable_fini:
2543  *
2544  *	Deconstruct the route table for this bridge.
2545  */
2546 static void
2547 bridge_rtable_fini(struct bridge_softc *sc)
2548 {
2549 
2550 	kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2551 	mutex_obj_free(sc->sc_rtlist_lock);
2552 	pserialize_destroy(sc->sc_rtlist_psz);
2553 }
2554 
2555 /*
2556  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2557  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
2558  */
2559 #define	mix(a, b, c)							\
2560 do {									\
2561 	a -= b; a -= c; a ^= (c >> 13);					\
2562 	b -= c; b -= a; b ^= (a << 8);					\
2563 	c -= a; c -= b; c ^= (b >> 13);					\
2564 	a -= b; a -= c; a ^= (c >> 12);					\
2565 	b -= c; b -= a; b ^= (a << 16);					\
2566 	c -= a; c -= b; c ^= (b >> 5);					\
2567 	a -= b; a -= c; a ^= (c >> 3);					\
2568 	b -= c; b -= a; b ^= (a << 10);					\
2569 	c -= a; c -= b; c ^= (b >> 15);					\
2570 } while (/*CONSTCOND*/0)
2571 
2572 static inline uint32_t
2573 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2574 {
2575 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2576 
2577 	b += addr[5] << 8;
2578 	b += addr[4];
2579 	a += (uint32_t)addr[3] << 24;
2580 	a += addr[2] << 16;
2581 	a += addr[1] << 8;
2582 	a += addr[0];
2583 
2584 	mix(a, b, c);
2585 
2586 	return (c & BRIDGE_RTHASH_MASK);
2587 }
2588 
2589 #undef mix
2590 
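/*
 * A minimal userland sketch (illustration only, not part of the kernel
 * build) that replicates bridge_rthash() above so the bucket spread of
 * a set of addresses can be checked outside the kernel.  The table size
 * used here is an assumption for the sketch; the real values come from
 * BRIDGE_RTHASH_SIZE and BRIDGE_RTHASH_MASK defined earlier in this file.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define SKETCH_RTHASH_SIZE	1024	// assumed power of two
 *	#define SKETCH_RTHASH_MASK	(SKETCH_RTHASH_SIZE - 1)
 *
 *	static uint32_t
 *	sketch_rthash(uint32_t key, const uint8_t *addr)
 *	{
 *		uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key;
 *
 *		b += addr[5] << 8;
 *		b += addr[4];
 *		a += (uint32_t)addr[3] << 24;
 *		a += addr[2] << 16;
 *		a += addr[1] << 8;
 *		a += addr[0];
 *
 *		// The same Bob Jenkins mix() as above, written out.
 *		a -= b; a -= c; a ^= (c >> 13);
 *		b -= c; b -= a; b ^= (a << 8);
 *		c -= a; c -= b; c ^= (b >> 13);
 *		a -= b; a -= c; a ^= (c >> 12);
 *		b -= c; b -= a; b ^= (a << 16);
 *		c -= a; c -= b; c ^= (b >> 5);
 *		a -= b; a -= c; a ^= (c >> 3);
 *		b -= c; b -= a; b ^= (a << 10);
 *		c -= a; c -= b; c ^= (b >> 15);
 *
 *		return c & SKETCH_RTHASH_MASK;
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *		printf("bucket %u\n", sketch_rthash(0x12345678, mac));
 *		return 0;
 *	}
 */
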
2591 /*
2592  * bridge_rtnode_lookup:
2593  *
2594  *	Look up a bridge route node for the specified destination.
2595  */
2596 static struct bridge_rtnode *
2597 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2598 {
2599 	struct bridge_rtnode *brt;
2600 	uint32_t hash;
2601 	int dir;
2602 
2603 	hash = bridge_rthash(sc, addr);
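	/*
	 * The hash chains are kept sorted in descending memcmp() order
	 * of the address (see bridge_rtnode_insert()), so the search
	 * can stop as soon as an entry smaller than the target is seen.
	 */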
2604 	BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash) {
2605 		dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2606 		if (dir == 0)
2607 			return brt;
2608 		if (dir > 0)
2609 			return NULL;
2610 	}
2611 
2612 	return NULL;
2613 }
2614 
2615 /*
2616  * bridge_rtnode_insert:
2617  *
2618  *	Insert the specified bridge node into the route table.  We
2619  *	assume the entry is not already in the table.
2620  */
2621 static int
2622 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2623 {
2624 	struct bridge_rtnode *lbrt, *prev = NULL;
2625 	uint32_t hash;
2626 
2627 	KASSERT(BRIDGE_RT_LOCKED(sc));
2628 
2629 	hash = bridge_rthash(sc, brt->brt_addr);
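	/*
	 * Keep each chain sorted in descending memcmp() order of the
	 * address so that bridge_rtnode_lookup() can stop early: walk
	 * until an entry smaller than the new address is found and
	 * insert just before it.
	 */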
2630 	BRIDGE_RTHASH_WRITER_FOREACH(lbrt, sc, hash) {
2631 		int dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2632 		if (dir == 0)
2633 			return EEXIST;
2634 		if (dir > 0)
2635 			break;
2636 		prev = lbrt;
2637 	}
2638 	if (prev == NULL)
2639 		BRIDGE_RTHASH_WRITER_INSERT_HEAD(sc, hash, brt);
2640 	else
2641 		BRIDGE_RTHASH_WRITER_INSERT_AFTER(prev, brt);
2642 
2643 	BRIDGE_RTLIST_WRITER_INSERT_HEAD(sc, brt);
2644 	sc->sc_brtcnt++;
2645 
2646 	return 0;
2647 }
2648 
2649 /*
2650  * bridge_rtnode_remove:
2651  *
2652  *	Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2653  */
2654 static void
2655 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2656 {
2657 
2658 	KASSERT(BRIDGE_RT_LOCKED(sc));
2659 
2660 	BRIDGE_RTHASH_WRITER_REMOVE(brt);
2661 	BRIDGE_RTLIST_WRITER_REMOVE(brt);
2662 	sc->sc_brtcnt--;
2663 }
2664 
2665 /*
2666  * bridge_rtnode_destroy:
2667  *
2668  *	Destroy a bridge rtnode.
2669  */
2670 static void
2671 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2672 {
2673 
2674 	PSLIST_ENTRY_DESTROY(brt, brt_list);
2675 	PSLIST_ENTRY_DESTROY(brt, brt_hash);
2676 	pool_put(&bridge_rtnode_pool, brt);
2677 }
2678 
2679 extern pfil_head_t *inet_pfil_hook;                 /* XXX */
2680 extern pfil_head_t *inet6_pfil_hook;                /* XXX */
2681 
2682 /*
2683  * Send bridge packets through IPF if they are one of the types IPF can deal
2684  * with, or if they are ARP or REVARP.  (IPF will pass ARP and REVARP without
2685  * question.)
2686  */
2687 static int
2688 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2689 {
2690 	int snap, error;
2691 	struct ether_header *eh1, eh2;
2692 	struct llc llc1;
2693 	uint16_t ether_type;
2694 
2695 	snap = 0;
2696 	error = -1;	/* Default to an error unless cleared below */
2697 	eh1 = mtod(*mp, struct ether_header *);
2698 	ether_type = ntohs(eh1->ether_type);
2699 
2700 	/*
2701 	 * Check for SNAP/LLC.
2702 	 */
2703 	if (ether_type < ETHERMTU) {
2704 		struct llc *llc2 = (struct llc *)(eh1 + 1);
2705 
2706 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2707 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
2708 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
2709 		    llc2->llc_control == LLC_UI) {
2710 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
2711 			snap = 1;
2712 		}
2713 	}
2714 
2715 	/* drop VLAN traffic untagged by hardware offloading */
2716 	if (vlan_has_tag(*mp))
2717 		goto bad;
2718 
2719 	/*
2720 	 * If we're trying to filter bridge traffic, don't look at anything
2721 	 * other than IP and ARP traffic.  If the filter doesn't understand
2722 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
2723 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2724 	 * but of course we don't have an AppleTalk filter to begin with.
2725 	 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2726 	 * ARP traffic.)
2727 	 */
2728 	switch (ether_type) {
2729 		case ETHERTYPE_ARP:
2730 		case ETHERTYPE_REVARP:
2731 			return 0; /* Automatically pass */
2732 		case ETHERTYPE_IP:
2733 # ifdef INET6
2734 		case ETHERTYPE_IPV6:
2735 # endif /* INET6 */
2736 			break;
2737 		default:
2738 			goto bad;
2739 	}
2740 
2741 	/* Strip off the Ethernet header and keep a copy. */
2742 	m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2743 	m_adj(*mp, ETHER_HDR_LEN);
2744 
2745 	/* Strip off snap header, if present */
2746 	if (snap) {
2747 		m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2748 		m_adj(*mp, sizeof(struct llc));
2749 	}
2750 
2751 	/*
2752 	 * Check basic packet sanity and run IPF through pfil.
2753 	 */
2754 	KASSERT(!cpu_intr_p());
2755 	switch (ether_type)
2756 	{
2757 	case ETHERTYPE_IP :
2758 		error = bridge_ip_checkbasic(mp);
2759 		if (error == 0)
2760 			error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2761 		break;
2762 # ifdef INET6
2763 	case ETHERTYPE_IPV6 :
2764 		error = bridge_ip6_checkbasic(mp);
2765 		if (error == 0)
2766 			error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2767 		break;
2768 # endif
2769 	default :
2770 		error = 0;
2771 		break;
2772 	}
2773 
2774 	if (*mp == NULL)
2775 		return error;
2776 	if (error != 0)
2777 		goto bad;
2778 
2779 	error = -1;
2780 
2781 	/*
2782 	 * Finally, put everything back the way it was and return
2783 	 */
2784 	if (snap) {
2785 		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2786 		if (*mp == NULL)
2787 			return error;
2788 		bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2789 	}
2790 
2791 	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2792 	if (*mp == NULL)
2793 		return error;
2794 	bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2795 
2796 	return 0;
2797 
2798     bad:
2799 	m_freem(*mp);
2800 	*mp = NULL;
2801 	return error;
2802 }
2803 
2804 /*
2805  * Perform basic checks on header size since
2806  * IPF assumes ip_input has already done this
2807  * work for it.  Cut-and-pasted from ip_input.c.
2808  * Given how simple the IPv6 version is,
2809  * does the IPv4 version really need to be
2810  * this complicated?
2811  *
2812  * XXX Should we update ipstat here, or not?
2813  * XXX Right now we update ipstat but not
2814  * XXX csum_counter.
2815  */
2816 static int
2817 bridge_ip_checkbasic(struct mbuf **mp)
2818 {
2819 	struct mbuf *m = *mp;
2820 	struct ip *ip;
2821 	int len, hlen;
2822 
2823 	if (*mp == NULL)
2824 		return -1;
2825 
2826 	if (M_GET_ALIGNED_HDR(&m, struct ip, true) != 0) {
2827 		/* XXXJRT new stat, please */
2828 		ip_statinc(IP_STAT_TOOSMALL);
2829 		goto bad;
2830 	}
2831 	ip = mtod(m, struct ip *);
2832 	if (ip == NULL) goto bad;
2833 
2834 	if (ip->ip_v != IPVERSION) {
2835 		ip_statinc(IP_STAT_BADVERS);
2836 		goto bad;
2837 	}
2838 	hlen = ip->ip_hl << 2;
2839 	if (hlen < sizeof(struct ip)) { /* minimum header length */
2840 		ip_statinc(IP_STAT_BADHLEN);
2841 		goto bad;
2842 	}
2843 	if (hlen > m->m_len) {
2844 		if ((m = m_pullup(m, hlen)) == 0) {
2845 			ip_statinc(IP_STAT_BADHLEN);
2846 			goto bad;
2847 		}
2848 		ip = mtod(m, struct ip *);
2849 		if (ip == NULL) goto bad;
2850 	}
2851 
2852 	switch (m->m_pkthdr.csum_flags &
2853 	        ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2854 	         M_CSUM_IPv4_BAD)) {
2855 	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2856 		/* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2857 		goto bad;
2858 
2859 	case M_CSUM_IPv4:
2860 		/* Checksum was okay. */
2861 		/* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2862 		break;
2863 
2864 	default:
2865 		/* Must compute it ourselves. */
2866 		/* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2867 		if (in_cksum(m, hlen) != 0)
2868 			goto bad;
2869 		break;
2870 	}
2871 
2872 	/* Retrieve the packet length. */
2873 	len = ntohs(ip->ip_len);
2874 
2875 	/*
2876 	 * Check for additional length bogosity
2877 	 */
2878 	if (len < hlen) {
2879 		ip_statinc(IP_STAT_BADLEN);
2880 		goto bad;
2881 	}
2882 
2883 	/*
2884 	 * Check that the amount of data in the buffers
2885 	 * is at least as much as the IP header would have us expect.
2886 	 * Drop packet if shorter than we expect.
2887 	 */
2888 	if (m->m_pkthdr.len < len) {
2889 		ip_statinc(IP_STAT_TOOSHORT);
2890 		goto bad;
2891 	}
2892 
2893 	/* Checks out, proceed */
2894 	*mp = m;
2895 	return 0;
2896 
2897     bad:
2898 	*mp = m;
2899 	return -1;
2900 }
2901 
2902 # ifdef INET6
2903 /*
2904  * Same as above, but for IPv6.
2905  * Cut-and-pasted from ip6_input.c.
2906  * XXX Should we update ip6stat, or not?
2907  */
2908 static int
2909 bridge_ip6_checkbasic(struct mbuf **mp)
2910 {
2911 	struct mbuf *m = *mp;
2912 	struct ip6_hdr *ip6;
2913 
2914 	/*
2915 	 * If the IPv6 header is not aligned, slurp it up into a new
2916 	 * mbuf with space for link headers, in the event we forward
2917 	 * it.  Otherwise, if it is aligned, make sure the entire base
2918 	 * IPv6 header is in the first mbuf of the chain.
2919 	 */
2920 	if (M_GET_ALIGNED_HDR(&m, struct ip6_hdr, true) != 0) {
2921 		struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2922 		/* XXXJRT new stat, please */
2923 		ip6_statinc(IP6_STAT_TOOSMALL);
2924 		in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2925 		goto bad;
2926 	}
2927 
2928 	ip6 = mtod(m, struct ip6_hdr *);
2929 
2930 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2931 		ip6_statinc(IP6_STAT_BADVERS);
2932 		in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2933 		goto bad;
2934 	}
2935 
2936 	/* Checks out, proceed */
2937 	*mp = m;
2938 	return 0;
2939 
2940     bad:
2941 	*mp = m;
2942 	return -1;
2943 }
2944 # endif /* INET6 */
2945