xref: /netbsd-src/sys/net/if_bridge.c (revision e6c7e151de239c49d2e38720a061ed9d1fa99309)
1 /*	$NetBSD: if_bridge.c,v 1.170 2020/03/27 16:47:00 jdolecek Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by Jason L. Wright
53  * 4. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66  * POSSIBILITY OF SUCH DAMAGE.
67  *
68  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69  */
70 
71 /*
72  * Network interface bridge support.
73  *
74  * TODO:
75  *
76  *	- Currently only supports Ethernet-like interfaces (Ethernet,
77  *	  802.11, VLANs on Ethernet, etc.).  Figure out a nice way
78  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogeneous bridges).
80  */
81 
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.170 2020/03/27 16:47:00 jdolecek Exp $");
84 
85 #ifdef _KERNEL_OPT
86 #include "opt_bridge_ipf.h"
87 #include "opt_inet.h"
88 #include "opt_net_mpsafe.h"
89 #endif /* _KERNEL_OPT */
90 
91 #include <sys/param.h>
92 #include <sys/kernel.h>
93 #include <sys/mbuf.h>
94 #include <sys/queue.h>
95 #include <sys/socket.h>
96 #include <sys/socketvar.h> /* for softnet_lock */
97 #include <sys/sockio.h>
98 #include <sys/systm.h>
99 #include <sys/proc.h>
100 #include <sys/pool.h>
101 #include <sys/kauth.h>
102 #include <sys/cpu.h>
103 #include <sys/cprng.h>
104 #include <sys/mutex.h>
105 #include <sys/kmem.h>
106 
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_dl.h>
110 #include <net/if_types.h>
111 #include <net/if_llc.h>
112 
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115 #include <net/ether_sw_offload.h>
116 
117 #if defined(BRIDGE_IPF)
118 /* Used for bridge_ip[6]_checkbasic */
119 #include <netinet/in.h>
120 #include <netinet/in_systm.h>
121 #include <netinet/ip.h>
122 #include <netinet/ip_var.h>
123 #include <netinet/ip_private.h>		/* XXX */
124 
125 #include <netinet/ip6.h>
126 #include <netinet6/in6_var.h>
127 #include <netinet6/ip6_var.h>
128 #include <netinet6/ip6_private.h>	/* XXX */
129 #endif /* BRIDGE_IPF */
130 
131 /*
132  * Size of the route hash table.  Must be a power of two.
133  */
134 #ifndef BRIDGE_RTHASH_SIZE
135 #define	BRIDGE_RTHASH_SIZE		1024
136 #endif
137 
138 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
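
/*
 * Illustrative sketch only (hypothetical helper, not referenced by this
 * file): because the table size is a power of two, reducing a hash value
 * to a bucket index is a single AND with the mask rather than a modulo.
 */
#if 0
static inline unsigned int
bridge_rthash_bucket_example(uint32_t hash)
{

	return hash & BRIDGE_RTHASH_MASK;	/* == hash % BRIDGE_RTHASH_SIZE */
}
#endif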
139 
140 #include "carp.h"
141 #if NCARP > 0
142 #include <netinet/in.h>
143 #include <netinet/in_var.h>
144 #include <netinet/ip_carp.h>
145 #endif
146 
147 #include "ioconf.h"
148 
149 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
150 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
151 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
152 
153 /*
154  * Maximum number of addresses to cache.
155  */
156 #ifndef BRIDGE_RTABLE_MAX
157 #define	BRIDGE_RTABLE_MAX		100
158 #endif
159 
160 /*
161  * Spanning tree defaults.
162  */
163 #define	BSTP_DEFAULT_MAX_AGE		(20 * 256)
164 #define	BSTP_DEFAULT_HELLO_TIME		(2 * 256)
165 #define	BSTP_DEFAULT_FORWARD_DELAY	(15 * 256)
166 #define	BSTP_DEFAULT_HOLD_TIME		(1 * 256)
167 #define	BSTP_DEFAULT_BRIDGE_PRIORITY	0x8000
168 #define	BSTP_DEFAULT_PORT_PRIORITY	0x80
169 #define	BSTP_DEFAULT_PATH_COST		55
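
/*
 * Illustrative sketch only (hypothetical helper): the BSTP timer defaults
 * above are stored in 1/256-second units, which is why the
 * BRDGSHT/BRDGSFD/BRDGSMA handlers below convert to and from whole
 * seconds with a shift by 8.
 */
#if 0
static inline u_int
bstp_seconds_to_timer_example(u_int seconds)
{

	return seconds << 8;	/* e.g. 2 s -> 512 == BSTP_DEFAULT_HELLO_TIME */
}
#endif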
170 
171 /*
172  * Timeout (in seconds) for entries learned dynamically.
173  */
174 #ifndef BRIDGE_RTABLE_TIMEOUT
175 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
176 #endif
177 
178 /*
179  * Number of seconds between walks of the route list.
180  */
181 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
182 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
183 #endif
184 
185 #define BRIDGE_RT_LOCK(_sc)	mutex_enter((_sc)->sc_rtlist_lock)
186 #define BRIDGE_RT_UNLOCK(_sc)	mutex_exit((_sc)->sc_rtlist_lock)
187 #define BRIDGE_RT_LOCKED(_sc)	mutex_owned((_sc)->sc_rtlist_lock)
188 
189 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
190 				pserialize_perform((_sc)->sc_rtlist_psz)
191 
192 #define BRIDGE_RT_RENTER(__s)	do { __s = pserialize_read_enter(); } while (0)
193 #define BRIDGE_RT_REXIT(__s)	do { pserialize_read_exit(__s); } while (0)
194 
195 #define BRIDGE_RTLIST_READER_FOREACH(_brt, _sc)			\
196 	PSLIST_READER_FOREACH((_brt), &((_sc)->sc_rtlist),		\
197 	    struct bridge_rtnode, brt_list)
198 #define BRIDGE_RTLIST_WRITER_FOREACH(_brt, _sc)			\
199 	PSLIST_WRITER_FOREACH((_brt), &((_sc)->sc_rtlist),		\
200 	    struct bridge_rtnode, brt_list)
201 #define BRIDGE_RTLIST_WRITER_INSERT_HEAD(_sc, _brt)			\
202 	PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rtlist, brt, brt_list)
203 #define BRIDGE_RTLIST_WRITER_REMOVE(_brt)				\
204 	PSLIST_WRITER_REMOVE((_brt), brt_list)
205 
206 #define BRIDGE_RTHASH_READER_FOREACH(_brt, _sc, _hash)			\
207 	PSLIST_READER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)],	\
208 	    struct bridge_rtnode, brt_hash)
209 #define BRIDGE_RTHASH_WRITER_FOREACH(_brt, _sc, _hash)			\
210 	PSLIST_WRITER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)],	\
211 	    struct bridge_rtnode, brt_hash)
212 #define BRIDGE_RTHASH_WRITER_INSERT_HEAD(_sc, _hash, _brt)		\
213 	PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rthash[(_hash)], brt, brt_hash)
214 #define BRIDGE_RTHASH_WRITER_INSERT_AFTER(_brt, _new)			\
215 	PSLIST_WRITER_INSERT_AFTER((_brt), (_new), brt_hash)
216 #define BRIDGE_RTHASH_WRITER_REMOVE(_brt)				\
217 	PSLIST_WRITER_REMOVE((_brt), brt_hash)
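
/*
 * Illustrative sketch only (hypothetical helper): a lock-less reader
 * walks a hash bucket inside a pserialize read section using the macros
 * above; writers take the BRIDGE_RT_LOCK mutex and use the WRITER
 * variants instead.
 */
#if 0
static int
bridge_rthash_count_example(struct bridge_softc *sc, int hash)
{
	struct bridge_rtnode *brt;
	int s, n = 0;

	BRIDGE_RT_RENTER(s);
	BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash)
		n++;
	BRIDGE_RT_REXIT(s);

	return n;
}
#endif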
218 
219 #ifdef NET_MPSAFE
220 #define DECLARE_LOCK_VARIABLE
221 #define ACQUIRE_GLOBAL_LOCKS()	do { } while (0)
222 #define RELEASE_GLOBAL_LOCKS()	do { } while (0)
223 #else
224 #define DECLARE_LOCK_VARIABLE	int __s
225 #define ACQUIRE_GLOBAL_LOCKS()	do {					\
226 					KERNEL_LOCK(1, NULL);		\
227 					mutex_enter(softnet_lock);	\
228 					__s = splsoftnet();		\
229 				} while (0)
230 #define RELEASE_GLOBAL_LOCKS()	do {					\
231 					splx(__s);			\
232 					mutex_exit(softnet_lock);	\
233 					KERNEL_UNLOCK_ONE(NULL);	\
234 				} while (0)
235 #endif
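
/*
 * Illustrative sketch only (hypothetical helper): when the stack is not
 * built with NET_MPSAFE, the helpers above bracket calls into code that
 * still expects KERNEL_LOCK/softnet_lock, mirroring their use in
 * bridge_input() and bridge_forward() below.
 */
#if 0
static void
bridge_global_locks_example(struct ifnet *ifp, struct mbuf *m)
{
	DECLARE_LOCK_VARIABLE;

	ACQUIRE_GLOBAL_LOCKS();
	ether_input(ifp, m);	/* call into possibly non-MPSAFE code */
	RELEASE_GLOBAL_LOCKS();
}
#endif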
236 
237 struct psref_class *bridge_psref_class __read_mostly;
238 
239 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
240 
241 static struct pool bridge_rtnode_pool;
242 
243 static int	bridge_clone_create(struct if_clone *, int);
244 static int	bridge_clone_destroy(struct ifnet *);
245 
246 static int	bridge_ioctl(struct ifnet *, u_long, void *);
247 static int	bridge_init(struct ifnet *);
248 static void	bridge_stop(struct ifnet *, int);
249 static void	bridge_start(struct ifnet *);
250 
251 static void	bridge_input(struct ifnet *, struct mbuf *);
252 static void	bridge_forward(struct bridge_softc *, struct mbuf *);
253 
254 static void	bridge_timer(void *);
255 
256 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
257 				 struct mbuf *);
258 
259 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
260 				struct ifnet *, int, uint8_t);
261 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
262 static void	bridge_rttrim(struct bridge_softc *);
263 static void	bridge_rtage(struct bridge_softc *);
264 static void	bridge_rtage_work(struct work *, void *);
265 static void	bridge_rtflush(struct bridge_softc *, int);
266 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
267 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
268 
269 static void	bridge_rtable_init(struct bridge_softc *);
270 static void	bridge_rtable_fini(struct bridge_softc *);
271 
272 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
273 						  const uint8_t *);
274 static int	bridge_rtnode_insert(struct bridge_softc *,
275 				     struct bridge_rtnode *);
276 static void	bridge_rtnode_remove(struct bridge_softc *,
277 				     struct bridge_rtnode *);
278 static void	bridge_rtnode_destroy(struct bridge_rtnode *);
279 
280 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
281 						  const char *name,
282 						  struct psref *);
283 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
284 						     struct ifnet *ifp,
285 						     struct psref *);
286 static void	bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
287                                       struct psref *);
288 static void	bridge_delete_member(struct bridge_softc *,
289 				     struct bridge_iflist *);
290 static void	bridge_acquire_member(struct bridge_softc *sc,
291                                       struct bridge_iflist *,
292                                       struct psref *);
293 
294 static int	bridge_ioctl_add(struct bridge_softc *, void *);
295 static int	bridge_ioctl_del(struct bridge_softc *, void *);
296 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
297 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
298 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
299 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
300 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
301 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
302 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
303 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
304 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
305 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
306 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
307 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
308 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
309 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
310 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
311 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
312 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
313 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
314 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
315 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
316 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
317 #if defined(BRIDGE_IPF)
318 static int	bridge_ioctl_gfilt(struct bridge_softc *, void *);
319 static int	bridge_ioctl_sfilt(struct bridge_softc *, void *);
320 static int	bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
321 static int	bridge_ip_checkbasic(struct mbuf **mp);
322 # ifdef INET6
323 static int	bridge_ip6_checkbasic(struct mbuf **mp);
324 # endif /* INET6 */
325 #endif /* BRIDGE_IPF */
326 
327 struct bridge_control {
328 	int	(*bc_func)(struct bridge_softc *, void *);
329 	int	bc_argsize;
330 	int	bc_flags;
331 };
332 
333 #define	BC_F_COPYIN		0x01	/* copy arguments in */
334 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
335 #define	BC_F_SUSER		0x04	/* do super-user check */
336 #define BC_F_XLATEIN		0x08	/* xlate arguments in */
337 #define BC_F_XLATEOUT		0x10	/* xlate arguments out */
338 
339 static const struct bridge_control bridge_control_table[] = {
340 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
341 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
342 
343 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
344 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
345 
346 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
347 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
348 
349 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
350 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
351 
352 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
353 
354 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
355 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
356 
357 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
358 
359 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
360 
361 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
362 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
363 
364 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
365 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
366 
367 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
368 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
369 
370 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
371 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
372 
373 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
374 
375 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
376 #if defined(BRIDGE_IPF)
377 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
378 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
379 #endif /* BRIDGE_IPF */
380 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
381 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
382 };
383 
384 static const int bridge_control_table_size = __arraycount(bridge_control_table);
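
/*
 * Illustrative sketch only, from userland (not part of this file): a
 * bridge sub-command such as BRDGADD travels to bridge_ioctl() inside a
 * SIOCSDRVSPEC request, and the table above dictates the copyin/copyout
 * behaviour and the super-user check.  "bridge0" and "wm0" are
 * placeholder names; BC_F_SUSER commands require privilege.
 */
#if 0
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/if_bridgevar.h>
#include <string.h>

static int
bridge_add_member_example(int sock)	/* e.g. socket(AF_INET, SOCK_DGRAM, 0) */
{
	struct ifbreq req;
	struct ifdrv ifd;

	memset(&req, 0, sizeof(req));
	strlcpy(req.ifbr_ifsname, "wm0", sizeof(req.ifbr_ifsname));

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
	ifd.ifd_cmd = BRDGADD;		/* index into bridge_control_table */
	ifd.ifd_len = sizeof(req);	/* must equal bc_argsize (BC_F_COPYIN) */
	ifd.ifd_data = &req;

	return ioctl(sock, SIOCSDRVSPEC, &ifd);
}
#endif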
385 
386 static struct if_clone bridge_cloner =
387     IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
388 
389 /*
390  * bridgeattach:
391  *
392  *	Pseudo-device attach routine.
393  */
394 void
395 bridgeattach(int n)
396 {
397 
398 	pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
399 	    0, 0, 0, "brtpl", NULL, IPL_NET);
400 
401 	bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
402 
403 	if_clone_attach(&bridge_cloner);
404 }
405 
406 /*
407  * bridge_clone_create:
408  *
409  *	Create a new bridge instance.
410  */
411 static int
412 bridge_clone_create(struct if_clone *ifc, int unit)
413 {
414 	struct bridge_softc *sc;
415 	struct ifnet *ifp;
416 	int error;
417 
418 	sc = kmem_zalloc(sizeof(*sc),  KM_SLEEP);
419 	ifp = &sc->sc_if;
420 
421 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
422 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
423 	sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
424 	sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
425 	sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
426 	sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
427 	sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
428 	sc->sc_filter_flags = 0;
429 
430 	/* Initialize our routing table. */
431 	bridge_rtable_init(sc);
432 
433 	error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
434 	    bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
435 	if (error)
436 		panic("%s: workqueue_create %d\n", __func__, error);
437 
438 	callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
439 	callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
440 
441 	mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
442 	PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
443 	sc->sc_iflist_psref.bip_psz = pserialize_create();
444 
445 	if_initname(ifp, ifc->ifc_name, unit);
446 	ifp->if_softc = sc;
447 	ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
448 #ifdef NET_MPSAFE
449 	ifp->if_extflags |= IFEF_MPSAFE;
450 #endif
451 	ifp->if_mtu = ETHERMTU;
452 	ifp->if_ioctl = bridge_ioctl;
453 	ifp->if_output = bridge_output;
454 	ifp->if_start = bridge_start;
455 	ifp->if_stop = bridge_stop;
456 	ifp->if_init = bridge_init;
457 	ifp->if_type = IFT_BRIDGE;
458 	ifp->if_addrlen = 0;
459 	ifp->if_dlt = DLT_EN10MB;
460 	ifp->if_hdrlen = ETHER_HDR_LEN;
461 
462 	error = if_initialize(ifp);
463 	if (error != 0) {
464 		pserialize_destroy(sc->sc_iflist_psref.bip_psz);
465 		mutex_destroy(&sc->sc_iflist_psref.bip_lock);
466 		callout_destroy(&sc->sc_brcallout);
467 		callout_destroy(&sc->sc_bstpcallout);
468 		workqueue_destroy(sc->sc_rtage_wq);
469 		bridge_rtable_fini(sc);
470 		kmem_free(sc, sizeof(*sc));
471 
472 		return error;
473 	}
474 	if_alloc_sadl(ifp);
475 	if_register(ifp);
476 
477 	return 0;
478 }
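
/*
 * Illustrative sketch only, from userland: "ifconfig bridge0 create"
 * boils down to a SIOCIFCREATE ioctl, which reaches
 * bridge_clone_create() through the if_clone framework.  "bridge0" is a
 * placeholder name.
 */
#if 0
#include <sys/ioctl.h>
#include <net/if.h>
#include <string.h>

static int
bridge_create_example(int sock)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "bridge0", sizeof(ifr.ifr_name));
	return ioctl(sock, SIOCIFCREATE, &ifr);
}
#endif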
479 
480 /*
481  * bridge_clone_destroy:
482  *
483  *	Destroy a bridge instance.
484  */
485 static int
486 bridge_clone_destroy(struct ifnet *ifp)
487 {
488 	struct bridge_softc *sc = ifp->if_softc;
489 	struct bridge_iflist *bif;
490 
491 	if ((ifp->if_flags & IFF_RUNNING) != 0)
492 		bridge_stop(ifp, 1);
493 
494 	BRIDGE_LOCK(sc);
495 	for (;;) {
496 		bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
497 		    bif_next);
498 		if (bif == NULL)
499 			break;
500 		bridge_delete_member(sc, bif);
501 	}
502 	PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
503 	BRIDGE_UNLOCK(sc);
504 
505 	if_detach(ifp);
506 
507 	/* Tear down the routing table. */
508 	bridge_rtable_fini(sc);
509 
510 	pserialize_destroy(sc->sc_iflist_psref.bip_psz);
511 	mutex_destroy(&sc->sc_iflist_psref.bip_lock);
512 	callout_destroy(&sc->sc_brcallout);
513 	callout_destroy(&sc->sc_bstpcallout);
514 	workqueue_destroy(sc->sc_rtage_wq);
515 	kmem_free(sc, sizeof(*sc));
516 
517 	return 0;
518 }
519 
520 /*
521  * bridge_ioctl:
522  *
523  *	Handle a control request from the operator.
524  */
525 static int
526 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
527 {
528 	struct bridge_softc *sc = ifp->if_softc;
529 	struct lwp *l = curlwp;	/* XXX */
530 	union {
531 		struct ifbreq ifbreq;
532 		struct ifbifconf ifbifconf;
533 		struct ifbareq ifbareq;
534 		struct ifbaconf ifbaconf;
535 		struct ifbrparam ifbrparam;
536 	} args;
537 	struct ifdrv *ifd = (struct ifdrv *) data;
538 	const struct bridge_control *bc = NULL; /* XXXGCC */
539 	int s, error = 0;
540 
541 	/* Authorize command before calling splsoftnet(). */
542 	switch (cmd) {
543 	case SIOCGDRVSPEC:
544 	case SIOCSDRVSPEC:
545 		if (ifd->ifd_cmd >= bridge_control_table_size
546 		    || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
547 			error = EINVAL;
548 			return error;
549 		}
550 
551 		/* We only care about BC_F_SUSER at this point. */
552 		if ((bc->bc_flags & BC_F_SUSER) == 0)
553 			break;
554 
555 		error = kauth_authorize_network(l->l_cred,
556 		    KAUTH_NETWORK_INTERFACE_BRIDGE,
557 		    cmd == SIOCGDRVSPEC ?
558 		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
559 		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
560 		     ifd, NULL, NULL);
561 		if (error)
562 			return error;
563 
564 		break;
565 	}
566 
567 	s = splsoftnet();
568 
569 	switch (cmd) {
570 	case SIOCGDRVSPEC:
571 	case SIOCSDRVSPEC:
572 		KASSERT(bc != NULL);
573 		if (cmd == SIOCGDRVSPEC &&
574 		    (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
575 			error = EINVAL;
576 			break;
577 		}
578 		else if (cmd == SIOCSDRVSPEC &&
579 		    (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
580 			error = EINVAL;
581 			break;
582 		}
583 
584 		/* BC_F_SUSER is checked above, before splsoftnet(). */
585 
586 		if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
587 		    && (ifd->ifd_len != bc->bc_argsize
588 			|| ifd->ifd_len > sizeof(args))) {
589 			error = EINVAL;
590 			break;
591 		}
592 
593 		memset(&args, 0, sizeof(args));
594 		if (bc->bc_flags & BC_F_COPYIN) {
595 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
596 			if (error)
597 				break;
598 		} else if (bc->bc_flags & BC_F_XLATEIN) {
599 			args.ifbifconf.ifbic_len = ifd->ifd_len;
600 			args.ifbifconf.ifbic_buf = ifd->ifd_data;
601 		}
602 
603 		error = (*bc->bc_func)(sc, &args);
604 		if (error)
605 			break;
606 
607 		if (bc->bc_flags & BC_F_COPYOUT) {
608 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
609 		} else if (bc->bc_flags & BC_F_XLATEOUT) {
610 			ifd->ifd_len = args.ifbifconf.ifbic_len;
611 			ifd->ifd_data = args.ifbifconf.ifbic_buf;
612 		}
613 		break;
614 
615 	case SIOCSIFFLAGS:
616 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
617 			break;
618 		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
619 		case IFF_RUNNING:
620 			/*
621 			 * If interface is marked down and it is running,
622 			 * then stop and disable it.
623 			 */
624 			(*ifp->if_stop)(ifp, 1);
625 			break;
626 		case IFF_UP:
627 			/*
628 			 * If interface is marked up and it is stopped, then
629 			 * start it.
630 			 */
631 			error = (*ifp->if_init)(ifp);
632 			break;
633 		default:
634 			break;
635 		}
636 		break;
637 
638 	case SIOCSIFMTU:
639 		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
640 			error = 0;
641 		break;
642 
643 	default:
644 		error = ifioctl_common(ifp, cmd, data);
645 		break;
646 	}
647 
648 	splx(s);
649 
650 	return error;
651 }
652 
653 /*
654  * bridge_lookup_member:
655  *	Look up a bridge member interface.
656  *	Lookup a bridge member interface.
657  */
658 static struct bridge_iflist *
659 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
660 {
661 	struct bridge_iflist *bif;
662 	struct ifnet *ifp;
663 	int s;
664 
665 	BRIDGE_PSZ_RENTER(s);
666 
667 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
668 		ifp = bif->bif_ifp;
669 		if (strcmp(ifp->if_xname, name) == 0)
670 			break;
671 	}
672 	if (bif != NULL)
673 		bridge_acquire_member(sc, bif, psref);
674 
675 	BRIDGE_PSZ_REXIT(s);
676 
677 	return bif;
678 }
679 
680 /*
681  * bridge_lookup_member_if:
682  *
683  *	Look up a bridge member interface by ifnet*.
684  */
685 static struct bridge_iflist *
686 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
687     struct psref *psref)
688 {
689 	struct bridge_iflist *bif;
690 	int s;
691 
692 	BRIDGE_PSZ_RENTER(s);
693 
694 	bif = member_ifp->if_bridgeif;
695 	if (bif != NULL) {
696 		psref_acquire(psref, &bif->bif_psref,
697 		    bridge_psref_class);
698 	}
699 
700 	BRIDGE_PSZ_REXIT(s);
701 
702 	return bif;
703 }
704 
705 static void
706 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
707     struct psref *psref)
708 {
709 
710 	psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
711 }
712 
713 /*
714  * bridge_release_member:
715  *
716  *	Release the specified member interface.
717  */
718 static void
719 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
720     struct psref *psref)
721 {
722 
723 	psref_release(psref, &bif->bif_psref, bridge_psref_class);
724 }
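
/*
 * Illustrative sketch only (hypothetical helper): the usual
 * lookup/acquire/release discipline for member references.  The LWP is
 * bound to a CPU for the lifetime of the psref, as bridge_input() does
 * with curlwp_bind()/curlwp_bindx().
 */
#if 0
static struct ifnet *
bridge_member_ifp_example(struct bridge_softc *sc, const char *name)
{
	struct bridge_iflist *bif;
	struct ifnet *ifp = NULL;
	struct psref psref;
	int bound;

	bound = curlwp_bind();
	bif = bridge_lookup_member(sc, name, &psref);
	if (bif != NULL) {
		ifp = bif->bif_ifp;	/* ... use the member here ... */
		bridge_release_member(sc, bif, &psref);
	}
	curlwp_bindx(bound);

	return ifp;
}
#endif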
725 
726 /*
727  * bridge_delete_member:
728  *
729  *	Delete the specified member interface.
730  */
731 static void
732 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
733 {
734 	struct ifnet *ifs = bif->bif_ifp;
735 
736 	KASSERT(BRIDGE_LOCKED(sc));
737 
738 	ifs->_if_input = ether_input;
739 	ifs->if_bridge = NULL;
740 	ifs->if_bridgeif = NULL;
741 
742 	PSLIST_WRITER_REMOVE(bif, bif_next);
743 	BRIDGE_PSZ_PERFORM(sc);
744 	BRIDGE_UNLOCK(sc);
745 
746 	switch (ifs->if_type) {
747 	case IFT_ETHER:
748 	case IFT_L2TP:
749 		/*
750 		 * Take the interface out of promiscuous mode.
751 		 * Don't call it while holding a spin lock.
752 		 */
753 		(void) ifpromisc(ifs, 0);
754 		IFNET_LOCK(ifs);
755 		(void) ether_disable_vlan_mtu(ifs);
756 		IFNET_UNLOCK(ifs);
757 		break;
758 	default:
759 #ifdef DIAGNOSTIC
760 		panic("%s: impossible", __func__);
761 #endif
762 		break;
763 	}
764 
765 	psref_target_destroy(&bif->bif_psref, bridge_psref_class);
766 
767 	PSLIST_ENTRY_DESTROY(bif, bif_next);
768 	kmem_free(bif, sizeof(*bif));
769 
770 	BRIDGE_LOCK(sc);
771 }
772 
773 /*
774  * bridge_calc_csum_flags:
775  *
776  *	Calculate the bitwise AND of the TX csum flags supported by all member interfaces.
777  */
778 void
779 bridge_calc_csum_flags(struct bridge_softc *sc)
780 {
781 	struct bridge_iflist *bif;
782 	struct ifnet *ifs;
783 	int flags = ~0;
784 
785 	BRIDGE_LOCK(sc);
786 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
787 		ifs = bif->bif_ifp;
788 		flags &= ifs->if_csum_flags_tx;
789 	}
790 	sc->sc_csum_flags_tx = flags;
791 	BRIDGE_UNLOCK(sc);
792 }
793 
794 static int
795 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
796 {
797 	struct ifbreq *req = arg;
798 	struct bridge_iflist *bif = NULL;
799 	struct ifnet *ifs;
800 	int error = 0;
801 	struct psref psref;
802 
803 	ifs = if_get(req->ifbr_ifsname, &psref);
804 	if (ifs == NULL)
805 		return ENOENT;
806 
807 	if (ifs->if_bridge == sc) {
808 		error = EEXIST;
809 		goto out;
810 	}
811 
812 	if (ifs->if_bridge != NULL) {
813 		error = EBUSY;
814 		goto out;
815 	}
816 
817 	if (ifs->_if_input != ether_input) {
818 		error = EINVAL;
819 		goto out;
820 	}
821 
822 	/* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
823 	if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
824 		error = EINVAL;
825 		goto out;
826 	}
827 
828 	bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
829 
830 	switch (ifs->if_type) {
831 	case IFT_ETHER:
832 		if (sc->sc_if.if_mtu != ifs->if_mtu) {
833 			error = EINVAL;
834 			goto out;
835 		}
836 		/* FALLTHROUGH */
837 	case IFT_L2TP:
838 		IFNET_LOCK(ifs);
839 		error = ether_enable_vlan_mtu(ifs);
840 		IFNET_UNLOCK(ifs);
841 		if (error > 0)
842 			goto out;
843 		/*
844 		 * Place the interface into promiscuous mode.
845 		 */
846 		error = ifpromisc(ifs, 1);
847 		if (error)
848 			goto out;
849 		break;
850 	default:
851 		error = EINVAL;
852 		goto out;
853 	}
854 
855 	bif->bif_ifp = ifs;
856 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
857 	bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
858 	bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
859 	PSLIST_ENTRY_INIT(bif, bif_next);
860 	psref_target_init(&bif->bif_psref, bridge_psref_class);
861 
862 	BRIDGE_LOCK(sc);
863 
864 	ifs->if_bridge = sc;
865 	ifs->if_bridgeif = bif;
866 	PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
867 	ifs->_if_input = bridge_input;
868 
869 	BRIDGE_UNLOCK(sc);
870 
871 	bridge_calc_csum_flags(sc);
872 
873 	if (sc->sc_if.if_flags & IFF_RUNNING)
874 		bstp_initialization(sc);
875 	else
876 		bstp_stop(sc);
877 
878 out:
879 	if_put(ifs, &psref);
880 	if (error) {
881 		if (bif != NULL)
882 			kmem_free(bif, sizeof(*bif));
883 	}
884 	return error;
885 }
886 
887 static int
888 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
889 {
890 	struct ifbreq *req = arg;
891 	const char *name = req->ifbr_ifsname;
892 	struct bridge_iflist *bif;
893 	struct ifnet *ifs;
894 
895 	BRIDGE_LOCK(sc);
896 
897 	/*
898 	 * Don't use bridge_lookup_member. We want to find the member
899 	 * under the writer lock, without taking a psref reference to it.
900 	 */
901 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
902 		ifs = bif->bif_ifp;
903 		if (strcmp(ifs->if_xname, name) == 0)
904 			break;
905 	}
906 
907 	if (bif == NULL) {
908 		BRIDGE_UNLOCK(sc);
909 		return ENOENT;
910 	}
911 
912 	bridge_delete_member(sc, bif);
913 
914 	BRIDGE_UNLOCK(sc);
915 
916 	bridge_rtdelete(sc, ifs);
917 	bridge_calc_csum_flags(sc);
918 
919 	if (sc->sc_if.if_flags & IFF_RUNNING)
920 		bstp_initialization(sc);
921 
922 	return 0;
923 }
924 
925 static int
926 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
927 {
928 	struct ifbreq *req = arg;
929 	struct bridge_iflist *bif;
930 	struct psref psref;
931 
932 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
933 	if (bif == NULL)
934 		return ENOENT;
935 
936 	req->ifbr_ifsflags = bif->bif_flags;
937 	req->ifbr_state = bif->bif_state;
938 	req->ifbr_priority = bif->bif_priority;
939 	req->ifbr_path_cost = bif->bif_path_cost;
940 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
941 
942 	bridge_release_member(sc, bif, &psref);
943 
944 	return 0;
945 }
946 
947 static int
948 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
949 {
950 	struct ifbreq *req = arg;
951 	struct bridge_iflist *bif;
952 	struct psref psref;
953 
954 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
955 	if (bif == NULL)
956 		return ENOENT;
957 
958 	if (req->ifbr_ifsflags & IFBIF_STP) {
959 		switch (bif->bif_ifp->if_type) {
960 		case IFT_ETHER:
961 		case IFT_L2TP:
962 			/* These can do spanning tree. */
963 			break;
964 
965 		default:
966 			/* Nothing else can. */
967 			bridge_release_member(sc, bif, &psref);
968 			return EINVAL;
969 		}
970 	}
971 
972 	bif->bif_flags = req->ifbr_ifsflags;
973 
974 	bridge_release_member(sc, bif, &psref);
975 
976 	if (sc->sc_if.if_flags & IFF_RUNNING)
977 		bstp_initialization(sc);
978 
979 	return 0;
980 }
981 
982 static int
983 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
984 {
985 	struct ifbrparam *param = arg;
986 
987 	sc->sc_brtmax = param->ifbrp_csize;
988 	bridge_rttrim(sc);
989 
990 	return 0;
991 }
992 
993 static int
994 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
995 {
996 	struct ifbrparam *param = arg;
997 
998 	param->ifbrp_csize = sc->sc_brtmax;
999 
1000 	return 0;
1001 }
1002 
1003 static int
1004 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1005 {
1006 	struct ifbifconf *bifc = arg;
1007 	struct bridge_iflist *bif;
1008 	struct ifbreq *breqs;
1009 	int i, count, error = 0;
1010 
1011 retry:
1012 	BRIDGE_LOCK(sc);
1013 	count = 0;
1014 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1015 		count++;
1016 	BRIDGE_UNLOCK(sc);
1017 
1018 	if (count == 0) {
1019 		bifc->ifbic_len = 0;
1020 		return 0;
1021 	}
1022 
1023 	if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1024 		/* Tell the caller that a larger buffer is needed */
1025 		bifc->ifbic_len = sizeof(*breqs) * count;
1026 		return 0;
1027 	}
1028 
1029 	breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1030 
1031 	BRIDGE_LOCK(sc);
1032 
1033 	i = 0;
1034 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1035 		i++;
1036 	if (i > count) {
1037 		/*
1038 		 * The number of members has been increased.
1039 		 * We need more memory!
1040 		 */
1041 		BRIDGE_UNLOCK(sc);
1042 		kmem_free(breqs, sizeof(*breqs) * count);
1043 		goto retry;
1044 	}
1045 
1046 	i = 0;
1047 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1048 		struct ifbreq *breq = &breqs[i++];
1049 		memset(breq, 0, sizeof(*breq));
1050 
1051 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1052 		    sizeof(breq->ifbr_ifsname));
1053 		breq->ifbr_ifsflags = bif->bif_flags;
1054 		breq->ifbr_state = bif->bif_state;
1055 		breq->ifbr_priority = bif->bif_priority;
1056 		breq->ifbr_path_cost = bif->bif_path_cost;
1057 		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1058 	}
1059 
1060 	/* Don't call copyout while holding the mutex */
1061 	BRIDGE_UNLOCK(sc);
1062 
1063 	for (i = 0; i < count; i++) {
1064 		error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1065 		if (error)
1066 			break;
1067 	}
1068 	bifc->ifbic_len = sizeof(*breqs) * i;
1069 
1070 	kmem_free(breqs, sizeof(*breqs) * count);
1071 
1072 	return error;
1073 }
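
/*
 * Illustrative sketch only, from userland: the BRDGGIFS/BRDGRTS handlers
 * report the required buffer size when the supplied one is too small, so
 * callers typically issue the request twice.  "bridge0" is a placeholder
 * name; a robust caller would retry if the size grows between the calls.
 */
#if 0
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/if_bridgevar.h>
#include <stdlib.h>
#include <string.h>

static struct ifbreq *
bridge_get_members_example(int sock, size_t *countp)
{
	struct ifdrv ifd;
	struct ifbreq *breqs;

	/* First pass: ask for the required length. */
	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
	ifd.ifd_cmd = BRDGGIFS;
	ifd.ifd_len = 0;
	ifd.ifd_data = NULL;
	if (ioctl(sock, SIOCGDRVSPEC, &ifd) == -1)
		return NULL;

	/* Second pass: fetch the member list into a suitably sized buffer. */
	if ((breqs = malloc(ifd.ifd_len)) == NULL)
		return NULL;
	ifd.ifd_data = breqs;
	if (ioctl(sock, SIOCGDRVSPEC, &ifd) == -1) {
		free(breqs);
		return NULL;
	}

	*countp = ifd.ifd_len / sizeof(*breqs);
	return breqs;
}
#endif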
1074 
1075 static int
1076 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1077 {
1078 	struct ifbaconf *bac = arg;
1079 	struct bridge_rtnode *brt;
1080 	struct ifbareq bareq;
1081 	int count = 0, error = 0, len;
1082 
1083 	if (bac->ifbac_len == 0)
1084 		return 0;
1085 
1086 	BRIDGE_RT_LOCK(sc);
1087 
1088 	/* The passed buffer is too small; report the required size. */
1089 	if (bac->ifbac_len < (sizeof(bareq) * sc->sc_brtcnt)) {
1090 		count = sc->sc_brtcnt;
1091 		goto out;
1092 	}
1093 
1094 	len = bac->ifbac_len;
1095 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
1096 		if (len < sizeof(bareq))
1097 			goto out;
1098 		memset(&bareq, 0, sizeof(bareq));
1099 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1100 		    sizeof(bareq.ifba_ifsname));
1101 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1102 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1103 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1104 		} else
1105 			bareq.ifba_expire = 0;
1106 		bareq.ifba_flags = brt->brt_flags;
1107 
1108 		error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1109 		if (error)
1110 			goto out;
1111 		count++;
1112 		len -= sizeof(bareq);
1113 	}
1114 out:
1115 	BRIDGE_RT_UNLOCK(sc);
1116 
1117 	bac->ifbac_len = sizeof(bareq) * count;
1118 	return error;
1119 }
1120 
1121 static int
1122 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1123 {
1124 	struct ifbareq *req = arg;
1125 	struct bridge_iflist *bif;
1126 	int error;
1127 	struct psref psref;
1128 
1129 	bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1130 	if (bif == NULL)
1131 		return ENOENT;
1132 
1133 	error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1134 	    req->ifba_flags);
1135 
1136 	bridge_release_member(sc, bif, &psref);
1137 
1138 	return error;
1139 }
1140 
1141 static int
1142 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1143 {
1144 	struct ifbrparam *param = arg;
1145 
1146 	sc->sc_brttimeout = param->ifbrp_ctime;
1147 
1148 	return 0;
1149 }
1150 
1151 static int
1152 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1153 {
1154 	struct ifbrparam *param = arg;
1155 
1156 	param->ifbrp_ctime = sc->sc_brttimeout;
1157 
1158 	return 0;
1159 }
1160 
1161 static int
1162 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1163 {
1164 	struct ifbareq *req = arg;
1165 
1166 	return (bridge_rtdaddr(sc, req->ifba_dst));
1167 }
1168 
1169 static int
1170 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1171 {
1172 	struct ifbreq *req = arg;
1173 
1174 	bridge_rtflush(sc, req->ifbr_ifsflags);
1175 
1176 	return 0;
1177 }
1178 
1179 static int
1180 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1181 {
1182 	struct ifbrparam *param = arg;
1183 
1184 	param->ifbrp_prio = sc->sc_bridge_priority;
1185 
1186 	return 0;
1187 }
1188 
1189 static int
1190 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1191 {
1192 	struct ifbrparam *param = arg;
1193 
1194 	sc->sc_bridge_priority = param->ifbrp_prio;
1195 
1196 	if (sc->sc_if.if_flags & IFF_RUNNING)
1197 		bstp_initialization(sc);
1198 
1199 	return 0;
1200 }
1201 
1202 static int
1203 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1204 {
1205 	struct ifbrparam *param = arg;
1206 
1207 	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1208 
1209 	return 0;
1210 }
1211 
1212 static int
1213 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1214 {
1215 	struct ifbrparam *param = arg;
1216 
1217 	if (param->ifbrp_hellotime == 0)
1218 		return EINVAL;
1219 	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1220 
1221 	if (sc->sc_if.if_flags & IFF_RUNNING)
1222 		bstp_initialization(sc);
1223 
1224 	return 0;
1225 }
1226 
1227 static int
1228 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1229 {
1230 	struct ifbrparam *param = arg;
1231 
1232 	param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1233 
1234 	return 0;
1235 }
1236 
1237 static int
1238 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1239 {
1240 	struct ifbrparam *param = arg;
1241 
1242 	if (param->ifbrp_fwddelay == 0)
1243 		return EINVAL;
1244 	sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1245 
1246 	if (sc->sc_if.if_flags & IFF_RUNNING)
1247 		bstp_initialization(sc);
1248 
1249 	return 0;
1250 }
1251 
1252 static int
1253 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1254 {
1255 	struct ifbrparam *param = arg;
1256 
1257 	param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1258 
1259 	return 0;
1260 }
1261 
1262 static int
1263 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1264 {
1265 	struct ifbrparam *param = arg;
1266 
1267 	if (param->ifbrp_maxage == 0)
1268 		return EINVAL;
1269 	sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1270 
1271 	if (sc->sc_if.if_flags & IFF_RUNNING)
1272 		bstp_initialization(sc);
1273 
1274 	return 0;
1275 }
1276 
1277 static int
1278 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1279 {
1280 	struct ifbreq *req = arg;
1281 	struct bridge_iflist *bif;
1282 	struct psref psref;
1283 
1284 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1285 	if (bif == NULL)
1286 		return ENOENT;
1287 
1288 	bif->bif_priority = req->ifbr_priority;
1289 
1290 	if (sc->sc_if.if_flags & IFF_RUNNING)
1291 		bstp_initialization(sc);
1292 
1293 	bridge_release_member(sc, bif, &psref);
1294 
1295 	return 0;
1296 }
1297 
1298 #if defined(BRIDGE_IPF)
1299 static int
1300 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1301 {
1302 	struct ifbrparam *param = arg;
1303 
1304 	param->ifbrp_filter = sc->sc_filter_flags;
1305 
1306 	return 0;
1307 }
1308 
1309 static int
1310 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1311 {
1312 	struct ifbrparam *param = arg;
1313 	uint32_t nflags, oflags;
1314 
1315 	if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1316 		return EINVAL;
1317 
1318 	nflags = param->ifbrp_filter;
1319 	oflags = sc->sc_filter_flags;
1320 
1321 	if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1322 		pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1323 			sc->sc_if.if_pfil);
1324 	}
1325 	if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1326 		pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1327 			sc->sc_if.if_pfil);
1328 	}
1329 
1330 	sc->sc_filter_flags = nflags;
1331 
1332 	return 0;
1333 }
1334 #endif /* BRIDGE_IPF */
1335 
1336 static int
1337 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1338 {
1339 	struct ifbreq *req = arg;
1340 	struct bridge_iflist *bif;
1341 	struct psref psref;
1342 
1343 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1344 	if (bif == NULL)
1345 		return ENOENT;
1346 
1347 	bif->bif_path_cost = req->ifbr_path_cost;
1348 
1349 	if (sc->sc_if.if_flags & IFF_RUNNING)
1350 		bstp_initialization(sc);
1351 
1352 	bridge_release_member(sc, bif, &psref);
1353 
1354 	return 0;
1355 }
1356 
1357 /*
1358  * bridge_ifdetach:
1359  *
1360  *	Detach an interface from a bridge.  Called when a member
1361  *	interface is detaching.
1362  */
1363 void
1364 bridge_ifdetach(struct ifnet *ifp)
1365 {
1366 	struct bridge_softc *sc = ifp->if_bridge;
1367 	struct ifbreq breq;
1368 
1369 	/* ioctl_lock should prevent this from happening */
1370 	KASSERT(sc != NULL);
1371 
1372 	memset(&breq, 0, sizeof(breq));
1373 	strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));
1374 
1375 	(void) bridge_ioctl_del(sc, &breq);
1376 }
1377 
1378 /*
1379  * bridge_init:
1380  *
1381  *	Initialize a bridge interface.
1382  */
1383 static int
1384 bridge_init(struct ifnet *ifp)
1385 {
1386 	struct bridge_softc *sc = ifp->if_softc;
1387 
1388 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1389 
1390 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1391 	    bridge_timer, sc);
1392 	bstp_initialization(sc);
1393 
1394 	ifp->if_flags |= IFF_RUNNING;
1395 	return 0;
1396 }
1397 
1398 /*
1399  * bridge_stop:
1400  *
1401  *	Stop the bridge interface.
1402  */
1403 static void
1404 bridge_stop(struct ifnet *ifp, int disable)
1405 {
1406 	struct bridge_softc *sc = ifp->if_softc;
1407 
1408 	KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1409 	ifp->if_flags &= ~IFF_RUNNING;
1410 
1411 	callout_halt(&sc->sc_brcallout, NULL);
1412 	workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1413 	bstp_stop(sc);
1414 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1415 }
1416 
1417 /*
1418  * bridge_enqueue:
1419  *
1420  *	Enqueue a packet on a bridge member interface.
1421  */
1422 void
1423 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1424     int runfilt)
1425 {
1426 	int len, error;
1427 	short mflags;
1428 
1429 	if (runfilt) {
1430 		if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1431 		    dst_ifp, PFIL_OUT) != 0) {
1432 			if (m != NULL)
1433 				m_freem(m);
1434 			return;
1435 		}
1436 		if (m == NULL)
1437 			return;
1438 	}
1439 
1440 #ifdef ALTQ
1441 	KERNEL_LOCK(1, NULL);
1442 	/*
1443 	 * If ALTQ is enabled on the member interface, do
1444 	 * classification; the queueing discipline might
1445 	 * not require classification, but might require
1446 	 * the address family/header pointer in the pktattr.
1447 	 */
1448 	if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1449 		/* XXX IFT_ETHER */
1450 		altq_etherclassify(&dst_ifp->if_snd, m);
1451 	}
1452 	KERNEL_UNLOCK_ONE(NULL);
1453 #endif /* ALTQ */
1454 
1455 	len = m->m_pkthdr.len;
1456 	mflags = m->m_flags;
1457 
1458 	error = if_transmit_lock(dst_ifp, m);
1459 	if (error) {
1460 		/* mbuf is already freed */
1461 		if_statinc(&sc->sc_if, if_oerrors);
1462 		return;
1463 	}
1464 
1465 	net_stat_ref_t nsr = IF_STAT_GETREF(&sc->sc_if);
1466 	if_statinc_ref(nsr, if_opackets);
1467 	if_statadd_ref(nsr, if_obytes, len);
1468 	if (mflags & M_MCAST)
1469 		if_statinc_ref(nsr, if_omcasts);
1470 	IF_STAT_PUTREF(&sc->sc_if);
1471 }
1472 
1473 /*
1474  * bridge_output:
1475  *
1476  *	Send output from a bridge member interface.  This
1477  *	performs the bridging function for locally originated
1478  *	packets.
1479  *
1480  *	The mbuf has the Ethernet header already attached.  We must
1481  *	enqueue or free the mbuf before returning.
1482  */
1483 int
1484 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1485     const struct rtentry *rt)
1486 {
1487 	struct ether_header *eh;
1488 	struct ifnet *dst_if;
1489 	struct bridge_softc *sc;
1490 	struct mbuf *n;
1491 	int s;
1492 
1493 	/*
1494 	 * bridge_output() is called from ether_output(); furthermore, the
1495 	 * ifp argument doesn't point to bridge(4), so don't assert
1496 	 * IFEF_MPSAFE here.
1497 	 */
1498 
1499 	KASSERT(m->m_len >= ETHER_HDR_LEN);
1500 
1501 	eh = mtod(m, struct ether_header *);
1502 	sc = ifp->if_bridge;
1503 
1504 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1505 		if (memcmp(etherbroadcastaddr,
1506 		    eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1507 			m->m_flags |= M_BCAST;
1508 		else
1509 			m->m_flags |= M_MCAST;
1510 	}
1511 
1512 	/*
1513 	 * If bridge is down, but the original output interface is up,
1514 	 * go ahead and send out that interface.  Otherwise, the packet
1515 	 * is dropped below.
1516 	 */
1517 	if (__predict_false(sc == NULL) ||
1518 	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1519 		dst_if = ifp;
1520 		goto unicast_asis;
1521 	}
1522 
1523 	/*
1524 	 * If the packet is multicast or broadcast, or we don't know a
1525 	 * better way to get there, send to all interfaces.
1526 	 */
1527 	if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1528 		dst_if = NULL;
1529 	else
1530 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1531 
1532 	/*
1533 	 * In general, we need to handle TX offload in software before
1534 	 * enqueueing a packet. However, we can send it as is in the
1535 	 * cases of unicast via (1) the source interface, or (2) an
1536 	 * interface which supports the specified offload options.
1537 	 * For multicast or broadcast, send it as is only if (3) all
1538 	 * the member interfaces support the specified options.
1539 	 */
1540 
1541 	/*
1542 	 * Unicast via the source interface.
1543 	 */
1544 	if (dst_if == ifp)
1545 		goto unicast_asis;
1546 
1547 	/*
1548 	 * Unicast via other interface.
1549 	 */
1550 	if (dst_if != NULL) {
1551 		KASSERT(m->m_flags & M_PKTHDR);
1552 		if (TX_OFFLOAD_SUPPORTED(dst_if->if_csum_flags_tx,
1553 		    m->m_pkthdr.csum_flags)) {
1554 			/*
1555 			 * Unicast via an interface which supports the
1556 			 * specified offload options.
1557 			 */
1558 			goto unicast_asis;
1559 		}
1560 
1561 		/*
1562 		 * Handle TX offload in software. For TSO, a packet is
1563 		 * split into multiple chunks. Thus, ether_sw_offload_tx()
1564 		 * returns an mbuf queue consisting of them.
1565 		 */
1566 		m = ether_sw_offload_tx(ifp, m);
1567 		if (m == NULL)
1568 			return 0;
1569 
1570 		do {
1571 			n = m->m_nextpkt;
1572 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1573 				m_freem(m);
1574 			else
1575 				bridge_enqueue(sc, dst_if, m, 0);
1576 			m = n;
1577 		} while (m != NULL);
1578 
1579 		return 0;
1580 	}
1581 
1582 	/*
1583 	 * Multicast or broadcast.
1584 	 */
1585 	if (TX_OFFLOAD_SUPPORTED(sc->sc_csum_flags_tx,
1586 	    m->m_pkthdr.csum_flags)) {
1587 		/*
1588 		 * Specified TX offload options are supported by all
1589 		 * the member interfaces of this bridge.
1590 		 */
1591 		m->m_nextpkt = NULL;	/* XXX */
1592 	} else {
1593 		/*
1594 		 * Otherwise, handle TX offload in software.
1595 		 */
1596 		m = ether_sw_offload_tx(ifp, m);
1597 		if (m == NULL)
1598 			return 0;
1599 	}
1600 
1601 	do {
1602 		/* XXX Should call bridge_broadcast, but there are locking
1603 		 * issues which need resolving first. */
1604 		struct bridge_iflist *bif;
1605 		struct mbuf *mc;
1606 		bool used = false;
1607 
1608 		n = m->m_nextpkt;
1609 
1610 		BRIDGE_PSZ_RENTER(s);
1611 		BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1612 			struct psref psref;
1613 
1614 			bridge_acquire_member(sc, bif, &psref);
1615 			BRIDGE_PSZ_REXIT(s);
1616 
1617 			dst_if = bif->bif_ifp;
1618 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1619 				goto next;
1620 
1621 			/*
1622 			 * If this is not the original output interface,
1623 			 * and the interface is participating in spanning
1624 			 * tree, make sure the port is in a state that
1625 			 * allows forwarding.
1626 			 */
1627 			if (dst_if != ifp &&
1628 			    (bif->bif_flags & IFBIF_STP) != 0) {
1629 				switch (bif->bif_state) {
1630 				case BSTP_IFSTATE_BLOCKING:
1631 				case BSTP_IFSTATE_LISTENING:
1632 				case BSTP_IFSTATE_DISABLED:
1633 					goto next;
1634 				}
1635 			}
1636 
1637 			if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1638 			    bif_next) == NULL &&
1639 			    ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1640 			    dst_if == ifp))
1641 			{
1642 				used = true;
1643 				mc = m;
1644 			} else {
1645 				mc = m_copypacket(m, M_DONTWAIT);
1646 				if (mc == NULL) {
1647 					if_statinc(&sc->sc_if, if_oerrors);
1648 					goto next;
1649 				}
1650 			}
1651 
1652 			bridge_enqueue(sc, dst_if, mc, 0);
1653 
1654 			if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1655 			    dst_if != ifp)
1656 			{
1657 				if (PSLIST_READER_NEXT(bif,
1658 				    struct bridge_iflist, bif_next) == NULL)
1659 				{
1660 					used = true;
1661 					mc = m;
1662 				} else {
1663 					mc = m_copypacket(m, M_DONTWAIT);
1664 					if (mc == NULL) {
1665 						if_statinc(&sc->sc_if,
1666 						    if_oerrors);
1667 						goto next;
1668 					}
1669 				}
1670 
1671 				m_set_rcvif(mc, dst_if);
1672 				mc->m_flags &= ~M_PROMISC;
1673 
1674 				s = splsoftnet();
1675 				KERNEL_LOCK_UNLESS_IFP_MPSAFE(dst_if);
1676 				ether_input(dst_if, mc);
1677 				KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(dst_if);
1678 				splx(s);
1679 			}
1680 
1681 next:
1682 			BRIDGE_PSZ_RENTER(s);
1683 			bridge_release_member(sc, bif, &psref);
1684 
1685 			/* Guarantee we don't re-enter the loop as we already
1686 			 * decided we're at the end. */
1687 			if (used)
1688 				break;
1689 		}
1690 		BRIDGE_PSZ_REXIT(s);
1691 
1692 		if (!used)
1693 			m_freem(m);
1694 
1695 		m = n;
1696 	} while (m != NULL);
1697 	return 0;
1698 
1699 unicast_asis:
1700 	/*
1701 	 * XXX Spanning tree consideration here?
1702 	 */
1703 	if ((dst_if->if_flags & IFF_RUNNING) == 0)
1704 		m_freem(m);
1705 	else
1706 		bridge_enqueue(sc, dst_if, m, 0);
1707 	return 0;
1708 }
1709 
1710 /*
1711  * bridge_start:
1712  *
1713  *	Start output on a bridge.
1714  *
1715  *	NOTE: This routine should never be called in this implementation.
1716  */
1717 static void
1718 bridge_start(struct ifnet *ifp)
1719 {
1720 
1721 	printf("%s: bridge_start() called\n", ifp->if_xname);
1722 }
1723 
1724 /*
1725  * bridge_forward:
1726  *
1727  *	The forwarding function of the bridge.
1728  */
1729 static void
1730 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1731 {
1732 	struct bridge_iflist *bif;
1733 	struct ifnet *src_if, *dst_if;
1734 	struct ether_header *eh;
1735 	struct psref psref;
1736 	struct psref psref_src;
1737 	DECLARE_LOCK_VARIABLE;
1738 
1739 	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1740 		return;
1741 
1742 	src_if = m_get_rcvif_psref(m, &psref_src);
1743 	if (src_if == NULL) {
1744 		/* Interface is being destroyed? */
1745 		m_freem(m);
1746 		goto out;
1747 	}
1748 
1749 	if_statadd2(&sc->sc_if, if_ipackets, 1, if_ibytes, m->m_pkthdr.len);
1750 
1751 	/*
1752 	 * Look up the bridge_iflist.
1753 	 */
1754 	bif = bridge_lookup_member_if(sc, src_if, &psref);
1755 	if (bif == NULL) {
1756 		/* Interface is not a bridge member (anymore?) */
1757 		m_freem(m);
1758 		goto out;
1759 	}
1760 
1761 	if (bif->bif_flags & IFBIF_STP) {
1762 		switch (bif->bif_state) {
1763 		case BSTP_IFSTATE_BLOCKING:
1764 		case BSTP_IFSTATE_LISTENING:
1765 		case BSTP_IFSTATE_DISABLED:
1766 			m_freem(m);
1767 			bridge_release_member(sc, bif, &psref);
1768 			goto out;
1769 		}
1770 	}
1771 
1772 	eh = mtod(m, struct ether_header *);
1773 
1774 	/*
1775 	 * If the interface is learning, and the source
1776 	 * address is valid and not multicast, record
1777 	 * the address.
1778 	 */
1779 	if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1780 	    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1781 	    (eh->ether_shost[0] == 0 &&
1782 	     eh->ether_shost[1] == 0 &&
1783 	     eh->ether_shost[2] == 0 &&
1784 	     eh->ether_shost[3] == 0 &&
1785 	     eh->ether_shost[4] == 0 &&
1786 	     eh->ether_shost[5] == 0) == 0) {
1787 		(void) bridge_rtupdate(sc, eh->ether_shost,
1788 		    src_if, 0, IFBAF_DYNAMIC);
1789 	}
1790 
1791 	if ((bif->bif_flags & IFBIF_STP) != 0 &&
1792 	    bif->bif_state == BSTP_IFSTATE_LEARNING) {
1793 		m_freem(m);
1794 		bridge_release_member(sc, bif, &psref);
1795 		goto out;
1796 	}
1797 
1798 	bridge_release_member(sc, bif, &psref);
1799 
1800 	/*
1801 	 * At this point, the port either doesn't participate
1802 	 * in spanning tree or it is in the forwarding state.
1803 	 */
1804 
1805 	/*
1806 	 * If the packet is unicast, destined for someone on
1807 	 * "this" side of the bridge, drop it.
1808 	 */
1809 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1810 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1811 		if (src_if == dst_if) {
1812 			m_freem(m);
1813 			goto out;
1814 		}
1815 	} else {
1816 		/* ...forward it to all interfaces. */
1817 		if_statinc(&sc->sc_if, if_imcasts);
1818 		dst_if = NULL;
1819 	}
1820 
1821 	if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0) {
1822 		if (m != NULL)
1823 			m_freem(m);
1824 		goto out;
1825 	}
1826 	if (m == NULL)
1827 		goto out;
1828 
1829 	if (dst_if == NULL) {
1830 		bridge_broadcast(sc, src_if, m);
1831 		goto out;
1832 	}
1833 
1834 	m_put_rcvif_psref(src_if, &psref_src);
1835 	src_if = NULL;
1836 
1837 	/*
1838 	 * At this point, we're dealing with a unicast frame
1839 	 * going to a different interface.
1840 	 */
1841 	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1842 		m_freem(m);
1843 		goto out;
1844 	}
1845 
1846 	bif = bridge_lookup_member_if(sc, dst_if, &psref);
1847 	if (bif == NULL) {
1848 		/* Not a member of the bridge (anymore?) */
1849 		m_freem(m);
1850 		goto out;
1851 	}
1852 
1853 	if (bif->bif_flags & IFBIF_STP) {
1854 		switch (bif->bif_state) {
1855 		case BSTP_IFSTATE_DISABLED:
1856 		case BSTP_IFSTATE_BLOCKING:
1857 			m_freem(m);
1858 			bridge_release_member(sc, bif, &psref);
1859 			goto out;
1860 		}
1861 	}
1862 
1863 	bridge_release_member(sc, bif, &psref);
1864 
1865 	/*
1866 	 * Before enqueueing this packet to the destination interface,
1867 	 * clear any in-bound checksum flags to prevent them from being
1868 	 * misused as out-bound flags.
1869 	 */
1870 	m->m_pkthdr.csum_flags = 0;
1871 
1872 	ACQUIRE_GLOBAL_LOCKS();
1873 	bridge_enqueue(sc, dst_if, m, 1);
1874 	RELEASE_GLOBAL_LOCKS();
1875 out:
1876 	if (src_if != NULL)
1877 		m_put_rcvif_psref(src_if, &psref_src);
1878 	return;
1879 }
1880 
1881 static bool
1882 bstp_state_before_learning(struct bridge_iflist *bif)
1883 {
1884 	if (bif->bif_flags & IFBIF_STP) {
1885 		switch (bif->bif_state) {
1886 		case BSTP_IFSTATE_BLOCKING:
1887 		case BSTP_IFSTATE_LISTENING:
1888 		case BSTP_IFSTATE_DISABLED:
1889 			return true;
1890 		}
1891 	}
1892 	return false;
1893 }
1894 
1895 static bool
1896 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1897 {
1898 	uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1899 
1900 	if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1901 #if NCARP > 0
1902 	    || (bif->bif_ifp->if_carp &&
1903 	        carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1904 #endif /* NCARP > 0 */
1905 	    )
1906 		return true;
1907 
1908 	return false;
1909 }
1910 
1911 /*
1912  * bridge_input:
1913  *
1914  *	Receive input from a member interface.  Queue the packet for
1915  *	bridging if it is not for us.
1916  */
1917 static void
1918 bridge_input(struct ifnet *ifp, struct mbuf *m)
1919 {
1920 	struct bridge_softc *sc = ifp->if_bridge;
1921 	struct bridge_iflist *bif;
1922 	struct ether_header *eh;
1923 	struct psref psref;
1924 	int bound;
1925 	DECLARE_LOCK_VARIABLE;
1926 
1927 	KASSERT(!cpu_intr_p());
1928 
1929 	if (__predict_false(sc == NULL) ||
1930 	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1931 		ACQUIRE_GLOBAL_LOCKS();
1932 		ether_input(ifp, m);
1933 		RELEASE_GLOBAL_LOCKS();
1934 		return;
1935 	}
1936 
1937 	bound = curlwp_bind();
1938 	bif = bridge_lookup_member_if(sc, ifp, &psref);
1939 	if (bif == NULL) {
1940 		curlwp_bindx(bound);
1941 		ACQUIRE_GLOBAL_LOCKS();
1942 		ether_input(ifp, m);
1943 		RELEASE_GLOBAL_LOCKS();
1944 		return;
1945 	}
1946 
1947 	eh = mtod(m, struct ether_header *);
1948 
1949 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1950 		if (memcmp(etherbroadcastaddr,
1951 		    eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1952 			m->m_flags |= M_BCAST;
1953 		else
1954 			m->m_flags |= M_MCAST;
1955 	}
1956 
1957 	/*
1958 	 * A 'fast' path for packets addressed to interfaces that are
1959 	 * part of this bridge.
1960 	 */
1961 	if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
1962 	    !bstp_state_before_learning(bif)) {
1963 		struct bridge_iflist *_bif;
1964 		struct ifnet *_ifp = NULL;
1965 		int s;
1966 		struct psref _psref;
1967 
1968 		BRIDGE_PSZ_RENTER(s);
1969 		BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
1970 			/* It is destined for us. */
1971 			if (bridge_ourether(_bif, eh, 0)) {
1972 				bridge_acquire_member(sc, _bif, &_psref);
1973 				BRIDGE_PSZ_REXIT(s);
1974 				if (_bif->bif_flags & IFBIF_LEARNING)
1975 					(void) bridge_rtupdate(sc,
1976 					    eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
1977 				m_set_rcvif(m, _bif->bif_ifp);
1978 				_ifp = _bif->bif_ifp;
1979 				bridge_release_member(sc, _bif, &_psref);
1980 				goto out;
1981 			}
1982 
1983 			/* We just received a packet that we sent out. */
1984 			if (bridge_ourether(_bif, eh, 1))
1985 				break;
1986 		}
1987 		BRIDGE_PSZ_REXIT(s);
1988 out:
1989 
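		/*
		 * _bif is non-NULL either because the frame was destined
		 * for one of our member interfaces (_ifp is set, so
		 * deliver it locally) or because we saw our own source
		 * address (_ifp is NULL, so drop it).  Otherwise fall
		 * through to normal bridge input processing below.
		 */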
1990 		if (_bif != NULL) {
1991 			bridge_release_member(sc, bif, &psref);
1992 			curlwp_bindx(bound);
1993 			if (_ifp != NULL) {
1994 				m->m_flags &= ~M_PROMISC;
1995 				ACQUIRE_GLOBAL_LOCKS();
1996 				ether_input(_ifp, m);
1997 				RELEASE_GLOBAL_LOCKS();
1998 			} else
1999 				m_freem(m);
2000 			return;
2001 		}
2002 	}
2003 
2004 	/* Tap off 802.1D packets; they do not get forwarded. */
2005 	if (bif->bif_flags & IFBIF_STP &&
2006 	    memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
2007 		bstp_input(sc, bif, m);
2008 		bridge_release_member(sc, bif, &psref);
2009 		curlwp_bindx(bound);
2010 		return;
2011 	}
2012 
2013 	/*
2014 	 * A normal switch would discard the packet here, but that's not what
2015 	 * we've done historically. This also prevents some obnoxious behaviour.
2016 	 */
2017 	if (bstp_state_before_learning(bif)) {
2018 		bridge_release_member(sc, bif, &psref);
2019 		curlwp_bindx(bound);
2020 		ACQUIRE_GLOBAL_LOCKS();
2021 		ether_input(ifp, m);
2022 		RELEASE_GLOBAL_LOCKS();
2023 		return;
2024 	}
2025 
2026 	bridge_release_member(sc, bif, &psref);
2027 
2028 	bridge_forward(sc, m);
2029 
2030 	curlwp_bindx(bound);
2031 }
2032 
2033 /*
2034  * bridge_broadcast:
2035  *
2036  *	Send a frame to all interfaces that are members of
2037  *	the bridge, except for the one on which the packet
2038  *	arrived.
2039  */
2040 static void
2041 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2042     struct mbuf *m)
2043 {
2044 	struct bridge_iflist *bif;
2045 	struct mbuf *mc;
2046 	struct ifnet *dst_if;
2047 	bool bmcast;
2048 	int s;
2049 	DECLARE_LOCK_VARIABLE;
2050 
2051 	bmcast = m->m_flags & (M_BCAST|M_MCAST);
2052 
2053 	BRIDGE_PSZ_RENTER(s);
2054 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
2055 		struct psref psref;
2056 
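		/*
		 * Hold the member with a psref so it stays valid while
		 * we are outside the pserialize read section, which is
		 * re-entered at "next:" below.
		 */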
2057 		bridge_acquire_member(sc, bif, &psref);
2058 		BRIDGE_PSZ_REXIT(s);
2059 
2060 		dst_if = bif->bif_ifp;
2061 
2062 		if (bif->bif_flags & IFBIF_STP) {
2063 			switch (bif->bif_state) {
2064 			case BSTP_IFSTATE_BLOCKING:
2065 			case BSTP_IFSTATE_DISABLED:
2066 				goto next;
2067 			}
2068 		}
2069 
2070 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2071 			goto next;
2072 
2073 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2074 			goto next;
2075 
2076 		if (dst_if != src_if) {
2077 			mc = m_copypacket(m, M_DONTWAIT);
2078 			if (mc == NULL) {
2079 				if_statinc(&sc->sc_if, if_oerrors);
2080 				goto next;
2081 			}
2082 			/*
2083 			 * Before enqueueing this packet to the destination
2084 			 * interface, clear any in-bound checksum flags to
2085 			 * prevent them from being misused as out-bound flags.
2086 			 */
2087 			mc->m_pkthdr.csum_flags = 0;
2088 
2089 			ACQUIRE_GLOBAL_LOCKS();
2090 			bridge_enqueue(sc, dst_if, mc, 1);
2091 			RELEASE_GLOBAL_LOCKS();
2092 		}
2093 
2094 		if (bmcast) {
2095 			mc = m_copypacket(m, M_DONTWAIT);
2096 			if (mc == NULL) {
2097 				if_statinc(&sc->sc_if, if_oerrors);
2098 				goto next;
2099 			}
2100 			/*
2101 			 * Before enqueueing this packet to the destination
2102 			 * interface, clear any in-bound checksum flags to
2103 			 * prevent them from being misused as out-bound flags.
2104 			 */
2105 			mc->m_pkthdr.csum_flags = 0;
2106 
2107 			m_set_rcvif(mc, dst_if);
2108 			mc->m_flags &= ~M_PROMISC;
2109 
2110 			ACQUIRE_GLOBAL_LOCKS();
2111 			ether_input(dst_if, mc);
2112 			RELEASE_GLOBAL_LOCKS();
2113 		}
2114 next:
2115 		BRIDGE_PSZ_RENTER(s);
2116 		bridge_release_member(sc, bif, &psref);
2117 	}
2118 	BRIDGE_PSZ_REXIT(s);
2119 
2120 	m_freem(m);
2121 }
2122 
2123 static int
2124 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2125     struct bridge_rtnode **brtp)
2126 {
2127 	struct bridge_rtnode *brt;
2128 	int error;
2129 
2130 	if (sc->sc_brtcnt >= sc->sc_brtmax)
2131 		return ENOSPC;
2132 
2133 	/*
2134 	 * Allocate a new bridge forwarding node, and
2135 	 * initialize the expiration time and Ethernet
2136 	 * address.
2137 	 */
2138 	brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2139 	if (brt == NULL)
2140 		return ENOMEM;
2141 
2142 	memset(brt, 0, sizeof(*brt));
2143 	brt->brt_expire = time_uptime + sc->sc_brttimeout;
2144 	brt->brt_flags = IFBAF_DYNAMIC;
2145 	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2146 	PSLIST_ENTRY_INIT(brt, brt_list);
2147 	PSLIST_ENTRY_INIT(brt, brt_hash);
2148 
2149 	BRIDGE_RT_LOCK(sc);
2150 	error = bridge_rtnode_insert(sc, brt);
2151 	BRIDGE_RT_UNLOCK(sc);
2152 
2153 	if (error != 0) {
2154 		pool_put(&bridge_rtnode_pool, brt);
2155 		return error;
2156 	}
2157 
2158 	*brtp = brt;
2159 	return 0;
2160 }
2161 
2162 /*
2163  * bridge_rtupdate:
2164  *
2165  *	Add a bridge routing entry.
2166  */
2167 static int
2168 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2169     struct ifnet *dst_if, int setflags, uint8_t flags)
2170 {
2171 	struct bridge_rtnode *brt;
2172 	int s;
2173 
2174 again:
2175 	/*
2176 	 * A route for this destination might already exist.  If so,
2177 	 * update it, otherwise create a new one.
2178 	 */
2179 	BRIDGE_RT_RENTER(s);
2180 	brt = bridge_rtnode_lookup(sc, dst);
2181 
2182 	if (brt != NULL) {
2183 		brt->brt_ifp = dst_if;
2184 		if (setflags) {
2185 			brt->brt_flags = flags;
2186 			if (flags & IFBAF_STATIC)
2187 				brt->brt_expire = 0;
2188 			else
2189 				brt->brt_expire = time_uptime + sc->sc_brttimeout;
2190 		} else {
2191 			if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2192 				brt->brt_expire = time_uptime + sc->sc_brttimeout;
2193 		}
2194 	}
2195 	BRIDGE_RT_REXIT(s);
2196 
2197 	if (brt == NULL) {
2198 		int r;
2199 
2200 		r = bridge_rtalloc(sc, dst, &brt);
2201 		if (r != 0)
2202 			return r;
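		/*
		 * The new node was inserted with IFBAF_DYNAMIC defaults;
		 * loop back so the lookup path above sets brt_ifp and,
		 * if requested, the flags.
		 */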
2203 		goto again;
2204 	}
2205 
2206 	return 0;
2207 }
2208 
2209 /*
2210  * bridge_rtlookup:
2211  *
2212  *	Lookup the destination interface for an address.
2213  */
2214 static struct ifnet *
2215 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2216 {
2217 	struct bridge_rtnode *brt;
2218 	struct ifnet *ifs = NULL;
2219 	int s;
2220 
2221 	BRIDGE_RT_RENTER(s);
2222 	brt = bridge_rtnode_lookup(sc, addr);
2223 	if (brt != NULL)
2224 		ifs = brt->brt_ifp;
2225 	BRIDGE_RT_REXIT(s);
2226 
2227 	return ifs;
2228 }
2229 
2230 typedef bool (*bridge_iterate_cb_t)
2231     (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2232 
2233 /*
2234  * bridge_rtlist_iterate_remove:
2235  *
2236  *	Iterate over sc->sc_rtlist and remove every rtnode that the func
2237  *	callback judges should be removed.  Removals are done in a
2238  *	pserialize(9)-safe manner; to this end, all kmem_* operations are
2239  *	kept outside the mutex.
2240  */
2241 static void
2242 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2243 {
2244 	struct bridge_rtnode *brt;
2245 	struct bridge_rtnode **brt_list;
2246 	int i, count;
2247 
2248 retry:
2249 	count = sc->sc_brtcnt;
2250 	if (count == 0)
2251 		return;
2252 	brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2253 
2254 	BRIDGE_RT_LOCK(sc);
2255 	if (__predict_false(sc->sc_brtcnt > count)) {
2256 		/* The number of rtnodes increased; we need more memory */
2257 		BRIDGE_RT_UNLOCK(sc);
2258 		kmem_free(brt_list, sizeof(*brt_list) * count);
2259 		goto retry;
2260 	}
2261 
2262 	i = 0;
2263 	/*
2264 	 * We don't need to use a _SAFE variant here because we know
2265 	 * that a removed item keeps its next pointer as-is thanks to
2266 	 * pslist(9) and isn't freed in the loop.
2267 	 */
2268 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2269 		bool need_break = false;
2270 		if (func(sc, brt, &need_break, arg)) {
2271 			bridge_rtnode_remove(sc, brt);
2272 			brt_list[i++] = brt;
2273 		}
2274 		if (need_break)
2275 			break;
2276 	}
2277 
2278 	if (i > 0)
2279 		BRIDGE_RT_PSZ_PERFORM(sc);
2280 	BRIDGE_RT_UNLOCK(sc);
2281 
2282 	while (--i >= 0)
2283 		bridge_rtnode_destroy(brt_list[i]);
2284 
2285 	kmem_free(brt_list, sizeof(*brt_list) * count);
2286 }
2287 
2288 static bool
2289 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2290     bool *need_break, void *arg)
2291 {
2292 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2293 		/* Take into account the subsequent removal */
2294 		if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2295 			*need_break = true;
2296 		return true;
2297 	} else
2298 		return false;
2299 }
2300 
2301 static void
2302 bridge_rttrim0(struct bridge_softc *sc)
2303 {
2304 	bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2305 }
2306 
2307 /*
2308  * bridge_rttrim:
2309  *
2310  *	Trim the routing table so that the number of
2311  *	routing entries is less than or equal to the
2312  *	maximum number.
2313  */
2314 static void
2315 bridge_rttrim(struct bridge_softc *sc)
2316 {
2317 
2318 	/* Make sure we actually need to do this. */
2319 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2320 		return;
2321 
2322 	/* Force an aging cycle; this might trim enough addresses. */
2323 	bridge_rtage(sc);
2324 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2325 		return;
2326 
2327 	bridge_rttrim0(sc);
2328 
2329 	return;
2330 }
2331 
2332 /*
2333  * bridge_timer:
2334  *
2335  *	Aging timer for the bridge.
2336  */
2337 static void
2338 bridge_timer(void *arg)
2339 {
2340 	struct bridge_softc *sc = arg;
2341 
2342 	workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2343 }
2344 
2345 static void
2346 bridge_rtage_work(struct work *wk, void *arg)
2347 {
2348 	struct bridge_softc *sc = arg;
2349 
2350 	KASSERT(wk == &sc->sc_rtage_wk);
2351 
2352 	bridge_rtage(sc);
2353 
2354 	if (sc->sc_if.if_flags & IFF_RUNNING)
2355 		callout_reset(&sc->sc_brcallout,
2356 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2357 }
2358 
2359 static bool
2360 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2361     bool *need_break, void *arg)
2362 {
2363 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2364 	    time_uptime >= brt->brt_expire)
2365 		return true;
2366 	else
2367 		return false;
2368 }
2369 
2370 /*
2371  * bridge_rtage:
2372  *
2373  *	Perform an aging cycle.
2374  */
2375 static void
2376 bridge_rtage(struct bridge_softc *sc)
2377 {
2378 	bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2379 }
2380 
2381 
2382 static bool
2383 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2384     bool *need_break, void *arg)
2385 {
2386 	int full = *(int*)arg;
2387 
2388 	if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2389 		return true;
2390 	else
2391 		return false;
2392 }
2393 
2394 /*
2395  * bridge_rtflush:
2396  *
2397  *	Remove all dynamic addresses from the bridge.
2398  */
2399 static void
2400 bridge_rtflush(struct bridge_softc *sc, int full)
2401 {
2402 	bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2403 }
2404 
2405 /*
2406  * bridge_rtdaddr:
2407  *
2408  *	Remove an address from the table.
2409  */
2410 static int
2411 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2412 {
2413 	struct bridge_rtnode *brt;
2414 
2415 	BRIDGE_RT_LOCK(sc);
2416 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2417 		BRIDGE_RT_UNLOCK(sc);
2418 		return ENOENT;
2419 	}
2420 	bridge_rtnode_remove(sc, brt);
2421 	BRIDGE_RT_PSZ_PERFORM(sc);
2422 	BRIDGE_RT_UNLOCK(sc);
2423 
2424 	bridge_rtnode_destroy(brt);
2425 
2426 	return 0;
2427 }
2428 
2429 /*
2430  * bridge_rtdelete:
2431  *
2432  *	Delete routes to a specific member interface.
2433  */
2434 static void
2435 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2436 {
2437 	struct bridge_rtnode *brt;
2438 
2439 	/* XXX pserialize_perform for each entry is slow */
2440 again:
2441 	BRIDGE_RT_LOCK(sc);
2442 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2443 		if (brt->brt_ifp == ifp)
2444 			break;
2445 	}
2446 	if (brt == NULL) {
2447 		BRIDGE_RT_UNLOCK(sc);
2448 		return;
2449 	}
2450 	bridge_rtnode_remove(sc, brt);
2451 	BRIDGE_RT_PSZ_PERFORM(sc);
2452 	BRIDGE_RT_UNLOCK(sc);
2453 
2454 	bridge_rtnode_destroy(brt);
2455 
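	/*
	 * We had to drop the lock to destroy the node, so rescan the
	 * list from the start in case more entries reference this
	 * interface.
	 */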
2456 	goto again;
2457 }
2458 
2459 /*
2460  * bridge_rtable_init:
2461  *
2462  *	Initialize the route table for this bridge.
2463  */
2464 static void
2465 bridge_rtable_init(struct bridge_softc *sc)
2466 {
2467 	int i;
2468 
2469 	sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2470 	    KM_SLEEP);
2471 
2472 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2473 		PSLIST_INIT(&sc->sc_rthash[i]);
2474 
2475 	sc->sc_rthash_key = cprng_fast32();
2476 
2477 	PSLIST_INIT(&sc->sc_rtlist);
2478 
2479 	sc->sc_rtlist_psz = pserialize_create();
2480 	sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2481 }
2482 
2483 /*
2484  * bridge_rtable_fini:
2485  *
2486  *	Deconstruct the route table for this bridge.
2487  */
2488 static void
2489 bridge_rtable_fini(struct bridge_softc *sc)
2490 {
2491 
2492 	kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2493 	mutex_obj_free(sc->sc_rtlist_lock);
2494 	pserialize_destroy(sc->sc_rtlist_psz);
2495 }
2496 
2497 /*
2498  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2499  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
2500  */
2501 #define	mix(a, b, c)							\
2502 do {									\
2503 	a -= b; a -= c; a ^= (c >> 13);					\
2504 	b -= c; b -= a; b ^= (a << 8);					\
2505 	c -= a; c -= b; c ^= (b >> 13);					\
2506 	a -= b; a -= c; a ^= (c >> 12);					\
2507 	b -= c; b -= a; b ^= (a << 16);					\
2508 	c -= a; c -= b; c ^= (b >> 5);					\
2509 	a -= b; a -= c; a ^= (c >> 3);					\
2510 	b -= c; b -= a; b ^= (a << 10);					\
2511 	c -= a; c -= b; c ^= (b >> 15);					\
2512 } while (/*CONSTCOND*/0)
2513 
2514 static inline uint32_t
2515 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2516 {
2517 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2518 
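	/*
	 * Fold the six bytes of the Ethernet address into a and b;
	 * c starts from the per-bridge random key, so hash values
	 * differ between bridges.
	 */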
2519 	b += addr[5] << 8;
2520 	b += addr[4];
2521 	a += (uint32_t)addr[3] << 24;
2522 	a += addr[2] << 16;
2523 	a += addr[1] << 8;
2524 	a += addr[0];
2525 
2526 	mix(a, b, c);
2527 
2528 	return (c & BRIDGE_RTHASH_MASK);
2529 }
2530 
2531 #undef mix
2532 
2533 /*
2534  * bridge_rtnode_lookup:
2535  *
2536  *	Look up a bridge route node for the specified destination.
2537  */
2538 static struct bridge_rtnode *
2539 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2540 {
2541 	struct bridge_rtnode *brt;
2542 	uint32_t hash;
2543 	int dir;
2544 
2545 	hash = bridge_rthash(sc, addr);
2546 	BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash) {
2547 		dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2548 		if (dir == 0)
2549 			return brt;
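		/*
		 * Hash chains are kept sorted by bridge_rtnode_insert,
		 * so once the key compares greater than the current
		 * entry it cannot appear further down the chain.
		 */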
2550 		if (dir > 0)
2551 			return NULL;
2552 	}
2553 
2554 	return NULL;
2555 }
2556 
2557 /*
2558  * bridge_rtnode_insert:
2559  *
2560  *	Insert the specified bridge node into the route table.  We
2561  *	assume the entry is not already in the table.
2562  */
2563 static int
2564 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2565 {
2566 	struct bridge_rtnode *lbrt, *prev = NULL;
2567 	uint32_t hash;
2568 
2569 	KASSERT(BRIDGE_RT_LOCKED(sc));
2570 
2571 	hash = bridge_rthash(sc, brt->brt_addr);
2572 	BRIDGE_RTHASH_WRITER_FOREACH(lbrt, sc, hash) {
2573 		int dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2574 		if (dir == 0)
2575 			return EEXIST;
2576 		if (dir > 0)
2577 			break;
2578 		prev = lbrt;
2579 	}
2580 	if (prev == NULL)
2581 		BRIDGE_RTHASH_WRITER_INSERT_HEAD(sc, hash, brt);
2582 	else
2583 		BRIDGE_RTHASH_WRITER_INSERT_AFTER(prev, brt);
2584 
2585 	BRIDGE_RTLIST_WRITER_INSERT_HEAD(sc, brt);
2586 	sc->sc_brtcnt++;
2587 
2588 	return 0;
2589 }
2590 
2591 /*
2592  * bridge_rtnode_remove:
2593  *
2594  *	Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2595  */
2596 static void
2597 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2598 {
2599 
2600 	KASSERT(BRIDGE_RT_LOCKED(sc));
2601 
2602 	BRIDGE_RTHASH_WRITER_REMOVE(brt);
2603 	BRIDGE_RTLIST_WRITER_REMOVE(brt);
2604 	sc->sc_brtcnt--;
2605 }
2606 
2607 /*
2608  * bridge_rtnode_destroy:
2609  *
2610  *	Destroy a bridge rtnode.
2611  */
2612 static void
2613 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2614 {
2615 
2616 	PSLIST_ENTRY_DESTROY(brt, brt_list);
2617 	PSLIST_ENTRY_DESTROY(brt, brt_hash);
2618 	pool_put(&bridge_rtnode_pool, brt);
2619 }
2620 
2621 #if defined(BRIDGE_IPF)
2622 extern pfil_head_t *inet_pfil_hook;                 /* XXX */
2623 extern pfil_head_t *inet6_pfil_hook;                /* XXX */
2624 
2625 /*
2626  * Send bridge packets through IPF if they are one of the types IPF can deal
2627  * with.  ARP and REVARP are passed without running IPF at all, since IPF
2628  * would pass them without question anyway.
2629  */
2630 static int
2631 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2632 {
2633 	int snap, error;
2634 	struct ether_header *eh1, eh2;
2635 	struct llc llc1;
2636 	uint16_t ether_type;
2637 
2638 	snap = 0;
2639 	error = -1;	/* Default to an error unless explicitly set to 0 below */
2640 	eh1 = mtod(*mp, struct ether_header *);
2641 	ether_type = ntohs(eh1->ether_type);
2642 
2643 	/*
2644 	 * Check for SNAP/LLC.
2645 	 */
2646 	if (ether_type < ETHERMTU) {
2647 		struct llc *llc2 = (struct llc *)(eh1 + 1);
2648 
2649 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2650 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
2651 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
2652 		    llc2->llc_control == LLC_UI) {
2653 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
2654 			snap = 1;
2655 		}
2656 	}
2657 
2658 	/*
2659 	 * If we're trying to filter bridge traffic, don't look at anything
2660 	 * other than IP and ARP traffic.  If the filter doesn't understand
2661 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
2662 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2663 	 * but of course we don't have an AppleTalk filter to begin with.
2664 	 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2665 	 * ARP traffic.)
2666 	 */
2667 	switch (ether_type) {
2668 		case ETHERTYPE_ARP:
2669 		case ETHERTYPE_REVARP:
2670 			return 0; /* Automatically pass */
2671 		case ETHERTYPE_IP:
2672 # ifdef INET6
2673 		case ETHERTYPE_IPV6:
2674 # endif /* INET6 */
2675 			break;
2676 		default:
2677 			goto bad;
2678 	}
2679 
2680 	/* Strip off the Ethernet header and keep a copy. */
2681 	m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2682 	m_adj(*mp, ETHER_HDR_LEN);
2683 
2684 	/* Strip off the SNAP header, if present */
2685 	if (snap) {
2686 		m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2687 		m_adj(*mp, sizeof(struct llc));
2688 	}
2689 
2690 	/*
2691 	 * Check basic packet sanity and run IPF through pfil.
2692 	 */
2693 	KASSERT(!cpu_intr_p());
2694 	switch (ether_type)
2695 	{
2696 	case ETHERTYPE_IP :
2697 		error = bridge_ip_checkbasic(mp);
2698 		if (error == 0)
2699 			error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2700 		break;
2701 # ifdef INET6
2702 	case ETHERTYPE_IPV6 :
2703 		error = bridge_ip6_checkbasic(mp);
2704 		if (error == 0)
2705 			error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2706 		break;
2707 # endif
2708 	default :
2709 		error = 0;
2710 		break;
2711 	}
2712 
2713 	if (*mp == NULL)
2714 		return error;
2715 	if (error != 0)
2716 		goto bad;
2717 
2718 	error = -1;
2719 
2720 	/*
2721 	 * Finally, put everything back the way it was and return
2722 	 */
2723 	if (snap) {
2724 		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2725 		if (*mp == NULL)
2726 			return error;
2727 		bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2728 	}
2729 
2730 	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2731 	if (*mp == NULL)
2732 		return error;
2733 	bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2734 
2735 	return 0;
2736 
2737     bad:
2738 	m_freem(*mp);
2739 	*mp = NULL;
2740 	return error;
2741 }
2742 
2743 /*
2744  * Perform basic checks on header size since
2745  * IPF assumes ip_input has already processed
2746  * the packet for it.  Cut-and-pasted from ip_input.c.
2747  * Given how simple the IPv6 version is,
2748  * does the IPv4 version really need to be
2749  * this complicated?
2750  *
2751  * XXX Should we update ipstat here, or not?
2752  * XXX Right now we update ipstat but not
2753  * XXX csum_counter.
2754  */
2755 static int
2756 bridge_ip_checkbasic(struct mbuf **mp)
2757 {
2758 	struct mbuf *m = *mp;
2759 	struct ip *ip;
2760 	int len, hlen;
2761 
2762 	if (*mp == NULL)
2763 		return -1;
2764 
2765 	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2766 		if ((m = m_copyup(m, sizeof(struct ip),
2767 			(max_linkhdr + 3) & ~3)) == NULL) {
2768 			/* XXXJRT new stat, please */
2769 			ip_statinc(IP_STAT_TOOSMALL);
2770 			goto bad;
2771 		}
2772 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
2773 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
2774 			ip_statinc(IP_STAT_TOOSMALL);
2775 			goto bad;
2776 		}
2777 	}
2778 	ip = mtod(m, struct ip *);
2779 	if (ip == NULL) goto bad;
2780 
2781 	if (ip->ip_v != IPVERSION) {
2782 		ip_statinc(IP_STAT_BADVERS);
2783 		goto bad;
2784 	}
2785 	hlen = ip->ip_hl << 2;
2786 	if (hlen < sizeof(struct ip)) { /* minimum header length */
2787 		ip_statinc(IP_STAT_BADHLEN);
2788 		goto bad;
2789 	}
2790 	if (hlen > m->m_len) {
2791 		if ((m = m_pullup(m, hlen)) == NULL) {
2792 			ip_statinc(IP_STAT_BADHLEN);
2793 			goto bad;
2794 		}
2795 		ip = mtod(m, struct ip *);
2796 		if (ip == NULL) goto bad;
2797 	}
2798 
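	/*
	 * Trust the hardware-verified IPv4 header checksum if the
	 * receiving interface supports it; otherwise fall back to
	 * verifying it in software.
	 */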
2799 	switch (m->m_pkthdr.csum_flags &
2800 	        ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2801 	         M_CSUM_IPv4_BAD)) {
2802 	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2803 		/* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2804 		goto bad;
2805 
2806 	case M_CSUM_IPv4:
2807 		/* Checksum was okay. */
2808 		/* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2809 		break;
2810 
2811 	default:
2812 		/* Must compute it ourselves. */
2813 		/* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2814 		if (in_cksum(m, hlen) != 0)
2815 			goto bad;
2816 		break;
2817 	}
2818 
2819 	/* Retrieve the packet length. */
2820 	len = ntohs(ip->ip_len);
2821 
2822 	/*
2823 	 * Check for additional length bogosity
2824 	 */
2825 	if (len < hlen) {
2826 		ip_statinc(IP_STAT_BADLEN);
2827 		goto bad;
2828 	}
2829 
2830 	/*
2831 	 * Check that the amount of data in the buffers
2832 	 * is at least as much as the IP header would have us expect.
2833 	 * Drop packet if shorter than we expect.
2834 	 */
2835 	if (m->m_pkthdr.len < len) {
2836 		ip_statinc(IP_STAT_TOOSHORT);
2837 		goto bad;
2838 	}
2839 
2840 	/* Checks out, proceed */
2841 	*mp = m;
2842 	return 0;
2843 
2844     bad:
2845 	*mp = m;
2846 	return -1;
2847 }
2848 
2849 # ifdef INET6
2850 /*
2851  * Same as above, but for IPv6.
2852  * Cut-and-pasted from ip6_input.c.
2853  * XXX Should we update ip6stat, or not?
2854  */
2855 static int
2856 bridge_ip6_checkbasic(struct mbuf **mp)
2857 {
2858 	struct mbuf *m = *mp;
2859 	struct ip6_hdr *ip6;
2860 
2861 	/*
2862 	 * If the IPv6 header is not aligned, slurp it up into a new
2863 	 * mbuf with space for link headers, in the event we forward
2864 	 * it.  Otherwise, if it is aligned, make sure the entire base
2865 	 * IPv6 header is in the first mbuf of the chain.
2866 	 */
2867 	if (IP6_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2868 		struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2869 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
2870 		                  (max_linkhdr + 3) & ~3)) == NULL) {
2871 			/* XXXJRT new stat, please */
2872 			ip6_statinc(IP6_STAT_TOOSMALL);
2873 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2874 			goto bad;
2875 		}
2876 	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
2877 		struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2878 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
2879 			ip6_statinc(IP6_STAT_TOOSMALL);
2880 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2881 			goto bad;
2882 		}
2883 	}
2884 
2885 	ip6 = mtod(m, struct ip6_hdr *);
2886 
2887 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2888 		ip6_statinc(IP6_STAT_BADVERS);
2889 		in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2890 		goto bad;
2891 	}
2892 
2893 	/* Checks out, proceed */
2894 	*mp = m;
2895 	return 0;
2896 
2897     bad:
2898 	*mp = m;
2899 	return -1;
2900 }
2901 # endif /* INET6 */
2902 #endif /* BRIDGE_IPF */
2903