xref: /netbsd-src/sys/net/if_bridge.c (revision f89f6560d453f5e37386cc7938c072d2f528b9fa)
1 /*	$NetBSD: if_bridge.c,v 1.98 2015/04/16 08:54:15 ozaki-r Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by Jason L. Wright
53  * 4. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66  * POSSIBILITY OF SUCH DAMAGE.
67  *
68  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69  */
70 
71 /*
72  * Network interface bridge support.
73  *
74  * TODO:
75  *
76  *	- Currently only supports Ethernet-like interfaces (Ethernet,
77  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
78  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogenous bridges).
80  */
81 
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.98 2015/04/16 08:54:15 ozaki-r Exp $");
84 
85 #ifdef _KERNEL_OPT
86 #include "opt_bridge_ipf.h"
87 #include "opt_inet.h"
88 #endif /* _KERNEL_OPT */
89 
90 #include <sys/param.h>
91 #include <sys/kernel.h>
92 #include <sys/mbuf.h>
93 #include <sys/queue.h>
94 #include <sys/socket.h>
95 #include <sys/socketvar.h> /* for softnet_lock */
96 #include <sys/sockio.h>
97 #include <sys/systm.h>
98 #include <sys/proc.h>
99 #include <sys/pool.h>
100 #include <sys/kauth.h>
101 #include <sys/cpu.h>
102 #include <sys/cprng.h>
103 #include <sys/mutex.h>
104 #include <sys/kmem.h>
105 
106 #include <net/bpf.h>
107 #include <net/if.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 #include <net/if_llc.h>
111 #include <net/pktqueue.h>
112 
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115 
116 #if defined(BRIDGE_IPF)
117 /* Used for bridge_ip[6]_checkbasic */
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #include <netinet/ip_private.h>		/* XXX */
123 
124 #include <netinet/ip6.h>
125 #include <netinet6/in6_var.h>
126 #include <netinet6/ip6_var.h>
127 #include <netinet6/ip6_private.h>	/* XXX */
128 #endif /* BRIDGE_IPF */
129 
130 /*
131  * Size of the route hash table.  Must be a power of two.
132  */
133 #ifndef BRIDGE_RTHASH_SIZE
134 #define	BRIDGE_RTHASH_SIZE		1024
135 #endif
136 
137 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
138 
139 #include "carp.h"
140 #if NCARP > 0
141 #include <netinet/in.h>
142 #include <netinet/in_var.h>
143 #include <netinet/ip_carp.h>
144 #endif
145 
146 /*
147  * Maximum number of addresses to cache.
148  */
149 #ifndef BRIDGE_RTABLE_MAX
150 #define	BRIDGE_RTABLE_MAX		100
151 #endif
152 
153 /*
154  * Spanning tree defaults.
155  */
156 #define	BSTP_DEFAULT_MAX_AGE		(20 * 256)
157 #define	BSTP_DEFAULT_HELLO_TIME		(2 * 256)
158 #define	BSTP_DEFAULT_FORWARD_DELAY	(15 * 256)
159 #define	BSTP_DEFAULT_HOLD_TIME		(1 * 256)
160 #define	BSTP_DEFAULT_BRIDGE_PRIORITY	0x8000
161 #define	BSTP_DEFAULT_PORT_PRIORITY	0x80
162 #define	BSTP_DEFAULT_PATH_COST		55
163 
164 /*
165  * Timeout (in seconds) for entries learned dynamically.
166  */
167 #ifndef BRIDGE_RTABLE_TIMEOUT
168 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
169 #endif
170 
171 /*
172  * Number of seconds between walks of the route list.
173  */
174 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
175 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
176 #endif
177 
178 #define BRIDGE_RT_INTR_LOCK(_sc)	mutex_enter((_sc)->sc_rtlist_intr_lock)
179 #define BRIDGE_RT_INTR_UNLOCK(_sc)	mutex_exit((_sc)->sc_rtlist_intr_lock)
180 #define BRIDGE_RT_INTR_LOCKED(_sc)	mutex_owned((_sc)->sc_rtlist_intr_lock)
181 
182 #define BRIDGE_RT_LOCK(_sc)	if ((_sc)->sc_rtlist_lock) \
183 					mutex_enter((_sc)->sc_rtlist_lock)
184 #define BRIDGE_RT_UNLOCK(_sc)	if ((_sc)->sc_rtlist_lock) \
185 					mutex_exit((_sc)->sc_rtlist_lock)
186 #define BRIDGE_RT_LOCKED(_sc)	(!(_sc)->sc_rtlist_lock || \
187 				 mutex_owned((_sc)->sc_rtlist_lock))
188 
189 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
190 				if ((_sc)->sc_rtlist_psz != NULL) \
191 					pserialize_perform((_sc)->sc_rtlist_psz);
192 
193 #ifdef BRIDGE_MPSAFE
194 #define BRIDGE_RT_RENTER(__s)	do { \
195 					if (!cpu_intr_p()) \
196 						__s = pserialize_read_enter(); \
197 					else \
198 						__s = splhigh(); \
199 				} while (0)
200 #define BRIDGE_RT_REXIT(__s)	do { \
201 					if (!cpu_intr_p()) \
202 						pserialize_read_exit(__s); \
203 					else \
204 						splx(__s); \
205 				} while (0)
206 #else /* BRIDGE_MPSAFE */
207 #define BRIDGE_RT_RENTER(__s)	do { __s = 0; } while (0)
208 #define BRIDGE_RT_REXIT(__s)	do { (void)__s; } while (0)
209 #endif /* BRIDGE_MPSAFE */
210 
211 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
212 
213 static struct pool bridge_rtnode_pool;
214 static struct work bridge_rtage_wk;
215 
216 void	bridgeattach(int);
217 
218 static int	bridge_clone_create(struct if_clone *, int);
219 static int	bridge_clone_destroy(struct ifnet *);
220 
221 static int	bridge_ioctl(struct ifnet *, u_long, void *);
222 static int	bridge_init(struct ifnet *);
223 static void	bridge_stop(struct ifnet *, int);
224 static void	bridge_start(struct ifnet *);
225 
226 static void	bridge_input(struct ifnet *, struct mbuf *);
227 static void	bridge_forward(void *);
228 
229 static void	bridge_timer(void *);
230 
231 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
232 				 struct mbuf *);
233 
234 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
235 				struct ifnet *, int, uint8_t);
236 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
237 static void	bridge_rttrim(struct bridge_softc *);
238 static void	bridge_rtage(struct bridge_softc *);
239 static void	bridge_rtage_work(struct work *, void *);
240 static void	bridge_rtflush(struct bridge_softc *, int);
241 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
242 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
243 
244 static void	bridge_rtable_init(struct bridge_softc *);
245 static void	bridge_rtable_fini(struct bridge_softc *);
246 
247 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
248 						  const uint8_t *);
249 static int	bridge_rtnode_insert(struct bridge_softc *,
250 				     struct bridge_rtnode *);
251 static void	bridge_rtnode_remove(struct bridge_softc *,
252 				     struct bridge_rtnode *);
253 static void	bridge_rtnode_destroy(struct bridge_rtnode *);
254 
255 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
256 						  const char *name);
257 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
258 						     struct ifnet *ifp);
259 static void	bridge_release_member(struct bridge_softc *, struct bridge_iflist *);
260 static void	bridge_delete_member(struct bridge_softc *,
261 				     struct bridge_iflist *);
262 static struct bridge_iflist *bridge_try_hold_bif(struct bridge_iflist *);
263 
264 static int	bridge_ioctl_add(struct bridge_softc *, void *);
265 static int	bridge_ioctl_del(struct bridge_softc *, void *);
266 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
267 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
268 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
269 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
270 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
271 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
272 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
273 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
274 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
275 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
276 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
277 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
278 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
279 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
280 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
281 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
282 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
283 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
284 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
285 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
286 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
287 #if defined(BRIDGE_IPF)
288 static int	bridge_ioctl_gfilt(struct bridge_softc *, void *);
289 static int	bridge_ioctl_sfilt(struct bridge_softc *, void *);
290 static int	bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
291 static int	bridge_ip_checkbasic(struct mbuf **mp);
292 # ifdef INET6
293 static int	bridge_ip6_checkbasic(struct mbuf **mp);
294 # endif /* INET6 */
295 #endif /* BRIDGE_IPF */
296 
297 static void bridge_sysctl_fwdq_setup(struct sysctllog **clog,
298     struct bridge_softc *sc);
299 
300 struct bridge_control {
301 	int	(*bc_func)(struct bridge_softc *, void *);
302 	int	bc_argsize;
303 	int	bc_flags;
304 };
305 
306 #define	BC_F_COPYIN		0x01	/* copy arguments in */
307 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
308 #define	BC_F_SUSER		0x04	/* do super-user check */
309 
310 static const struct bridge_control bridge_control_table[] = {
311 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
312 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
313 
314 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
315 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
316 
317 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
318 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
319 
320 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
321 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
322 
323 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
324 
325 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
326 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
327 
328 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
329 
330 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
331 
332 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
333 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
334 
335 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
336 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
337 
338 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
339 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
340 
341 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
342 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
343 
344 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
345 
346 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
347 #if defined(BRIDGE_IPF)
348 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
349 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
350 #endif /* BRIDGE_IPF */
351 };
352 static const int bridge_control_table_size = __arraycount(bridge_control_table);
353 
354 static LIST_HEAD(, bridge_softc) bridge_list;
355 static kmutex_t bridge_list_lock;
356 
357 static struct if_clone bridge_cloner =
358     IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
359 
360 /*
361  * bridgeattach:
362  *
363  *	Pseudo-device attach routine.
364  */
365 void
366 bridgeattach(int n)
367 {
368 
369 	pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
370 	    0, 0, 0, "brtpl", NULL, IPL_NET);
371 
372 	LIST_INIT(&bridge_list);
373 	mutex_init(&bridge_list_lock, MUTEX_DEFAULT, IPL_NET);
374 	if_clone_attach(&bridge_cloner);
375 }
376 
377 /*
378  * bridge_clone_create:
379  *
380  *	Create a new bridge instance.
381  */
382 static int
383 bridge_clone_create(struct if_clone *ifc, int unit)
384 {
385 	struct bridge_softc *sc;
386 	struct ifnet *ifp;
387 	int error, flags;
388 
389 	sc = kmem_zalloc(sizeof(*sc),  KM_SLEEP);
390 	ifp = &sc->sc_if;
391 
392 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
393 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
394 	sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
395 	sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
396 	sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
397 	sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
398 	sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
399 	sc->sc_filter_flags = 0;
400 
401 	/* Initialize our routing table. */
402 	bridge_rtable_init(sc);
403 
404 #ifdef BRIDGE_MPSAFE
405 	flags = WQ_MPSAFE;
406 #else
407 	flags = 0;
408 #endif
409 	error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
410 	    bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, flags);
411 	if (error)
412 		panic("%s: workqueue_create %d\n", __func__, error);
413 
414 	callout_init(&sc->sc_brcallout, 0);
415 	callout_init(&sc->sc_bstpcallout, 0);
416 
417 	LIST_INIT(&sc->sc_iflist);
418 #ifdef BRIDGE_MPSAFE
419 	sc->sc_iflist_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
420 	sc->sc_iflist_psz = pserialize_create();
421 	sc->sc_iflist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
422 #else
423 	sc->sc_iflist_intr_lock = NULL;
424 	sc->sc_iflist_psz = NULL;
425 	sc->sc_iflist_lock = NULL;
426 #endif
427 	cv_init(&sc->sc_iflist_cv, "if_bridge_cv");
428 
429 	if_initname(ifp, ifc->ifc_name, unit);
430 	ifp->if_softc = sc;
431 	ifp->if_mtu = ETHERMTU;
432 	ifp->if_ioctl = bridge_ioctl;
433 	ifp->if_output = bridge_output;
434 	ifp->if_start = bridge_start;
435 	ifp->if_stop = bridge_stop;
436 	ifp->if_init = bridge_init;
437 	ifp->if_type = IFT_BRIDGE;
438 	ifp->if_addrlen = 0;
439 	ifp->if_dlt = DLT_EN10MB;
440 	ifp->if_hdrlen = ETHER_HDR_LEN;
441 
442 	sc->sc_fwd_pktq = pktq_create(IFQ_MAXLEN, bridge_forward, sc);
443 	KASSERT(sc->sc_fwd_pktq != NULL);
444 
445 	bridge_sysctl_fwdq_setup(&ifp->if_sysctl_log, sc);
446 
447 	if_attach(ifp);
448 
449 	if_alloc_sadl(ifp);
450 
451 	mutex_enter(&bridge_list_lock);
452 	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
453 	mutex_exit(&bridge_list_lock);
454 
455 	return (0);
456 }
457 
458 /*
459  * bridge_clone_destroy:
460  *
461  *	Destroy a bridge instance.
462  */
463 static int
464 bridge_clone_destroy(struct ifnet *ifp)
465 {
466 	struct bridge_softc *sc = ifp->if_softc;
467 	struct bridge_iflist *bif;
468 	int s;
469 
470 	/* Must be called during IFF_RUNNING, i.e., before bridge_stop */
471 	pktq_barrier(sc->sc_fwd_pktq);
472 
473 	s = splnet();
474 
475 	bridge_stop(ifp, 1);
476 
477 	BRIDGE_LOCK(sc);
478 	while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
479 		bridge_delete_member(sc, bif);
480 	BRIDGE_UNLOCK(sc);
481 
482 	mutex_enter(&bridge_list_lock);
483 	LIST_REMOVE(sc, sc_list);
484 	mutex_exit(&bridge_list_lock);
485 
486 	splx(s);
487 
488 	if_detach(ifp);
489 
490 	/* Should be called after if_detach for safe */
491 	pktq_flush(sc->sc_fwd_pktq);
492 	pktq_destroy(sc->sc_fwd_pktq);
493 
494 	/* Tear down the routing table. */
495 	bridge_rtable_fini(sc);
496 
497 	cv_destroy(&sc->sc_iflist_cv);
498 	if (sc->sc_iflist_intr_lock)
499 		mutex_obj_free(sc->sc_iflist_intr_lock);
500 
501 	if (sc->sc_iflist_psz)
502 		pserialize_destroy(sc->sc_iflist_psz);
503 	if (sc->sc_iflist_lock)
504 		mutex_obj_free(sc->sc_iflist_lock);
505 
506 	workqueue_destroy(sc->sc_rtage_wq);
507 
508 	kmem_free(sc, sizeof(*sc));
509 
510 	return (0);
511 }
512 
513 static int
514 bridge_sysctl_fwdq_maxlen(SYSCTLFN_ARGS)
515 {
516 	struct sysctlnode node = *rnode;
517 	const struct bridge_softc *sc =	node.sysctl_data;
518 	return sysctl_pktq_maxlen(SYSCTLFN_CALL(rnode), sc->sc_fwd_pktq);
519 }
520 
521 #define	SYSCTL_BRIDGE_PKTQ(cn, c)					\
522 	static int							\
523 	bridge_sysctl_fwdq_##cn(SYSCTLFN_ARGS)				\
524 	{								\
525 		struct sysctlnode node = *rnode;			\
526 		const struct bridge_softc *sc =	node.sysctl_data;	\
527 		return sysctl_pktq_count(SYSCTLFN_CALL(rnode),		\
528 					 sc->sc_fwd_pktq, c);		\
529 	}
530 
531 SYSCTL_BRIDGE_PKTQ(items, PKTQ_NITEMS)
532 SYSCTL_BRIDGE_PKTQ(drops, PKTQ_DROPS)
533 
534 static void
535 bridge_sysctl_fwdq_setup(struct sysctllog **clog, struct bridge_softc *sc)
536 {
537 	const struct sysctlnode *cnode, *rnode;
538 	sysctlfn len_func = NULL, maxlen_func = NULL, drops_func = NULL;
539 	const char *ifname = sc->sc_if.if_xname;
540 
541 	len_func = bridge_sysctl_fwdq_items;
542 	maxlen_func = bridge_sysctl_fwdq_maxlen;
543 	drops_func = bridge_sysctl_fwdq_drops;
544 
545 	if (sysctl_createv(clog, 0, NULL, &rnode,
546 			   CTLFLAG_PERMANENT,
547 			   CTLTYPE_NODE, "interfaces",
548 			   SYSCTL_DESCR("Per-interface controls"),
549 			   NULL, 0, NULL, 0,
550 			   CTL_NET, CTL_CREATE, CTL_EOL) != 0)
551 		goto bad;
552 
553 	if (sysctl_createv(clog, 0, &rnode, &rnode,
554 			   CTLFLAG_PERMANENT,
555 			   CTLTYPE_NODE, ifname,
556 			   SYSCTL_DESCR("Interface controls"),
557 			   NULL, 0, NULL, 0,
558 			   CTL_CREATE, CTL_EOL) != 0)
559 		goto bad;
560 
561 	if (sysctl_createv(clog, 0, &rnode, &rnode,
562 			   CTLFLAG_PERMANENT,
563 			   CTLTYPE_NODE, "fwdq",
564 			   SYSCTL_DESCR("Protocol input queue controls"),
565 			   NULL, 0, NULL, 0,
566 			   CTL_CREATE, CTL_EOL) != 0)
567 		goto bad;
568 
569 	if (sysctl_createv(clog, 0, &rnode, &cnode,
570 			   CTLFLAG_PERMANENT,
571 			   CTLTYPE_INT, "len",
572 			   SYSCTL_DESCR("Current forwarding queue length"),
573 			   len_func, 0, (void *)sc, 0,
574 			   CTL_CREATE, IFQCTL_LEN, CTL_EOL) != 0)
575 		goto bad;
576 
577 	if (sysctl_createv(clog, 0, &rnode, &cnode,
578 			   CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
579 			   CTLTYPE_INT, "maxlen",
580 			   SYSCTL_DESCR("Maximum allowed forwarding queue length"),
581 			   maxlen_func, 0, (void *)sc, 0,
582 			   CTL_CREATE, IFQCTL_MAXLEN, CTL_EOL) != 0)
583 		goto bad;
584 
585 	if (sysctl_createv(clog, 0, &rnode, &cnode,
586 			   CTLFLAG_PERMANENT,
587 			   CTLTYPE_INT, "drops",
588 			   SYSCTL_DESCR("Packets dropped due to full forwarding queue"),
589 			   drops_func, 0, (void *)sc, 0,
590 			   CTL_CREATE, IFQCTL_DROPS, CTL_EOL) != 0)
591 		goto bad;
592 
593 	return;
594 bad:
595 	aprint_error("%s: could not attach sysctl nodes\n", ifname);
596 	return;
597 }
598 
599 /*
600  * bridge_ioctl:
601  *
602  *	Handle a control request from the operator.
603  */
604 static int
605 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
606 {
607 	struct bridge_softc *sc = ifp->if_softc;
608 	struct lwp *l = curlwp;	/* XXX */
609 	union {
610 		struct ifbreq ifbreq;
611 		struct ifbifconf ifbifconf;
612 		struct ifbareq ifbareq;
613 		struct ifbaconf ifbaconf;
614 		struct ifbrparam ifbrparam;
615 	} args;
616 	struct ifdrv *ifd = (struct ifdrv *) data;
617 	const struct bridge_control *bc = NULL; /* XXXGCC */
618 	int s, error = 0;
619 
620 	/* Authorize command before calling splnet(). */
621 	switch (cmd) {
622 	case SIOCGDRVSPEC:
623 	case SIOCSDRVSPEC:
624 		if (ifd->ifd_cmd >= bridge_control_table_size) {
625 			error = EINVAL;
626 			return error;
627 		}
628 
629 		bc = &bridge_control_table[ifd->ifd_cmd];
630 
631 		/* We only care about BC_F_SUSER at this point. */
632 		if ((bc->bc_flags & BC_F_SUSER) == 0)
633 			break;
634 
635 		error = kauth_authorize_network(l->l_cred,
636 		    KAUTH_NETWORK_INTERFACE_BRIDGE,
637 		    cmd == SIOCGDRVSPEC ?
638 		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
639 		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
640 		     ifd, NULL, NULL);
641 		if (error)
642 			return (error);
643 
644 		break;
645 	}
646 
647 	s = splnet();
648 
649 	switch (cmd) {
650 	case SIOCGDRVSPEC:
651 	case SIOCSDRVSPEC:
652 		KASSERT(bc != NULL);
653 		if (cmd == SIOCGDRVSPEC &&
654 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
655 			error = EINVAL;
656 			break;
657 		}
658 		else if (cmd == SIOCSDRVSPEC &&
659 		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
660 			error = EINVAL;
661 			break;
662 		}
663 
664 		/* BC_F_SUSER is checked above, before splnet(). */
665 
666 		if (ifd->ifd_len != bc->bc_argsize ||
667 		    ifd->ifd_len > sizeof(args)) {
668 			error = EINVAL;
669 			break;
670 		}
671 
672 		memset(&args, 0, sizeof(args));
673 		if (bc->bc_flags & BC_F_COPYIN) {
674 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
675 			if (error)
676 				break;
677 		}
678 
679 		error = (*bc->bc_func)(sc, &args);
680 		if (error)
681 			break;
682 
683 		if (bc->bc_flags & BC_F_COPYOUT)
684 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
685 
686 		break;
687 
688 	case SIOCSIFFLAGS:
689 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
690 			break;
691 		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
692 		case IFF_RUNNING:
693 			/*
694 			 * If interface is marked down and it is running,
695 			 * then stop and disable it.
696 			 */
697 			(*ifp->if_stop)(ifp, 1);
698 			break;
699 		case IFF_UP:
700 			/*
701 			 * If interface is marked up and it is stopped, then
702 			 * start it.
703 			 */
704 			error = (*ifp->if_init)(ifp);
705 			break;
706 		default:
707 			break;
708 		}
709 		break;
710 
711 	case SIOCSIFMTU:
712 		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
713 			error = 0;
714 		break;
715 
716 	default:
717 		error = ifioctl_common(ifp, cmd, data);
718 		break;
719 	}
720 
721 	splx(s);
722 
723 	return (error);
724 }
725 
726 /*
727  * bridge_lookup_member:
728  *
729  *	Lookup a bridge member interface.
730  */
731 static struct bridge_iflist *
732 bridge_lookup_member(struct bridge_softc *sc, const char *name)
733 {
734 	struct bridge_iflist *bif;
735 	struct ifnet *ifp;
736 	int s;
737 
738 	BRIDGE_PSZ_RENTER(s);
739 
740 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
741 		ifp = bif->bif_ifp;
742 		if (strcmp(ifp->if_xname, name) == 0)
743 			break;
744 	}
745 	bif = bridge_try_hold_bif(bif);
746 
747 	BRIDGE_PSZ_REXIT(s);
748 
749 	return bif;
750 }
751 
752 /*
753  * bridge_lookup_member_if:
754  *
755  *	Lookup a bridge member interface by ifnet*.
756  */
757 static struct bridge_iflist *
758 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
759 {
760 	struct bridge_iflist *bif;
761 	int s;
762 
763 	BRIDGE_PSZ_RENTER(s);
764 
765 	bif = member_ifp->if_bridgeif;
766 	bif = bridge_try_hold_bif(bif);
767 
768 	BRIDGE_PSZ_REXIT(s);
769 
770 	return bif;
771 }
772 
773 static struct bridge_iflist *
774 bridge_try_hold_bif(struct bridge_iflist *bif)
775 {
776 #ifdef BRIDGE_MPSAFE
777 	if (bif != NULL) {
778 		if (bif->bif_waiting)
779 			bif = NULL;
780 		else
781 			atomic_inc_32(&bif->bif_refs);
782 	}
783 #endif
784 	return bif;
785 }
786 
787 /*
788  * bridge_release_member:
789  *
790  *	Release the specified member interface.
791  */
792 static void
793 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif)
794 {
795 #ifdef BRIDGE_MPSAFE
796 	uint32_t refs;
797 
798 	refs = atomic_dec_uint_nv(&bif->bif_refs);
799 	if (__predict_false(refs == 0 && bif->bif_waiting)) {
800 		BRIDGE_INTR_LOCK(sc);
801 		cv_broadcast(&sc->sc_iflist_cv);
802 		BRIDGE_INTR_UNLOCK(sc);
803 	}
804 #else
805 	(void)sc;
806 	(void)bif;
807 #endif
808 }
809 
810 /*
811  * bridge_delete_member:
812  *
813  *	Delete the specified member interface.
814  */
815 static void
816 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
817 {
818 	struct ifnet *ifs = bif->bif_ifp;
819 
820 	KASSERT(BRIDGE_LOCKED(sc));
821 
822 	ifs->if_input = ether_input;
823 	ifs->if_bridge = NULL;
824 	ifs->if_bridgeif = NULL;
825 
826 	LIST_REMOVE(bif, bif_next);
827 
828 	BRIDGE_PSZ_PERFORM(sc);
829 
830 	BRIDGE_UNLOCK(sc);
831 
832 #ifdef BRIDGE_MPSAFE
833 	BRIDGE_INTR_LOCK(sc);
834 	bif->bif_waiting = true;
835 	membar_sync();
836 	while (bif->bif_refs > 0) {
837 		aprint_debug("%s: cv_wait on iflist\n", __func__);
838 		cv_wait(&sc->sc_iflist_cv, sc->sc_iflist_intr_lock);
839 	}
840 	bif->bif_waiting = false;
841 	BRIDGE_INTR_UNLOCK(sc);
842 #endif
843 
844 	kmem_free(bif, sizeof(*bif));
845 
846 	BRIDGE_LOCK(sc);
847 }
848 
849 static int
850 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
851 {
852 	struct ifbreq *req = arg;
853 	struct bridge_iflist *bif = NULL;
854 	struct ifnet *ifs;
855 	int error = 0;
856 
857 	ifs = ifunit(req->ifbr_ifsname);
858 	if (ifs == NULL)
859 		return (ENOENT);
860 
861 	if (sc->sc_if.if_mtu != ifs->if_mtu)
862 		return (EINVAL);
863 
864 	if (ifs->if_bridge == sc)
865 		return (EEXIST);
866 
867 	if (ifs->if_bridge != NULL)
868 		return (EBUSY);
869 
870 	if (ifs->if_input != ether_input)
871 		return EINVAL;
872 
873 	/* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
874 	if ((ifs->if_flags & IFF_SIMPLEX) == 0)
875 		return EINVAL;
876 
877 	bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
878 
879 	switch (ifs->if_type) {
880 	case IFT_ETHER:
881 		/*
882 		 * Place the interface into promiscuous mode.
883 		 */
884 		error = ifpromisc(ifs, 1);
885 		if (error)
886 			goto out;
887 		break;
888 	default:
889 		error = EINVAL;
890 		goto out;
891 	}
892 
893 	bif->bif_ifp = ifs;
894 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
895 	bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
896 	bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
897 	bif->bif_refs = 0;
898 	bif->bif_waiting = false;
899 
900 	BRIDGE_LOCK(sc);
901 
902 	ifs->if_bridge = sc;
903 	ifs->if_bridgeif = bif;
904 	LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
905 	ifs->if_input = bridge_input;
906 
907 	BRIDGE_UNLOCK(sc);
908 
909 	if (sc->sc_if.if_flags & IFF_RUNNING)
910 		bstp_initialization(sc);
911 	else
912 		bstp_stop(sc);
913 
914  out:
915 	if (error) {
916 		if (bif != NULL)
917 			kmem_free(bif, sizeof(*bif));
918 	}
919 	return (error);
920 }
921 
922 static int
923 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
924 {
925 	struct ifbreq *req = arg;
926 	const char *name = req->ifbr_ifsname;
927 	struct bridge_iflist *bif;
928 	struct ifnet *ifs;
929 
930 	BRIDGE_LOCK(sc);
931 
932 	/*
933 	 * Don't use bridge_lookup_member. We want to get a member
934 	 * with bif_refs == 0.
935 	 */
936 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
937 		ifs = bif->bif_ifp;
938 		if (strcmp(ifs->if_xname, name) == 0)
939 			break;
940 	}
941 
942 	if (bif == NULL) {
943 		BRIDGE_UNLOCK(sc);
944 		return ENOENT;
945 	}
946 
947 	bridge_delete_member(sc, bif);
948 
949 	BRIDGE_UNLOCK(sc);
950 
951 	switch (ifs->if_type) {
952 	case IFT_ETHER:
953 		/*
954 		 * Take the interface out of promiscuous mode.
955 		 * Don't call it with holding a spin lock.
956 		 */
957 		(void) ifpromisc(ifs, 0);
958 		break;
959 	default:
960 #ifdef DIAGNOSTIC
961 		panic("bridge_delete_member: impossible");
962 #endif
963 		break;
964 	}
965 
966 	bridge_rtdelete(sc, ifs);
967 
968 	if (sc->sc_if.if_flags & IFF_RUNNING)
969 		bstp_initialization(sc);
970 
971 	return 0;
972 }
973 
974 static int
975 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
976 {
977 	struct ifbreq *req = arg;
978 	struct bridge_iflist *bif;
979 
980 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
981 	if (bif == NULL)
982 		return (ENOENT);
983 
984 	req->ifbr_ifsflags = bif->bif_flags;
985 	req->ifbr_state = bif->bif_state;
986 	req->ifbr_priority = bif->bif_priority;
987 	req->ifbr_path_cost = bif->bif_path_cost;
988 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
989 
990 	bridge_release_member(sc, bif);
991 
992 	return (0);
993 }
994 
995 static int
996 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
997 {
998 	struct ifbreq *req = arg;
999 	struct bridge_iflist *bif;
1000 
1001 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1002 	if (bif == NULL)
1003 		return (ENOENT);
1004 
1005 	if (req->ifbr_ifsflags & IFBIF_STP) {
1006 		switch (bif->bif_ifp->if_type) {
1007 		case IFT_ETHER:
1008 			/* These can do spanning tree. */
1009 			break;
1010 
1011 		default:
1012 			/* Nothing else can. */
1013 			bridge_release_member(sc, bif);
1014 			return (EINVAL);
1015 		}
1016 	}
1017 
1018 	bif->bif_flags = req->ifbr_ifsflags;
1019 
1020 	bridge_release_member(sc, bif);
1021 
1022 	if (sc->sc_if.if_flags & IFF_RUNNING)
1023 		bstp_initialization(sc);
1024 
1025 	return (0);
1026 }
1027 
1028 static int
1029 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1030 {
1031 	struct ifbrparam *param = arg;
1032 
1033 	sc->sc_brtmax = param->ifbrp_csize;
1034 	bridge_rttrim(sc);
1035 
1036 	return (0);
1037 }
1038 
1039 static int
1040 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1041 {
1042 	struct ifbrparam *param = arg;
1043 
1044 	param->ifbrp_csize = sc->sc_brtmax;
1045 
1046 	return (0);
1047 }
1048 
1049 static int
1050 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1051 {
1052 	struct ifbifconf *bifc = arg;
1053 	struct bridge_iflist *bif;
1054 	struct ifbreq *breqs;
1055 	int i, count, error = 0;
1056 
1057 retry:
1058 	BRIDGE_LOCK(sc);
1059 	count = 0;
1060 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1061 		count++;
1062 	BRIDGE_UNLOCK(sc);
1063 
1064 	if (count == 0) {
1065 		bifc->ifbic_len = 0;
1066 		return 0;
1067 	}
1068 
1069 	if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1070 		/* Tell that a larger buffer is needed */
1071 		bifc->ifbic_len = sizeof(*breqs) * count;
1072 		return 0;
1073 	}
1074 
1075 	breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1076 
1077 	BRIDGE_LOCK(sc);
1078 
1079 	i = 0;
1080 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1081 		i++;
1082 	if (i > count) {
1083 		/*
1084 		 * The number of members has been increased.
1085 		 * We need more memory!
1086 		 */
1087 		BRIDGE_UNLOCK(sc);
1088 		kmem_free(breqs, sizeof(*breqs) * count);
1089 		goto retry;
1090 	}
1091 
1092 	i = 0;
1093 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1094 		struct ifbreq *breq = &breqs[i++];
1095 		memset(breq, 0, sizeof(*breq));
1096 
1097 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1098 		    sizeof(breq->ifbr_ifsname));
1099 		breq->ifbr_ifsflags = bif->bif_flags;
1100 		breq->ifbr_state = bif->bif_state;
1101 		breq->ifbr_priority = bif->bif_priority;
1102 		breq->ifbr_path_cost = bif->bif_path_cost;
1103 		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1104 	}
1105 
1106 	/* Don't call copyout with holding the mutex */
1107 	BRIDGE_UNLOCK(sc);
1108 
1109 	for (i = 0; i < count; i++) {
1110 		error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1111 		if (error)
1112 			break;
1113 	}
1114 	bifc->ifbic_len = sizeof(*breqs) * i;
1115 
1116 	kmem_free(breqs, sizeof(*breqs) * count);
1117 
1118 	return error;
1119 }
1120 
1121 static int
1122 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1123 {
1124 	struct ifbaconf *bac = arg;
1125 	struct bridge_rtnode *brt;
1126 	struct ifbareq bareq;
1127 	int count = 0, error = 0, len;
1128 
1129 	if (bac->ifbac_len == 0)
1130 		return (0);
1131 
1132 	BRIDGE_RT_INTR_LOCK(sc);
1133 
1134 	len = bac->ifbac_len;
1135 	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1136 		if (len < sizeof(bareq))
1137 			goto out;
1138 		memset(&bareq, 0, sizeof(bareq));
1139 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1140 		    sizeof(bareq.ifba_ifsname));
1141 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1142 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1143 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1144 		} else
1145 			bareq.ifba_expire = 0;
1146 		bareq.ifba_flags = brt->brt_flags;
1147 
1148 		error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1149 		if (error)
1150 			goto out;
1151 		count++;
1152 		len -= sizeof(bareq);
1153 	}
1154  out:
1155 	BRIDGE_RT_INTR_UNLOCK(sc);
1156 
1157 	bac->ifbac_len = sizeof(bareq) * count;
1158 	return (error);
1159 }
1160 
1161 static int
1162 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1163 {
1164 	struct ifbareq *req = arg;
1165 	struct bridge_iflist *bif;
1166 	int error;
1167 
1168 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1169 	if (bif == NULL)
1170 		return (ENOENT);
1171 
1172 	error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1173 	    req->ifba_flags);
1174 
1175 	bridge_release_member(sc, bif);
1176 
1177 	return (error);
1178 }
1179 
1180 static int
1181 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1182 {
1183 	struct ifbrparam *param = arg;
1184 
1185 	sc->sc_brttimeout = param->ifbrp_ctime;
1186 
1187 	return (0);
1188 }
1189 
1190 static int
1191 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1192 {
1193 	struct ifbrparam *param = arg;
1194 
1195 	param->ifbrp_ctime = sc->sc_brttimeout;
1196 
1197 	return (0);
1198 }
1199 
1200 static int
1201 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1202 {
1203 	struct ifbareq *req = arg;
1204 
1205 	return (bridge_rtdaddr(sc, req->ifba_dst));
1206 }
1207 
1208 static int
1209 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1210 {
1211 	struct ifbreq *req = arg;
1212 
1213 	bridge_rtflush(sc, req->ifbr_ifsflags);
1214 
1215 	return (0);
1216 }
1217 
1218 static int
1219 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1220 {
1221 	struct ifbrparam *param = arg;
1222 
1223 	param->ifbrp_prio = sc->sc_bridge_priority;
1224 
1225 	return (0);
1226 }
1227 
1228 static int
1229 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1230 {
1231 	struct ifbrparam *param = arg;
1232 
1233 	sc->sc_bridge_priority = param->ifbrp_prio;
1234 
1235 	if (sc->sc_if.if_flags & IFF_RUNNING)
1236 		bstp_initialization(sc);
1237 
1238 	return (0);
1239 }
1240 
1241 static int
1242 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1243 {
1244 	struct ifbrparam *param = arg;
1245 
1246 	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1247 
1248 	return (0);
1249 }
1250 
1251 static int
1252 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1253 {
1254 	struct ifbrparam *param = arg;
1255 
1256 	if (param->ifbrp_hellotime == 0)
1257 		return (EINVAL);
1258 	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1259 
1260 	if (sc->sc_if.if_flags & IFF_RUNNING)
1261 		bstp_initialization(sc);
1262 
1263 	return (0);
1264 }
1265 
1266 static int
1267 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1268 {
1269 	struct ifbrparam *param = arg;
1270 
1271 	param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1272 
1273 	return (0);
1274 }
1275 
1276 static int
1277 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1278 {
1279 	struct ifbrparam *param = arg;
1280 
1281 	if (param->ifbrp_fwddelay == 0)
1282 		return (EINVAL);
1283 	sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1284 
1285 	if (sc->sc_if.if_flags & IFF_RUNNING)
1286 		bstp_initialization(sc);
1287 
1288 	return (0);
1289 }
1290 
1291 static int
1292 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1293 {
1294 	struct ifbrparam *param = arg;
1295 
1296 	param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1297 
1298 	return (0);
1299 }
1300 
1301 static int
1302 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1303 {
1304 	struct ifbrparam *param = arg;
1305 
1306 	if (param->ifbrp_maxage == 0)
1307 		return (EINVAL);
1308 	sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1309 
1310 	if (sc->sc_if.if_flags & IFF_RUNNING)
1311 		bstp_initialization(sc);
1312 
1313 	return (0);
1314 }
1315 
1316 static int
1317 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1318 {
1319 	struct ifbreq *req = arg;
1320 	struct bridge_iflist *bif;
1321 
1322 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1323 	if (bif == NULL)
1324 		return (ENOENT);
1325 
1326 	bif->bif_priority = req->ifbr_priority;
1327 
1328 	if (sc->sc_if.if_flags & IFF_RUNNING)
1329 		bstp_initialization(sc);
1330 
1331 	bridge_release_member(sc, bif);
1332 
1333 	return (0);
1334 }
1335 
1336 #if defined(BRIDGE_IPF)
1337 static int
1338 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1339 {
1340 	struct ifbrparam *param = arg;
1341 
1342 	param->ifbrp_filter = sc->sc_filter_flags;
1343 
1344 	return (0);
1345 }
1346 
1347 static int
1348 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1349 {
1350 	struct ifbrparam *param = arg;
1351 	uint32_t nflags, oflags;
1352 
1353 	if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1354 		return (EINVAL);
1355 
1356 	nflags = param->ifbrp_filter;
1357 	oflags = sc->sc_filter_flags;
1358 
1359 	if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1360 		pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1361 			sc->sc_if.if_pfil);
1362 	}
1363 	if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1364 		pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1365 			sc->sc_if.if_pfil);
1366 	}
1367 
1368 	sc->sc_filter_flags = nflags;
1369 
1370 	return (0);
1371 }
1372 #endif /* BRIDGE_IPF */
1373 
1374 static int
1375 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1376 {
1377 	struct ifbreq *req = arg;
1378 	struct bridge_iflist *bif;
1379 
1380 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1381 	if (bif == NULL)
1382 		return (ENOENT);
1383 
1384 	bif->bif_path_cost = req->ifbr_path_cost;
1385 
1386 	if (sc->sc_if.if_flags & IFF_RUNNING)
1387 		bstp_initialization(sc);
1388 
1389 	bridge_release_member(sc, bif);
1390 
1391 	return (0);
1392 }
1393 
1394 /*
1395  * bridge_ifdetach:
1396  *
1397  *	Detach an interface from a bridge.  Called when a member
1398  *	interface is detaching.
1399  */
1400 void
1401 bridge_ifdetach(struct ifnet *ifp)
1402 {
1403 	struct bridge_softc *sc = ifp->if_bridge;
1404 	struct ifbreq breq;
1405 
1406 	/* ioctl_lock should prevent this from happening */
1407 	KASSERT(sc != NULL);
1408 
1409 	memset(&breq, 0, sizeof(breq));
1410 	strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));
1411 
1412 	(void) bridge_ioctl_del(sc, &breq);
1413 }
1414 
1415 /*
1416  * bridge_init:
1417  *
1418  *	Initialize a bridge interface.
1419  */
1420 static int
1421 bridge_init(struct ifnet *ifp)
1422 {
1423 	struct bridge_softc *sc = ifp->if_softc;
1424 
1425 	if (ifp->if_flags & IFF_RUNNING)
1426 		return (0);
1427 
1428 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1429 	    bridge_timer, sc);
1430 
1431 	ifp->if_flags |= IFF_RUNNING;
1432 	bstp_initialization(sc);
1433 	return (0);
1434 }
1435 
1436 /*
1437  * bridge_stop:
1438  *
1439  *	Stop the bridge interface.
1440  */
1441 static void
1442 bridge_stop(struct ifnet *ifp, int disable)
1443 {
1444 	struct bridge_softc *sc = ifp->if_softc;
1445 
1446 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1447 		return;
1448 
1449 	callout_stop(&sc->sc_brcallout);
1450 	bstp_stop(sc);
1451 
1452 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1453 
1454 	ifp->if_flags &= ~IFF_RUNNING;
1455 }
1456 
1457 /*
1458  * bridge_enqueue:
1459  *
1460  *	Enqueue a packet on a bridge member interface.
1461  */
1462 void
1463 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1464     int runfilt)
1465 {
1466 	ALTQ_DECL(struct altq_pktattr pktattr;)
1467 	int len, error;
1468 	short mflags;
1469 
1470 	/*
1471 	 * Clear any in-bound checksum flags for this packet.
1472 	 */
1473 	m->m_pkthdr.csum_flags = 0;
1474 
1475 	if (runfilt) {
1476 		if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1477 		    dst_ifp, PFIL_OUT) != 0) {
1478 			if (m != NULL)
1479 				m_freem(m);
1480 			return;
1481 		}
1482 		if (m == NULL)
1483 			return;
1484 	}
1485 
1486 #ifdef ALTQ
1487 	/*
1488 	 * If ALTQ is enabled on the member interface, do
1489 	 * classification; the queueing discipline might
1490 	 * not require classification, but might require
1491 	 * the address family/header pointer in the pktattr.
1492 	 */
1493 	if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1494 		/* XXX IFT_ETHER */
1495 		altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
1496 	}
1497 #endif /* ALTQ */
1498 
1499 	len = m->m_pkthdr.len;
1500 	m->m_flags |= M_PROTO1;
1501 	mflags = m->m_flags;
1502 
1503 	IFQ_ENQUEUE(&dst_ifp->if_snd, m, &pktattr, error);
1504 
1505 	if (error) {
1506 		/* mbuf is already freed */
1507 		sc->sc_if.if_oerrors++;
1508 		return;
1509 	}
1510 
1511 	sc->sc_if.if_opackets++;
1512 	sc->sc_if.if_obytes += len;
1513 
1514 	dst_ifp->if_obytes += len;
1515 
1516 	if (mflags & M_MCAST) {
1517 		sc->sc_if.if_omcasts++;
1518 		dst_ifp->if_omcasts++;
1519 	}
1520 
1521 	if ((dst_ifp->if_flags & IFF_OACTIVE) == 0)
1522 		(*dst_ifp->if_start)(dst_ifp);
1523 }
1524 
1525 /*
1526  * bridge_output:
1527  *
1528  *	Send output from a bridge member interface.  This
1529  *	performs the bridging function for locally originated
1530  *	packets.
1531  *
1532  *	The mbuf has the Ethernet header already attached.  We must
1533  *	enqueue or free the mbuf before returning.
1534  */
1535 int
1536 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1537     struct rtentry *rt)
1538 {
1539 	struct ether_header *eh;
1540 	struct ifnet *dst_if;
1541 	struct bridge_softc *sc;
1542 #ifndef BRIDGE_MPSAFE
1543 	int s;
1544 #endif
1545 
1546 	if (m->m_len < ETHER_HDR_LEN) {
1547 		m = m_pullup(m, ETHER_HDR_LEN);
1548 		if (m == NULL)
1549 			return (0);
1550 	}
1551 
1552 	eh = mtod(m, struct ether_header *);
1553 	sc = ifp->if_bridge;
1554 
1555 #ifndef BRIDGE_MPSAFE
1556 	s = splnet();
1557 #endif
1558 
1559 	/*
1560 	 * If bridge is down, but the original output interface is up,
1561 	 * go ahead and send out that interface.  Otherwise, the packet
1562 	 * is dropped below.
1563 	 */
1564 	if (__predict_false(sc == NULL) ||
1565 	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1566 		dst_if = ifp;
1567 		goto sendunicast;
1568 	}
1569 
1570 	/*
1571 	 * If the packet is a multicast, or we don't know a better way to
1572 	 * get there, send to all interfaces.
1573 	 */
1574 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
1575 		dst_if = NULL;
1576 	else
1577 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1578 	if (dst_if == NULL) {
1579 		struct bridge_iflist *bif;
1580 		struct mbuf *mc;
1581 		int used = 0;
1582 		int ss;
1583 
1584 		BRIDGE_PSZ_RENTER(ss);
1585 		LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1586 			bif = bridge_try_hold_bif(bif);
1587 			if (bif == NULL)
1588 				continue;
1589 			BRIDGE_PSZ_REXIT(ss);
1590 
1591 			dst_if = bif->bif_ifp;
1592 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1593 				goto next;
1594 
1595 			/*
1596 			 * If this is not the original output interface,
1597 			 * and the interface is participating in spanning
1598 			 * tree, make sure the port is in a state that
1599 			 * allows forwarding.
1600 			 */
1601 			if (dst_if != ifp &&
1602 			    (bif->bif_flags & IFBIF_STP) != 0) {
1603 				switch (bif->bif_state) {
1604 				case BSTP_IFSTATE_BLOCKING:
1605 				case BSTP_IFSTATE_LISTENING:
1606 				case BSTP_IFSTATE_DISABLED:
1607 					goto next;
1608 				}
1609 			}
1610 
1611 			if (LIST_NEXT(bif, bif_next) == NULL) {
1612 				used = 1;
1613 				mc = m;
1614 			} else {
1615 				mc = m_copym(m, 0, M_COPYALL, M_NOWAIT);
1616 				if (mc == NULL) {
1617 					sc->sc_if.if_oerrors++;
1618 					goto next;
1619 				}
1620 			}
1621 
1622 			bridge_enqueue(sc, dst_if, mc, 0);
1623 next:
1624 			bridge_release_member(sc, bif);
1625 			BRIDGE_PSZ_RENTER(ss);
1626 		}
1627 		BRIDGE_PSZ_REXIT(ss);
1628 
1629 		if (used == 0)
1630 			m_freem(m);
1631 #ifndef BRIDGE_MPSAFE
1632 		splx(s);
1633 #endif
1634 		return (0);
1635 	}
1636 
1637  sendunicast:
1638 	/*
1639 	 * XXX Spanning tree consideration here?
1640 	 */
1641 
1642 	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1643 		m_freem(m);
1644 #ifndef BRIDGE_MPSAFE
1645 		splx(s);
1646 #endif
1647 		return (0);
1648 	}
1649 
1650 	bridge_enqueue(sc, dst_if, m, 0);
1651 
1652 #ifndef BRIDGE_MPSAFE
1653 	splx(s);
1654 #endif
1655 	return (0);
1656 }
1657 
1658 /*
1659  * bridge_start:
1660  *
1661  *	Start output on a bridge.
1662  *
1663  *	NOTE: This routine should never be called in this implementation.
1664  */
1665 static void
1666 bridge_start(struct ifnet *ifp)
1667 {
1668 
1669 	printf("%s: bridge_start() called\n", ifp->if_xname);
1670 }
1671 
1672 /*
1673  * bridge_forward:
1674  *
1675  *	The forwarding function of the bridge.
1676  */
1677 static void
1678 bridge_forward(void *v)
1679 {
1680 	struct bridge_softc *sc = v;
1681 	struct mbuf *m;
1682 	struct bridge_iflist *bif;
1683 	struct ifnet *src_if, *dst_if;
1684 	struct ether_header *eh;
1685 #ifndef BRIDGE_MPSAFE
1686 	int s;
1687 
1688 	KERNEL_LOCK(1, NULL);
1689 	mutex_enter(softnet_lock);
1690 #endif
1691 
1692 	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1693 #ifndef BRIDGE_MPSAFE
1694 		mutex_exit(softnet_lock);
1695 		KERNEL_UNLOCK_ONE(NULL);
1696 #endif
1697 		return;
1698 	}
1699 
1700 #ifndef BRIDGE_MPSAFE
1701 	s = splnet();
1702 #endif
1703 	while ((m = pktq_dequeue(sc->sc_fwd_pktq)) != NULL) {
1704 		src_if = m->m_pkthdr.rcvif;
1705 
1706 		sc->sc_if.if_ipackets++;
1707 		sc->sc_if.if_ibytes += m->m_pkthdr.len;
1708 
1709 		/*
1710 		 * Look up the bridge_iflist.
1711 		 */
1712 		bif = bridge_lookup_member_if(sc, src_if);
1713 		if (bif == NULL) {
1714 			/* Interface is not a bridge member (anymore?) */
1715 			m_freem(m);
1716 			continue;
1717 		}
1718 
1719 		if (bif->bif_flags & IFBIF_STP) {
1720 			switch (bif->bif_state) {
1721 			case BSTP_IFSTATE_BLOCKING:
1722 			case BSTP_IFSTATE_LISTENING:
1723 			case BSTP_IFSTATE_DISABLED:
1724 				m_freem(m);
1725 				bridge_release_member(sc, bif);
1726 				continue;
1727 			}
1728 		}
1729 
1730 		eh = mtod(m, struct ether_header *);
1731 
1732 		/*
1733 		 * If the interface is learning, and the source
1734 		 * address is valid and not multicast, record
1735 		 * the address.
1736 		 */
1737 		if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1738 		    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1739 		    (eh->ether_shost[0] == 0 &&
1740 		     eh->ether_shost[1] == 0 &&
1741 		     eh->ether_shost[2] == 0 &&
1742 		     eh->ether_shost[3] == 0 &&
1743 		     eh->ether_shost[4] == 0 &&
1744 		     eh->ether_shost[5] == 0) == 0) {
1745 			(void) bridge_rtupdate(sc, eh->ether_shost,
1746 			    src_if, 0, IFBAF_DYNAMIC);
1747 		}
1748 
1749 		if ((bif->bif_flags & IFBIF_STP) != 0 &&
1750 		    bif->bif_state == BSTP_IFSTATE_LEARNING) {
1751 			m_freem(m);
1752 			bridge_release_member(sc, bif);
1753 			continue;
1754 		}
1755 
1756 		bridge_release_member(sc, bif);
1757 
1758 		/*
1759 		 * At this point, the port either doesn't participate
1760 		 * in spanning tree or it is in the forwarding state.
1761 		 */
1762 
1763 		/*
1764 		 * If the packet is unicast, destined for someone on
1765 		 * "this" side of the bridge, drop it.
1766 		 */
1767 		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1768 			dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1769 			if (src_if == dst_if) {
1770 				m_freem(m);
1771 				continue;
1772 			}
1773 		} else {
1774 			/* ...forward it to all interfaces. */
1775 			sc->sc_if.if_imcasts++;
1776 			dst_if = NULL;
1777 		}
1778 
1779 		if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1780 		    m->m_pkthdr.rcvif, PFIL_IN) != 0) {
1781 			if (m != NULL)
1782 				m_freem(m);
1783 			continue;
1784 		}
1785 		if (m == NULL)
1786 			continue;
1787 
1788 		if (dst_if == NULL) {
1789 			bridge_broadcast(sc, src_if, m);
1790 			continue;
1791 		}
1792 
1793 		/*
1794 		 * At this point, we're dealing with a unicast frame
1795 		 * going to a different interface.
1796 		 */
1797 		if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1798 			m_freem(m);
1799 			continue;
1800 		}
1801 
1802 		bif = bridge_lookup_member_if(sc, dst_if);
1803 		if (bif == NULL) {
1804 			/* Not a member of the bridge (anymore?) */
1805 			m_freem(m);
1806 			continue;
1807 		}
1808 
1809 		if (bif->bif_flags & IFBIF_STP) {
1810 			switch (bif->bif_state) {
1811 			case BSTP_IFSTATE_DISABLED:
1812 			case BSTP_IFSTATE_BLOCKING:
1813 				m_freem(m);
1814 				bridge_release_member(sc, bif);
1815 				continue;
1816 			}
1817 		}
1818 
1819 		bridge_release_member(sc, bif);
1820 
1821 		bridge_enqueue(sc, dst_if, m, 1);
1822 	}
1823 #ifndef BRIDGE_MPSAFE
1824 	splx(s);
1825 	mutex_exit(softnet_lock);
1826 	KERNEL_UNLOCK_ONE(NULL);
1827 #endif
1828 }
1829 
1830 static bool
1831 bstp_state_before_learning(struct bridge_iflist *bif)
1832 {
1833 	if (bif->bif_flags & IFBIF_STP) {
1834 		switch (bif->bif_state) {
1835 		case BSTP_IFSTATE_BLOCKING:
1836 		case BSTP_IFSTATE_LISTENING:
1837 		case BSTP_IFSTATE_DISABLED:
1838 			return true;
1839 		}
1840 	}
1841 	return false;
1842 }
1843 
1844 static bool
1845 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1846 {
1847 	uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1848 
1849 	if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1850 #if NCARP > 0
1851 	    || (bif->bif_ifp->if_carp &&
1852 	        carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1853 #endif /* NCARP > 0 */
1854 	    )
1855 		return true;
1856 
1857 	return false;
1858 }
1859 
1860 /*
1861  * bridge_input:
1862  *
1863  *	Receive input from a member interface.  Queue the packet for
1864  *	bridging if it is not for us.
1865  */
1866 static void
1867 bridge_input(struct ifnet *ifp, struct mbuf *m)
1868 {
1869 	struct bridge_softc *sc = ifp->if_bridge;
1870 	struct bridge_iflist *bif;
1871 	struct ether_header *eh;
1872 
1873 	if (__predict_false(sc == NULL) ||
1874 	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1875 		ether_input(ifp, m);
1876 		return;
1877 	}
1878 
1879 	bif = bridge_lookup_member_if(sc, ifp);
1880 	if (bif == NULL) {
1881 		ether_input(ifp, m);
1882 		return;
1883 	}
1884 
1885 	eh = mtod(m, struct ether_header *);
1886 
1887 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1888 		if (memcmp(etherbroadcastaddr,
1889 		    eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1890 			m->m_flags |= M_BCAST;
1891 		else
1892 			m->m_flags |= M_MCAST;
1893 	}
1894 
1895 	/*
1896 	 * A 'fast' path for packets addressed to interfaces that are
1897 	 * part of this bridge.
1898 	 */
1899 	if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
1900 	    !bstp_state_before_learning(bif)) {
1901 		struct bridge_iflist *_bif;
1902 		struct ifnet *_ifp = NULL;
1903 		int s;
1904 
1905 		BRIDGE_PSZ_RENTER(s);
1906 		LIST_FOREACH(_bif, &sc->sc_iflist, bif_next) {
1907 			/* It is destined for us. */
1908 			if (bridge_ourether(_bif, eh, 0)) {
1909 				_bif = bridge_try_hold_bif(_bif);
1910 				BRIDGE_PSZ_REXIT(s);
1911 				if (_bif == NULL)
1912 					goto out;
1913 				if (_bif->bif_flags & IFBIF_LEARNING)
1914 					(void) bridge_rtupdate(sc,
1915 					    eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
1916 				_ifp = m->m_pkthdr.rcvif = _bif->bif_ifp;
1917 				bridge_release_member(sc, _bif);
1918 				goto out;
1919 			}
1920 
1921 			/* We just received a packet that we sent out. */
1922 			if (bridge_ourether(_bif, eh, 1))
1923 				break;
1924 		}
1925 		BRIDGE_PSZ_REXIT(s);
1926 out:
1927 
1928 		if (_bif != NULL) {
1929 			bridge_release_member(sc, bif);
1930 			if (_ifp != NULL)
1931 				ether_input(_ifp, m);
1932 			else
1933 				m_freem(m);
1934 			return;
1935 		}
1936 	}
1937 
1938 	/* Tap off 802.1D packets; they do not get forwarded. */
1939 	if (bif->bif_flags & IFBIF_STP &&
1940 	    memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
1941 		bstp_input(sc, bif, m);
1942 		bridge_release_member(sc, bif);
1943 		return;
1944 	}
1945 
1946 	/*
1947 	 * A normal switch would discard the packet here, but that's not what
1948 	 * we've done historically. This also prevents some obnoxious behaviour.
1949 	 */
1950 	if (bstp_state_before_learning(bif)) {
1951 		bridge_release_member(sc, bif);
1952 		ether_input(ifp, m);
1953 		return;
1954 	}
1955 
1956 	bridge_release_member(sc, bif);
1957 
1958 	/* Queue the packet for bridge forwarding. */
1959 	if (__predict_false(!pktq_enqueue(sc->sc_fwd_pktq, m, 0)))
1960 		m_freem(m);
1961 }
1962 
1963 /*
1964  * bridge_broadcast:
1965  *
1966  *	Send a frame to all interfaces that are members of
1967  *	the bridge, except for the one on which the packet
1968  *	arrived.
1969  */
1970 static void
1971 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
1972     struct mbuf *m)
1973 {
1974 	struct bridge_iflist *bif;
1975 	struct mbuf *mc;
1976 	struct ifnet *dst_if;
1977 	bool used, bmcast;
1978 	int s;
1979 
1980 	used = bmcast = m->m_flags & (M_BCAST|M_MCAST);
1981 
1982 	BRIDGE_PSZ_RENTER(s);
1983 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1984 		bif = bridge_try_hold_bif(bif);
1985 		if (bif == NULL)
1986 			continue;
1987 		BRIDGE_PSZ_REXIT(s);
1988 
1989 		dst_if = bif->bif_ifp;
1990 		if (dst_if == src_if)
1991 			goto next;
1992 
1993 		if (bif->bif_flags & IFBIF_STP) {
1994 			switch (bif->bif_state) {
1995 			case BSTP_IFSTATE_BLOCKING:
1996 			case BSTP_IFSTATE_DISABLED:
1997 				goto next;
1998 			}
1999 		}
2000 
2001 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2002 			goto next;
2003 
2004 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2005 			goto next;
2006 
2007 		if (!used && LIST_NEXT(bif, bif_next) == NULL) {
2008 			mc = m;
2009 			used = true;
2010 		} else {
2011 			mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
2012 			if (mc == NULL) {
2013 				sc->sc_if.if_oerrors++;
2014 				goto next;
2015 			}
2016 		}
2017 
2018 		bridge_enqueue(sc, dst_if, mc, 1);
2019 next:
2020 		bridge_release_member(sc, bif);
2021 		BRIDGE_PSZ_RENTER(s);
2022 	}
2023 	BRIDGE_PSZ_REXIT(s);
2024 
2025 	if (bmcast)
2026 		ether_input(src_if, m);
2027 	else if (!used)
2028 		m_freem(m);
2029 }
2030 
2031 static int
2032 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2033     struct bridge_rtnode **brtp)
2034 {
2035 	struct bridge_rtnode *brt;
2036 	int error;
2037 
2038 	if (sc->sc_brtcnt >= sc->sc_brtmax)
2039 		return ENOSPC;
2040 
2041 	/*
2042 	 * Allocate a new bridge forwarding node, and
2043 	 * initialize the expiration time and Ethernet
2044 	 * address.
2045 	 */
2046 	brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2047 	if (brt == NULL)
2048 		return ENOMEM;
2049 
2050 	memset(brt, 0, sizeof(*brt));
2051 	brt->brt_expire = time_uptime + sc->sc_brttimeout;
2052 	brt->brt_flags = IFBAF_DYNAMIC;
2053 	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2054 
2055 	BRIDGE_RT_INTR_LOCK(sc);
2056 	error = bridge_rtnode_insert(sc, brt);
2057 	BRIDGE_RT_INTR_UNLOCK(sc);
2058 
2059 	if (error != 0) {
2060 		pool_put(&bridge_rtnode_pool, brt);
2061 		return error;
2062 	}
2063 
2064 	*brtp = brt;
2065 	return 0;
2066 }
2067 
2068 /*
2069  * bridge_rtupdate:
2070  *
2071  *	Add a bridge routing entry.
2072  */
2073 static int
2074 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2075     struct ifnet *dst_if, int setflags, uint8_t flags)
2076 {
2077 	struct bridge_rtnode *brt;
2078 	int s;
2079 
2080 again:
2081 	/*
2082 	 * A route for this destination might already exist.  If so,
2083 	 * update it, otherwise create a new one.
2084 	 */
2085 	BRIDGE_RT_RENTER(s);
2086 	brt = bridge_rtnode_lookup(sc, dst);
2087 
2088 	if (brt != NULL) {
2089 		brt->brt_ifp = dst_if;
2090 		if (setflags) {
2091 			brt->brt_flags = flags;
2092 			if (flags & IFBAF_STATIC)
2093 				brt->brt_expire = 0;
2094 			else
2095 				brt->brt_expire = time_uptime + sc->sc_brttimeout;
2096 		} else {
2097 			if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2098 				brt->brt_expire = time_uptime + sc->sc_brttimeout;
2099 		}
2100 	}
2101 	BRIDGE_RT_REXIT(s);
2102 
2103 	if (brt == NULL) {
2104 		int r;
2105 
2106 		r = bridge_rtalloc(sc, dst, &brt);
2107 		if (r != 0)
2108 			return r;
2109 		goto again;
2110 	}
2111 
2112 	return 0;
2113 }
2114 
2115 /*
2116  * bridge_rtlookup:
2117  *
2118  *	Lookup the destination interface for an address.
2119  */
2120 static struct ifnet *
2121 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2122 {
2123 	struct bridge_rtnode *brt;
2124 	struct ifnet *ifs = NULL;
2125 	int s;
2126 
2127 	BRIDGE_RT_RENTER(s);
2128 	brt = bridge_rtnode_lookup(sc, addr);
2129 	if (brt != NULL)
2130 		ifs = brt->brt_ifp;
2131 	BRIDGE_RT_REXIT(s);
2132 
2133 	return ifs;
2134 }
2135 
2136 typedef bool (*bridge_iterate_cb_t)
2137     (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2138 
2139 /*
2140  * bridge_rtlist_iterate_remove:
2141  *
2142  *	It iterates on sc->sc_rtlist and removes rtnodes of it which func
2143  *	callback judges to remove. Removals of rtnodes are done in a manner
2144  *	of pserialize. To this end, all kmem_* operations are placed out of
2145  *	mutexes.
2146  */
2147 static void
2148 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2149 {
2150 	struct bridge_rtnode *brt, *nbrt;
2151 	struct bridge_rtnode **brt_list;
2152 	int i, count;
2153 
2154 retry:
2155 	count = sc->sc_brtcnt;
2156 	if (count == 0)
2157 		return;
2158 	brt_list = kmem_alloc(sizeof(struct bridge_rtnode *) * count, KM_SLEEP);
2159 
2160 	BRIDGE_RT_LOCK(sc);
2161 	BRIDGE_RT_INTR_LOCK(sc);
2162 	if (__predict_false(sc->sc_brtcnt > count)) {
2163 		/* The rtnodes increased, we need more memory */
2164 		BRIDGE_RT_INTR_UNLOCK(sc);
2165 		BRIDGE_RT_UNLOCK(sc);
2166 		kmem_free(brt_list, sizeof(*brt_list) * count);
2167 		goto retry;
2168 	}
2169 
2170 	i = 0;
2171 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2172 		bool need_break = false;
2173 		if (func(sc, brt, &need_break, arg)) {
2174 			bridge_rtnode_remove(sc, brt);
2175 			brt_list[i++] = brt;
2176 		}
2177 		if (need_break)
2178 			break;
2179 	}
2180 	BRIDGE_RT_INTR_UNLOCK(sc);
2181 
2182 	if (i > 0)
2183 		BRIDGE_RT_PSZ_PERFORM(sc);
2184 	BRIDGE_RT_UNLOCK(sc);
2185 
2186 	while (--i >= 0)
2187 		bridge_rtnode_destroy(brt_list[i]);
2188 
2189 	kmem_free(brt_list, sizeof(*brt_list) * count);
2190 }
2191 
2192 static bool
2193 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2194     bool *need_break, void *arg)
2195 {
2196 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2197 		/* Take into account of the subsequent removal */
2198 		if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2199 			*need_break = true;
2200 		return true;
2201 	} else
2202 		return false;
2203 }
2204 
2205 static void
2206 bridge_rttrim0(struct bridge_softc *sc)
2207 {
2208 	bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2209 }
2210 
2211 /*
2212  * bridge_rttrim:
2213  *
2214  *	Trim the routine table so that we have a number
2215  *	of routing entries less than or equal to the
2216  *	maximum number.
2217  */
2218 static void
2219 bridge_rttrim(struct bridge_softc *sc)
2220 {
2221 
2222 	/* Make sure we actually need to do this. */
2223 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2224 		return;
2225 
2226 	/* Force an aging cycle; this might trim enough addresses. */
2227 	bridge_rtage(sc);
2228 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2229 		return;
2230 
2231 	bridge_rttrim0(sc);
2232 
2233 	return;
2234 }
2235 
2236 /*
2237  * bridge_timer:
2238  *
2239  *	Aging timer for the bridge.
2240  */
2241 static void
2242 bridge_timer(void *arg)
2243 {
2244 	struct bridge_softc *sc = arg;
2245 
2246 	workqueue_enqueue(sc->sc_rtage_wq, &bridge_rtage_wk, NULL);
2247 }
2248 
2249 static void
2250 bridge_rtage_work(struct work *wk, void *arg)
2251 {
2252 	struct bridge_softc *sc = arg;
2253 
2254 	KASSERT(wk == &bridge_rtage_wk);
2255 
2256 	bridge_rtage(sc);
2257 
2258 	if (sc->sc_if.if_flags & IFF_RUNNING)
2259 		callout_reset(&sc->sc_brcallout,
2260 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2261 }
2262 
2263 static bool
2264 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2265     bool *need_break, void *arg)
2266 {
2267 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2268 	    time_uptime >= brt->brt_expire)
2269 		return true;
2270 	else
2271 		return false;
2272 }
2273 
2274 /*
2275  * bridge_rtage:
2276  *
2277  *	Perform an aging cycle.
2278  */
2279 static void
2280 bridge_rtage(struct bridge_softc *sc)
2281 {
2282 	bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2283 }
2284 
2285 
2286 static bool
2287 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2288     bool *need_break, void *arg)
2289 {
2290 	int full = *(int*)arg;
2291 
2292 	if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2293 		return true;
2294 	else
2295 		return false;
2296 }
2297 
2298 /*
2299  * bridge_rtflush:
2300  *
2301  *	Remove all dynamic addresses from the bridge.
2302  */
2303 static void
2304 bridge_rtflush(struct bridge_softc *sc, int full)
2305 {
2306 	bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2307 }
2308 
2309 /*
2310  * bridge_rtdaddr:
2311  *
2312  *	Remove an address from the table.
2313  */
2314 static int
2315 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2316 {
2317 	struct bridge_rtnode *brt;
2318 
2319 	BRIDGE_RT_LOCK(sc);
2320 	BRIDGE_RT_INTR_LOCK(sc);
2321 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2322 		BRIDGE_RT_INTR_UNLOCK(sc);
2323 		BRIDGE_RT_UNLOCK(sc);
2324 		return ENOENT;
2325 	}
2326 	bridge_rtnode_remove(sc, brt);
2327 	BRIDGE_RT_INTR_UNLOCK(sc);
2328 	BRIDGE_RT_PSZ_PERFORM(sc);
2329 	BRIDGE_RT_UNLOCK(sc);
2330 
2331 	bridge_rtnode_destroy(brt);
2332 
2333 	return 0;
2334 }
2335 
2336 /*
2337  * bridge_rtdelete:
2338  *
2339  *	Delete routes to a speicifc member interface.
2340  */
2341 static void
2342 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2343 {
2344 	struct bridge_rtnode *brt, *nbrt;
2345 
2346 	BRIDGE_RT_LOCK(sc);
2347 	BRIDGE_RT_INTR_LOCK(sc);
2348 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2349 		if (brt->brt_ifp == ifp)
2350 			break;
2351 	}
2352 	if (brt == NULL) {
2353 		BRIDGE_RT_INTR_UNLOCK(sc);
2354 		BRIDGE_RT_UNLOCK(sc);
2355 		return;
2356 	}
2357 	bridge_rtnode_remove(sc, brt);
2358 	BRIDGE_RT_INTR_UNLOCK(sc);
2359 	BRIDGE_RT_PSZ_PERFORM(sc);
2360 	BRIDGE_RT_UNLOCK(sc);
2361 
2362 	bridge_rtnode_destroy(brt);
2363 }
2364 
2365 /*
2366  * bridge_rtable_init:
2367  *
2368  *	Initialize the route table for this bridge.
2369  */
2370 static void
2371 bridge_rtable_init(struct bridge_softc *sc)
2372 {
2373 	int i;
2374 
2375 	sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2376 	    KM_SLEEP);
2377 
2378 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2379 		LIST_INIT(&sc->sc_rthash[i]);
2380 
2381 	sc->sc_rthash_key = cprng_fast32();
2382 
2383 	LIST_INIT(&sc->sc_rtlist);
2384 
2385 	sc->sc_rtlist_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2386 #ifdef BRIDGE_MPSAFE
2387 	sc->sc_rtlist_psz = pserialize_create();
2388 	sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2389 #else
2390 	sc->sc_rtlist_psz = NULL;
2391 	sc->sc_rtlist_lock = NULL;
2392 #endif
2393 }
2394 
2395 /*
2396  * bridge_rtable_fini:
2397  *
2398  *	Deconstruct the route table for this bridge.
2399  */
2400 static void
2401 bridge_rtable_fini(struct bridge_softc *sc)
2402 {
2403 
2404 	kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2405 	if (sc->sc_rtlist_intr_lock)
2406 		mutex_obj_free(sc->sc_rtlist_intr_lock);
2407 	if (sc->sc_rtlist_lock)
2408 		mutex_obj_free(sc->sc_rtlist_lock);
2409 	if (sc->sc_rtlist_psz)
2410 		pserialize_destroy(sc->sc_rtlist_psz);
2411 }
2412 
2413 /*
2414  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2415  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2416  */
2417 #define	mix(a, b, c)							\
2418 do {									\
2419 	a -= b; a -= c; a ^= (c >> 13);					\
2420 	b -= c; b -= a; b ^= (a << 8);					\
2421 	c -= a; c -= b; c ^= (b >> 13);					\
2422 	a -= b; a -= c; a ^= (c >> 12);					\
2423 	b -= c; b -= a; b ^= (a << 16);					\
2424 	c -= a; c -= b; c ^= (b >> 5);					\
2425 	a -= b; a -= c; a ^= (c >> 3);					\
2426 	b -= c; b -= a; b ^= (a << 10);					\
2427 	c -= a; c -= b; c ^= (b >> 15);					\
2428 } while (/*CONSTCOND*/0)
2429 
2430 static inline uint32_t
2431 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2432 {
2433 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2434 
2435 	b += addr[5] << 8;
2436 	b += addr[4];
2437 	a += addr[3] << 24;
2438 	a += addr[2] << 16;
2439 	a += addr[1] << 8;
2440 	a += addr[0];
2441 
2442 	mix(a, b, c);
2443 
2444 	return (c & BRIDGE_RTHASH_MASK);
2445 }
2446 
2447 #undef mix
2448 
2449 /*
2450  * bridge_rtnode_lookup:
2451  *
2452  *	Look up a bridge route node for the specified destination.
2453  */
2454 static struct bridge_rtnode *
2455 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2456 {
2457 	struct bridge_rtnode *brt;
2458 	uint32_t hash;
2459 	int dir;
2460 
2461 	hash = bridge_rthash(sc, addr);
2462 	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
2463 		dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2464 		if (dir == 0)
2465 			return (brt);
2466 		if (dir > 0)
2467 			return (NULL);
2468 	}
2469 
2470 	return (NULL);
2471 }
2472 
2473 /*
2474  * bridge_rtnode_insert:
2475  *
2476  *	Insert the specified bridge node into the route table.  We
2477  *	assume the entry is not already in the table.
2478  */
2479 static int
2480 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2481 {
2482 	struct bridge_rtnode *lbrt;
2483 	uint32_t hash;
2484 	int dir;
2485 
2486 	KASSERT(BRIDGE_RT_INTR_LOCKED(sc));
2487 
2488 	hash = bridge_rthash(sc, brt->brt_addr);
2489 
2490 	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2491 	if (lbrt == NULL) {
2492 		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2493 		goto out;
2494 	}
2495 
2496 	do {
2497 		dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2498 		if (dir == 0)
2499 			return (EEXIST);
2500 		if (dir > 0) {
2501 			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2502 			goto out;
2503 		}
2504 		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2505 			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2506 			goto out;
2507 		}
2508 		lbrt = LIST_NEXT(lbrt, brt_hash);
2509 	} while (lbrt != NULL);
2510 
2511 #ifdef DIAGNOSTIC
2512 	panic("bridge_rtnode_insert: impossible");
2513 #endif
2514 
2515  out:
2516 	LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
2517 	sc->sc_brtcnt++;
2518 
2519 	return (0);
2520 }
2521 
2522 /*
2523  * bridge_rtnode_remove:
2524  *
2525  *	Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2526  */
2527 static void
2528 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2529 {
2530 
2531 	KASSERT(BRIDGE_RT_INTR_LOCKED(sc));
2532 
2533 	LIST_REMOVE(brt, brt_hash);
2534 	LIST_REMOVE(brt, brt_list);
2535 	sc->sc_brtcnt--;
2536 }
2537 
2538 /*
2539  * bridge_rtnode_destroy:
2540  *
2541  *	Destroy a bridge rtnode.
2542  */
2543 static void
2544 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2545 {
2546 
2547 	pool_put(&bridge_rtnode_pool, brt);
2548 }
2549 
2550 #if defined(BRIDGE_IPF)
2551 extern pfil_head_t *inet_pfil_hook;                 /* XXX */
2552 extern pfil_head_t *inet6_pfil_hook;                /* XXX */
2553 
2554 /*
2555  * Send bridge packets through IPF if they are one of the types IPF can deal
2556  * with, or if they are ARP or REVARP.  (IPF will pass ARP and REVARP without
2557  * question.)
2558  */
2559 static int
2560 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2561 {
2562 	int snap, error;
2563 	struct ether_header *eh1, eh2;
2564 	struct llc llc1;
2565 	uint16_t ether_type;
2566 
2567 	snap = 0;
2568 	error = -1;	/* Default error if not error == 0 */
2569 	eh1 = mtod(*mp, struct ether_header *);
2570 	ether_type = ntohs(eh1->ether_type);
2571 
2572 	/*
2573 	 * Check for SNAP/LLC.
2574 	 */
2575         if (ether_type < ETHERMTU) {
2576                 struct llc *llc2 = (struct llc *)(eh1 + 1);
2577 
2578                 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2579                     llc2->llc_dsap == LLC_SNAP_LSAP &&
2580                     llc2->llc_ssap == LLC_SNAP_LSAP &&
2581                     llc2->llc_control == LLC_UI) {
2582                 	ether_type = htons(llc2->llc_un.type_snap.ether_type);
2583 			snap = 1;
2584                 }
2585         }
2586 
2587 	/*
2588 	 * If we're trying to filter bridge traffic, don't look at anything
2589 	 * other than IP and ARP traffic.  If the filter doesn't understand
2590 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
2591 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2592 	 * but of course we don't have an AppleTalk filter to begin with.
2593 	 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2594 	 * ARP traffic.)
2595 	 */
2596 	switch (ether_type) {
2597 		case ETHERTYPE_ARP:
2598 		case ETHERTYPE_REVARP:
2599 			return 0; /* Automatically pass */
2600 		case ETHERTYPE_IP:
2601 # ifdef INET6
2602 		case ETHERTYPE_IPV6:
2603 # endif /* INET6 */
2604 			break;
2605 		default:
2606 			goto bad;
2607 	}
2608 
2609 	/* Strip off the Ethernet header and keep a copy. */
2610 	m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2611 	m_adj(*mp, ETHER_HDR_LEN);
2612 
2613 	/* Strip off snap header, if present */
2614 	if (snap) {
2615 		m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2616 		m_adj(*mp, sizeof(struct llc));
2617 	}
2618 
2619 	/*
2620 	 * Check basic packet sanity and run IPF through pfil.
2621 	 */
2622 	KASSERT(!cpu_intr_p());
2623 	switch (ether_type)
2624 	{
2625 	case ETHERTYPE_IP :
2626 		error = (dir == PFIL_IN) ? bridge_ip_checkbasic(mp) : 0;
2627 		if (error == 0)
2628 			error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2629 		break;
2630 # ifdef INET6
2631 	case ETHERTYPE_IPV6 :
2632 		error = (dir == PFIL_IN) ? bridge_ip6_checkbasic(mp) : 0;
2633 		if (error == 0)
2634 			error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2635 		break;
2636 # endif
2637 	default :
2638 		error = 0;
2639 		break;
2640 	}
2641 
2642 	if (*mp == NULL)
2643 		return error;
2644 	if (error != 0)
2645 		goto bad;
2646 
2647 	error = -1;
2648 
2649 	/*
2650 	 * Finally, put everything back the way it was and return
2651 	 */
2652 	if (snap) {
2653 		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2654 		if (*mp == NULL)
2655 			return error;
2656 		bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2657 	}
2658 
2659 	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2660 	if (*mp == NULL)
2661 		return error;
2662 	bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2663 
2664 	return 0;
2665 
2666     bad:
2667 	m_freem(*mp);
2668 	*mp = NULL;
2669 	return error;
2670 }
2671 
2672 /*
2673  * Perform basic checks on header size since
2674  * IPF assumes ip_input has already processed
2675  * it for it.  Cut-and-pasted from ip_input.c.
2676  * Given how simple the IPv6 version is,
2677  * does the IPv4 version really need to be
2678  * this complicated?
2679  *
2680  * XXX Should we update ipstat here, or not?
2681  * XXX Right now we update ipstat but not
2682  * XXX csum_counter.
2683  */
2684 static int
2685 bridge_ip_checkbasic(struct mbuf **mp)
2686 {
2687 	struct mbuf *m = *mp;
2688 	struct ip *ip;
2689 	int len, hlen;
2690 
2691 	if (*mp == NULL)
2692 		return -1;
2693 
2694 	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2695 		if ((m = m_copyup(m, sizeof(struct ip),
2696 			(max_linkhdr + 3) & ~3)) == NULL) {
2697 			/* XXXJRT new stat, please */
2698 			ip_statinc(IP_STAT_TOOSMALL);
2699 			goto bad;
2700 		}
2701 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
2702 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
2703 			ip_statinc(IP_STAT_TOOSMALL);
2704 			goto bad;
2705 		}
2706 	}
2707 	ip = mtod(m, struct ip *);
2708 	if (ip == NULL) goto bad;
2709 
2710 	if (ip->ip_v != IPVERSION) {
2711 		ip_statinc(IP_STAT_BADVERS);
2712 		goto bad;
2713 	}
2714 	hlen = ip->ip_hl << 2;
2715 	if (hlen < sizeof(struct ip)) { /* minimum header length */
2716 		ip_statinc(IP_STAT_BADHLEN);
2717 		goto bad;
2718 	}
2719 	if (hlen > m->m_len) {
2720 		if ((m = m_pullup(m, hlen)) == 0) {
2721 			ip_statinc(IP_STAT_BADHLEN);
2722 			goto bad;
2723 		}
2724 		ip = mtod(m, struct ip *);
2725 		if (ip == NULL) goto bad;
2726 	}
2727 
2728         switch (m->m_pkthdr.csum_flags &
2729                 ((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
2730                  M_CSUM_IPv4_BAD)) {
2731         case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2732                 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2733                 goto bad;
2734 
2735         case M_CSUM_IPv4:
2736                 /* Checksum was okay. */
2737                 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2738                 break;
2739 
2740         default:
2741                 /* Must compute it ourselves. */
2742                 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2743                 if (in_cksum(m, hlen) != 0)
2744                         goto bad;
2745                 break;
2746         }
2747 
2748         /* Retrieve the packet length. */
2749         len = ntohs(ip->ip_len);
2750 
2751         /*
2752          * Check for additional length bogosity
2753          */
2754         if (len < hlen) {
2755 		ip_statinc(IP_STAT_BADLEN);
2756                 goto bad;
2757         }
2758 
2759         /*
2760          * Check that the amount of data in the buffers
2761          * is as at least much as the IP header would have us expect.
2762          * Drop packet if shorter than we expect.
2763          */
2764         if (m->m_pkthdr.len < len) {
2765 		ip_statinc(IP_STAT_TOOSHORT);
2766                 goto bad;
2767         }
2768 
2769 	/* Checks out, proceed */
2770 	*mp = m;
2771 	return 0;
2772 
2773     bad:
2774 	*mp = m;
2775 	return -1;
2776 }
2777 
2778 # ifdef INET6
2779 /*
2780  * Same as above, but for IPv6.
2781  * Cut-and-pasted from ip6_input.c.
2782  * XXX Should we update ip6stat, or not?
2783  */
2784 static int
2785 bridge_ip6_checkbasic(struct mbuf **mp)
2786 {
2787 	struct mbuf *m = *mp;
2788 	struct ip6_hdr *ip6;
2789 
2790         /*
2791          * If the IPv6 header is not aligned, slurp it up into a new
2792          * mbuf with space for link headers, in the event we forward
2793          * it.  Otherwise, if it is aligned, make sure the entire base
2794          * IPv6 header is in the first mbuf of the chain.
2795          */
2796         if (IP6_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2797                 struct ifnet *inifp = m->m_pkthdr.rcvif;
2798                 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
2799                                   (max_linkhdr + 3) & ~3)) == NULL) {
2800                         /* XXXJRT new stat, please */
2801 			ip6_statinc(IP6_STAT_TOOSMALL);
2802                         in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2803                         goto bad;
2804                 }
2805         } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
2806                 struct ifnet *inifp = m->m_pkthdr.rcvif;
2807                 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
2808 			ip6_statinc(IP6_STAT_TOOSMALL);
2809                         in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2810                         goto bad;
2811                 }
2812         }
2813 
2814         ip6 = mtod(m, struct ip6_hdr *);
2815 
2816         if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2817 		ip6_statinc(IP6_STAT_BADVERS);
2818                 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
2819                 goto bad;
2820         }
2821 
2822 	/* Checks out, proceed */
2823 	*mp = m;
2824 	return 0;
2825 
2826     bad:
2827 	*mp = m;
2828 	return -1;
2829 }
2830 # endif /* INET6 */
2831 #endif /* BRIDGE_IPF */
2832